@article {pmid38638340, year = {2024}, author = {Abbas, Q and Alyas, T and Alghamdi, T and Alkhodre, AB and Albouq, S and Niazi, M and Tabassum, N}, title = {Redefining governance: a critical analysis of sustainability transformation in e-governance.}, journal = {Frontiers in big data}, volume = {7}, number = {}, pages = {1349116}, pmid = {38638340}, issn = {2624-909X}, abstract = {With the rapid growth of information and communication technologies, governments worldwide are embracing digital transformation to enhance service delivery and governance practices. In the rapidly evolving landscape of information technology (IT), secure data management stands as a cornerstone for organizations aiming to safeguard sensitive information. Robust data modeling techniques are pivotal in structuring and organizing data, ensuring its integrity, and facilitating efficient retrieval and analysis. As the world increasingly emphasizes sustainability, integrating eco-friendly practices into data management processes becomes imperative. This study focuses on the specific context of Pakistan and investigates the potential of cloud computing in advancing e-governance capabilities. Cloud computing offers scalability, cost efficiency, and enhanced data security, making it an ideal technology for digital transformation. Through an extensive literature review, analysis of case studies, and interviews with stakeholders, this research explores the current state of e-governance in Pakistan, identifies the challenges faced, and proposes a framework for leveraging cloud computing to overcome these challenges. The findings reveal that cloud computing can significantly enhance the accessibility, scalability, and cost-effectiveness of e-governance services, thereby improving citizen engagement and satisfaction. 
This study provides valuable insights for policymakers, government agencies, and researchers interested in the digital transformation of e-governance in Pakistan and offers a roadmap for leveraging cloud computing technologies in similar contexts. The findings contribute to the growing body of knowledge on e-governance and cloud computing, supporting the advancement of digital governance practices globally. This research identifies monitoring parameters necessary to establish a sustainable e-governance system incorporating big data and cloud computing. The proposed framework, Monitoring and Assessment System using Cloud (MASC), is validated through secondary data analysis and successfully fulfills the research objectives. By leveraging big data and cloud computing, governments can revolutionize their digital governance practices, driving transformative changes and enhancing efficiency and effectiveness in public administration.}, } @article {pmid38628614, year = {2024}, author = {Wang, TH and Kao, CC and Chang, TH}, title = {Ensemble Machine Learning for Predicting 90-Day Outcomes and Analyzing Risk Factors in Acute Kidney Injury Requiring Dialysis.}, journal = {Journal of multidisciplinary healthcare}, volume = {17}, number = {}, pages = {1589-1602}, pmid = {38628614}, issn = {1178-2390}, abstract = {PURPOSE: Our objectives were to (1) employ ensemble machine learning algorithms utilizing real-world clinical data to predict 90-day prognosis, including dialysis dependence and mortality, following the first hospitalized dialysis and (2) identify the significant factors associated with overall outcomes.

PATIENTS AND METHODS: We identified hospitalized patients with Acute kidney injury requiring dialysis (AKI-D) from a dataset of the Taipei Medical University Clinical Research Database (TMUCRD) from January 2008 to December 2020. The extracted data comprise demographics, comorbidities, medications, and laboratory parameters. Ensemble machine learning models were developed utilizing real-world clinical data through the Google Cloud Platform.

RESULTS: The study analyzed 1080 patients in the dialysis-dependent module, out of which 616 received regular dialysis after 90 days. Our ensemble model, consisting of 25 feedforward neural network models, demonstrated the best performance with an AUROC of 0.846. We identified the baseline creatinine value, assessed at least 90 days before the initial dialysis, as the most crucial factor. We selected 2358 patients, 984 of whom were deceased after 90 days, for the survival module. The ensemble model, comprising 15 feedforward neural network models and 10 gradient-boosted decision tree models, achieved superior performance with an AUROC of 0.865. The pre-dialysis creatinine value, tested within 90 days prior to the initial dialysis, was identified as the most significant factor.

CONCLUSION: Ensemble machine learning models outperform logistic regression models in predicting outcomes of AKI-D, compared to existing literature. Our study, which includes a large sample size from three different hospitals, supports the significance of the creatinine value tested before the first hospitalized dialysis in determining overall prognosis. Healthcare providers could benefit from utilizing our validated prediction model to improve clinical decision-making and enhance patient care for the high-risk population.}, } @article {pmid38628390, year = {2024}, author = {Fujinami, H and Kuraishi, S and Teramoto, A and Shimada, S and Takahashi, S and Ando, T and Yasuda, I}, title = {Development of a novel endoscopic hemostasis-assisted navigation AI system in the standardization of post-ESD coagulation.}, journal = {Endoscopy international open}, volume = {12}, number = {4}, pages = {E520-E525}, pmid = {38628390}, issn = {2364-3722}, abstract = {Background and study aims While gastric endoscopic submucosal dissection (ESD) has become a treatment with fewer complications, delayed bleeding remains a challenge. Post-ESD coagulation (PEC) is performed to prevent delayed bleeding. Therefore, we developed an artificial intelligence (AI) to detect vessels that require PEC in real time. Materials and methods Training data were extracted from 153 gastric ESD videos with sufficient images taken with a second-look endoscopy (SLE) and annotated as follows: (1) vessels that showed bleeding during SLE without PEC; (2) vessels that did not bleed during SLE with PEC; and (3) vessels that did not bleed even without PEC. The training model was created using Google Cloud Vertex AI and a program was created to display the vessels requiring PEC in real time using a bounding box. The evaluation of this AI was verified with 12 unlearned test videos, including four cases that required additional coagulation during SLE. 
Results The results of the test video validation indicated that 109 vessels on the ulcer required cauterization. Of these, 80 vessels (73.4%) were correctly determined as not requiring additional treatment. However, 25 vessels (22.9%), which did not require PEC, were overestimated. In the four videos that required additional coagulation in SLE, AI was able to detect all bleeding vessels. Conclusions The effectiveness and safety of this endoscopic treatment-assisted AI system that identifies visible vessels requiring PEC should be confirmed in future studies.}, } @article {pmid38625954, year = {2024}, author = {Frimpong, T and Hayfron Acquah, JB and Missah, YM and Dawson, JK and Ayawli, BBK and Baah, P and Sam, SA}, title = {Securing cloud data using secret key 4 optimization algorithm (SK4OA) with a non-linearity run time trend.}, journal = {PloS one}, volume = {19}, number = {4}, pages = {e0301760}, pmid = {38625954}, issn = {1932-6203}, mesh = {*Algorithms ; *Information Storage and Retrieval ; Cloud Computing ; Computer Security ; Microcomputers ; }, abstract = {Cloud computing alludes to the on-demand availability of personal computer framework resources, primarily information storage and processing power, without the customer's direct personal involvement. Cloud computing has developed dramatically among many organizations due to its benefits such as cost savings, resource pooling, broad network access, and ease of management; nonetheless, security has been a major concern. Researchers have proposed several cryptographic methods to offer cloud data security; however, their execution times are linear and longer. A Security Key 4 Optimization Algorithm (SK4OA) with a non-linear run time is proposed in this paper. The secret key of SK4OA determines the run time rather than the size of the data as such is able to transmit large volumes of data with minimal bandwidth and able to resist security attacks like brute force since its execution timings are unpredictable. 
A data set from Kaggle was used to determine the algorithm's mean and standard deviation after thirty (30) times of execution. Data sizes of 3KB, 5KB, 8KB, 12KB, and 16 KB were used in this study. There was an empirical analysis done against RC4, Salsa20, and Chacha20 based on encryption time, decryption time, throughput and memory utilization. The analysis showed that SK4OA generated lowest mean non-linear run time of 5.545±2.785 when 16KB of data was executed. Additionally, SK4OA's standard deviation was greater, indicating that the observed data varied far from the mean. However, RC4, Salsa20, and Chacha20 showed smaller standard deviations making them more clustered around the mean resulting in predictable run times.}, } @article {pmid38610575, year = {2024}, author = {Ocampo, AF and Fida, MR and Elmokashfi, A and Bryhni, H}, title = {Assessing the Cloud-RAN in the Linux Kernel: Sharing Computing and Network Resources.}, journal = {Sensors (Basel, Switzerland)}, volume = {24}, number = {7}, pages = {}, pmid = {38610575}, issn = {1424-8220}, abstract = {Cloud-based Radio Access Network (Cloud-RAN) leverages virtualization to enable the coexistence of multiple virtual Base Band Units (vBBUs) with collocated workloads on a single edge computer, aiming for economic and operational efficiency. However, this coexistence can cause performance degradation in vBBUs due to resource contention. In this paper, we conduct an empirical analysis of vBBU performance on a Linux RT-Kernel, highlighting the impact of resource sharing with user-space tasks and Kernel threads. Furthermore, we evaluate CPU management strategies such as CPU affinity and CPU isolation as potential solutions to these performance challenges. Our results highlight that the implementation of CPU affinity can significantly reduce throughput variability by up to 40%, decrease vBBU's NACK ratios, and reduce vBBU scheduling latency within the Linux RT-Kernel. 
Collectively, these findings underscore the potential of CPU management strategies to enhance vBBU performance in Cloud-RAN environments, enabling more efficient and stable network operations. The paper concludes with a discussion on the efficient realization of Cloud-RAN, elucidating the benefits of implementing proposed CPU affinity allocations. The demonstrated enhancements, including reduced scheduling latency and improved end-to-end throughput, affirm the practicality and efficacy of the proposed strategies for optimizing Cloud-RAN deployments.}, } @article {pmid38610476, year = {2024}, author = {Liang, YP and Chang, CM and Chung, CC}, title = {Implementation of Lightweight Convolutional Neural Networks with an Early Exit Mechanism Utilizing 40 nm CMOS Process for Fire Detection in Unmanned Aerial Vehicles.}, journal = {Sensors (Basel, Switzerland)}, volume = {24}, number = {7}, pages = {}, pmid = {38610476}, issn = {1424-8220}, support = {MOST-111-2221- E-194-049-//Ministry of Science and Technology of Taiwan/ ; }, abstract = {The advancement of unmanned aerial vehicles (UAVs) enables early detection of numerous disasters. Efforts have been made to automate the monitoring of data from UAVs, with machine learning methods recently attracting significant interest. These solutions often face challenges with high computational costs and energy usage. Conventionally, data from UAVs are processed using cloud computing, where they are sent to the cloud for analysis. However, this method might not meet the real-time needs of disaster relief scenarios. In contrast, edge computing provides real-time processing at the site but still struggles with computational and energy efficiency issues. To overcome these obstacles and enhance resource utilization, this paper presents a convolutional neural network (CNN) model with an early exit mechanism designed for fire detection in UAVs. 
This model is implemented using TSMC 40 nm CMOS technology, which aids in hardware acceleration. Notably, the neural network has a modest parameter count of 11.2 k. In the hardware computation part, the CNN circuit completes fire detection in approximately 230,000 cycles. Power-gating techniques are also used to turn off inactive memory, contributing to reduced power consumption. The experimental results show that this neural network reaches a maximum accuracy of 81.49% in the hardware implementation stage. After automatic layout and routing, the CNN hardware accelerator can operate at 300 MHz, consuming 117 mW of power.}, } @article {pmid38610447, year = {2024}, author = {Gomes, B and Soares, C and Torres, JM and Karmali, K and Karmali, S and Moreira, RS and Sobral, P}, title = {An Efficient Edge Computing-Enabled Network for Used Cooking Oil Collection.}, journal = {Sensors (Basel, Switzerland)}, volume = {24}, number = {7}, pages = {}, pmid = {38610447}, issn = {1424-8220}, abstract = {In Portugal, more than 98% of domestic cooking oil is disposed of improperly every day. This prevents it from being recycled/reconverted into another form of energy. It may also become a potentially harmful contaminant of soil and water. Driven by the utility of recycled cooking oil, and leveraging the exponential growth of ubiquitous computing approaches, we propose an IoT smart solution for domestic used cooking oil (UCO) collection bins. We call this approach SWAN, which stands for Smart Waste Accumulation Network. It is deployed and evaluated in Portugal. It consists of a countrywide network of collection bin units, available in public areas. Two metrics are considered to evaluate the system's success: (i) user engagement, and (ii) used cooking oil collection efficiency. The presented system should (i) perform under scenarios of temporary communication network failures, and (ii) be scalable to accommodate an ever-growing number of installed collection units. 
Thus, we choose a disruptive approach from the traditional cloud computing paradigm. It relies on edge node infrastructure to process, store, and act upon the locally collected data. The communication appears as a delay-tolerant task, i.e., an edge computing solution. We conduct a comparative analysis revealing the benefits of the edge computing enabled collection bin vs. a cloud computing solution. The studied period considers four years of collected data. An exponential increase in the amount of used cooking oil collected is identified, with the developed solution being responsible for surpassing the national collection totals of previous years. During the same period, we also improved the collection process as we were able to more accurately estimate the optimal collection and system's maintenance intervals.}, } @article {pmid38610327, year = {2024}, author = {Armijo, A and Zamora-Sánchez, D}, title = {Integration of Railway Bridge Structural Health Monitoring into the Internet of Things with a Digital Twin: A Case Study.}, journal = {Sensors (Basel, Switzerland)}, volume = {24}, number = {7}, pages = {}, pmid = {38610327}, issn = {1424-8220}, support = {ZL-2020/00902//Basque Government/ ; GA10112353//European Commission/ ; GA10108395//European Commission/ ; }, abstract = {Structural health monitoring (SHM) is critical for ensuring the safety of infrastructure such as bridges. This article presents a digital twin solution for the SHM of railway bridges using low-cost wireless accelerometers and machine learning (ML). The system architecture combines on-premises edge computing and cloud analytics to enable efficient real-time monitoring and complete storage of relevant time-history datasets. After train crossings, the accelerometers stream raw vibration data, which are processed in the frequency domain and analyzed using machine learning to detect anomalies that indicate potential structural issues. 
The digital twin approach is demonstrated on an in-service railway bridge for which vibration data were collected over two years under normal operating conditions. By learning allowable ranges for vibration patterns, the digital twin model identifies abnormal spectral peaks that indicate potential changes in structural integrity. The long-term pilot proves that this affordable SHM system can provide automated and real-time warnings of bridge damage and also supports the use of in-house-designed sensors with lower cost and edge computing capabilities such as those used in the demonstration. The successful on-premises-cloud hybrid implementation provides a cost effective and scalable model for expanding monitoring to thousands of railway bridges, democratizing SHM to improve safety by avoiding catastrophic failures.}, } @article {pmid38610235, year = {2024}, author = {Gaffurini, M and Flammini, A and Ferrari, P and Fernandes Carvalho, D and Godoy, EP and Sisinni, E}, title = {End-to-End Emulation of LoRaWAN Architecture and Infrastructure in Complex Smart City Scenarios Exploiting Containers.}, journal = {Sensors (Basel, Switzerland)}, volume = {24}, number = {7}, pages = {}, pmid = {38610235}, issn = {1424-8220}, support = {1033 17/06/2022, CN00000023//European Union/ ; }, abstract = {In a LoRaWAN network, the backend is generally distributed as Software as a Service (SaaS) based on container technology, and recently, a containerized version of the LoRaWAN node stack is also available. Exploiting the disaggregation of LoRaWAN components, this paper focuses on the emulation of complex end-to-end architecture and infrastructures for smart city scenarios, leveraging on lightweight virtualization technology. The fundamental metrics to gain insights and evaluate the scaling complexity of the emulated scenario are defined. Then, the methodology is applied to use cases taken from a real LoRaWAN application in a smart city with hundreds of nodes. 
As a result, the proposed approach based on containers allows for the following: (i) deployments of functionalities on diverse distributed hosts; (ii) the use of the very same SW running on real nodes; (iii) the simple configuration and management of the emulation process; (iv) affordable costs. Both premise and cloud servers are considered as emulation platforms to evaluate the resource request and emulation cost of the proposed approach. For instance, emulating one hour of an entire LoRaWAN network with hundreds of nodes requires very affordable hardware that, if realized with a cloud-based computing platform, may cost less than USD 1.}, } @article {pmid38609681, year = {2024}, author = {Gupta, P and Shukla, DP}, title = {Demi-decadal land use land cover change analysis of Mizoram, India, with topographic correction using machine learning algorithm.}, journal = {Environmental science and pollution research international}, volume = {}, number = {}, pages = {}, pmid = {38609681}, issn = {1614-7499}, abstract = {Mizoram (India) is part of UNESCO's biodiversity hotspots in India that is primarily populated by tribes who engage in shifting agriculture. Hence, the land use land cover (LULC) pattern of the state is frequently changing. We have used Landsat 5 and 8 satellite images to prepare LULC maps from 2000 to 2020 in every 5 years. The atmospherically corrected images were pre-processed for removal of cloud cover and then classified into six classes: waterbodies, farmland, settlement, open forest, dense forest, and bare land. We applied four machine learning (ML) algorithms for classification, namely, random forest (RF), classification and regression tree (CART), minimum distance (MD), and support vector machine (SVM) for the images from 2000 to 2020. With 80% training and 20% testing data, we found that the RF classifier works best with the most accuracy than other classifiers. 
The average overall accuracy (OA) and Kappa coefficient (KC) from 2000 to 2020 were 84.00% and 0.79 when the RF classifier was used. When using SVM, CART, and MD, the average OA and KC were 78.06%, 0.73; 78.60%, 0.72; and 73.32%, 0.65, respectively. We utilised three methods of topographic correction, namely, C-correction, SCS (sun canopy sensor) correction, and SCS + C correction to reduce the misclassification due to shadow effects. SCS + C correction worked best for this region; hence, we prepared LULC maps on SCS + C corrected satellite image. Hence, we have used RF classifier for LULC preparation demi-decadal from 2000 to 2020. The OA for 2000, 2005, 2010, 2015, and 2020 was found to be 84%, 81%, 81%, 85%, and 89%, respectively, using RF. The dense forest decreased from 2000 to 2020 with an increase in open forest, settlement, and agriculture; nevertheless, when Farmland was low, there was an increase in the barren land. The results were significantly improved with the topographic correction, and misclassification was quite less.}, } @article {pmid38609409, year = {2024}, author = {Zhang, Y and Geng, H and Su, L and He, S and Lu, L}, title = {An efficient polynomial-based verifiable computation scheme on multi-source outsourced data.}, journal = {Scientific reports}, volume = {14}, number = {1}, pages = {8512}, pmid = {38609409}, issn = {2045-2322}, support = {U22B2038//Research and Verification of Key Technologies for Secure and Efficient Federated Learning/ ; }, abstract = {With the development of cloud computing, users are more inclined to outsource complex computing tasks to cloud servers with strong computing capacity, and the cloud returns the final calculation results. However, the cloud is not completely trustworthy, which may leak the data of user and even return incorrect calculations on purpose. Therefore, it is important to verify the results of computing tasks without revealing the privacy of the users. 
Among all the computing tasks, the polynomial calculation is widely used in information security, linear algebra, signal processing and other fields. Most existing polynomial-based verifiable computation schemes require that the input of the polynomial function must come from a single data source, which means that the data must be signed by a single user. However, the input of the polynomial may come from multiple users in the practical application. In order to solve this problem, the researchers have proposed some schemes for multi-source outsourced data, but these schemes have the common problem of low efficiency. To improve the efficiency, this paper proposes an efficient polynomial-based verifiable computation scheme on multi-source outsourced data. We optimize the polynomials using Horner's method to increase the speed of verification, in which the addition gate and the multiplication gate can be interleaved to represent the polynomial function. In order to adapt to this structure, we design the corresponding homomorphic verification tag, so that the input of the polynomial can come from multiple data sources. We prove the correctness and rationality of the scheme, and carry out numerical analysis and evaluation research to verify the efficiency of the scheme. The experimental results indicate that data contributors can sign 1000 new data items in merely 2 s, while the verification of a delegated polynomial function with a power of 100 requires only 18 ms. 
These results confirm that the proposed scheme is better than the existing scheme.}, } @article {pmid38606391, year = {2024}, author = {Li, S and Nair, R and Naqvi, SM}, title = {Acoustic and Text Features Analysis for Adult ADHD Screening: A Data-Driven Approach Utilizing DIVA Interview.}, journal = {IEEE journal of translational engineering in health and medicine}, volume = {12}, number = {}, pages = {359-370}, pmid = {38606391}, issn = {2168-2372}, mesh = {Adult ; Humans ; *Attention Deficit Disorder with Hyperactivity/diagnosis ; Treatment Outcome ; Magnetic Resonance Imaging ; }, abstract = {Attention Deficit Hyperactivity Disorder (ADHD) is a neurodevelopmental disorder commonly seen in childhood that leads to behavioural changes in social development and communication patterns, often continues into undiagnosed adulthood due to a global shortage of psychiatrists, resulting in delayed diagnoses with lasting consequences on individual's well-being and the societal impact. Recently, machine learning methodologies have been incorporated into healthcare systems to facilitate the diagnosis and enhance the potential prediction of treatment outcomes for mental health conditions. In ADHD detection, the previous research focused on utilizing functional magnetic resonance imaging (fMRI) or Electroencephalography (EEG) signals, which require costly equipment and trained personnel for data collection. In recent years, speech and text modalities have garnered increasing attention due to their cost-effectiveness and non-wearable sensing in data collection. In this research, conducted in collaboration with the Cumbria, Northumberland, Tyne and Wear NHS Foundation Trust, we gathered audio data from both ADHD patients and normal controls based on the clinically popular Diagnostic Interview for ADHD in adults (DIVA). Subsequently, we transformed the speech data into text modalities through the utilization of the Google Cloud Speech API. 
We extracted both acoustic and text features from the data, encompassing traditional acoustic features (e.g., MFCC), specialized feature sets (e.g., eGeMAPS), as well as deep-learned linguistic and semantic features derived from pre-trained deep learning models. These features are employed in conjunction with a support vector machine for ADHD classification, yielding promising outcomes in the utilization of audio and text data for effective adult ADHD screening. Clinical impact: This research introduces a transformative approach in ADHD diagnosis, employing speech and text analysis to facilitate early and more accessible detection, particularly beneficial in areas with limited psychiatric resources. Clinical and Translational Impact Statement: The successful application of machine learning techniques in analyzing audio and text data for ADHD screening represents a significant advancement in mental health diagnostics, paving the way for its integration into clinical settings and potentially improving patient outcomes on a broader scale.}, } @article {pmid38601602, year = {2024}, author = {Sachdeva, S and Bhatia, S and Al Harrasi, A and Shah, YA and Anwer, K and Philip, AK and Shah, SFA and Khan, A and Ahsan Halim, S}, title = {Unraveling the role of cloud computing in health care system and biomedical sciences.}, journal = {Heliyon}, volume = {10}, number = {7}, pages = {e29044}, pmid = {38601602}, issn = {2405-8440}, abstract = {Cloud computing has emerged as a transformative force in healthcare and biomedical sciences, offering scalable, on-demand resources for managing vast amounts of data. This review explores the integration of cloud computing within these fields, highlighting its pivotal role in enhancing data management, security, and accessibility. 
We examine the application of cloud computing in various healthcare domains, including electronic medical records, telemedicine, and personalized patient care, as well as its impact on bioinformatics research, particularly in genomics, proteomics, and metabolomics. The review also addresses the challenges and ethical considerations associated with cloud-based healthcare solutions, such as data privacy and cybersecurity. By providing a comprehensive overview, we aim to assist readers in understanding the significance of cloud computing in modern medical applications and its potential to revolutionize both patient care and biomedical research.}, } @article {pmid38591672, year = {2024}, author = {Hicks, CB and Martinez, TJ}, title = {Massively scalable workflows for quantum chemistry: BigChem and ChemCloud.}, journal = {The Journal of chemical physics}, volume = {160}, number = {14}, pages = {}, doi = {10.1063/5.0190834}, pmid = {38591672}, issn = {1089-7690}, abstract = {Electronic structure theory, i.e., quantum chemistry, is the fundamental building block for many problems in computational chemistry. We present a new distributed computing framework (BigChem), which allows for an efficient solution of many quantum chemistry problems in parallel. BigChem is designed to be easily composable and leverages industry-standard middleware (e.g., Celery, RabbitMQ, and Redis) for distributed approaches to large scale problems. BigChem can harness any collection of worker nodes, including ones on cloud providers (such as AWS or Azure), local clusters, or supercomputer centers (and any mixture of these). BigChem builds upon MolSSI packages, such as QCEngine to standardize the operation of numerous computational chemistry programs, demonstrated here with Psi4, xtb, geomeTRIC, and TeraChem. 
BigChem delivers full utilization of compute resources at scale, offers a programable canvas for designing sophisticated quantum chemistry workflows, and is fault tolerant to node failures and network disruptions. We demonstrate linear scalability of BigChem running computational chemistry workloads on up to 125 GPUs. Finally, we present ChemCloud, a web API to BigChem and successor to TeraChem Cloud. ChemCloud delivers scalable and secure access to BigChem over the Internet.}, } @article {pmid38589881, year = {2024}, author = {Holl, F and Clarke, L and Raffort, T and Serres, E and Archer, L and Saaristo, P}, title = {The Red Cross Red Crescent Health Information System (RCHIS): an electronic medical records and health information management system for the red cross red crescent emergency response units.}, journal = {Conflict and health}, volume = {18}, number = {1}, pages = {28}, pmid = {38589881}, issn = {1752-1505}, abstract = {BACKGROUND: The Red Cross and Red Crescent Movement (RCRC) utilizes specialized Emergency Response Units (ERUs) for international disaster response. However, data collection and reporting within ERUs have been time-consuming and paper-based. The Red Cross Red Crescent Health Information System (RCHIS) was developed to improve clinical documentation and reporting, ensuring accuracy and ease of use while increasing compliance with reporting standards.

CASE PRESENTATION: RCHIS is an Electronic Medical Record (EMR) and Health Information System (HIS) designed for RCRC ERUs. It can be accessed on Android tablets or Windows laptops, both online and offline. The system securely stores data on Microsoft Azure cloud, with synchronization facilitated through a local ERU server. The functional architecture covers all clinical functions of ERU clinics and hospitals, incorporating user-friendly features. A pilot study was conducted with the Portuguese Red Cross (PRC) during a large-scale event. Thirteen super users were trained and subsequently trained the staff. During the four-day pilot, 77 user accounts were created, and 243 patient files were documented. Feedback indicated that RCHIS was easy to use, requiring minimal training time, and had sufficient training for full utilization. Real-time reporting facilitated coordination with the civil defense authority.

CONCLUSIONS: The development and pilot use of RCHIS demonstrated its feasibility and efficacy within RCRC ERUs. The system addressed the need for an EMR and HIS solution, enabling comprehensive clinical documentation and supporting administrative reporting functions. The pilot study validated the training of trainers' approach and paved the way for further domestic use of RCHIS. RCHIS has the potential to improve patient safety, quality of care, and reporting efficiency within ERUs. Automated reporting reduces the burden on ERU leadership, while electronic compilation enhances record completeness and correctness. Ongoing feedback collection and feature development continue to enhance RCHIS's functionality. Further trainings took place in 2023 and preparations for international deployments are under way. RCHIS represents a significant step toward improved emergency medical care and coordination within the RCRC and has implications for similar systems in other Emergency Medical Teams.}, } @article {pmid38586319, year = {2024}, author = {Chen, A and Yu, S and Yang, X and Huang, D and Ren, Y}, title = {IoT data security in outsourced databases: A survey of verifiable database.}, journal = {Heliyon}, volume = {10}, number = {7}, pages = {e28117}, pmid = {38586319}, issn = {2405-8440}, abstract = {With the swift advancement of cloud computing and the Internet of Things (IoT), to address the issue of massive data storage, IoT devices opt to offload their data to cloud servers so as to alleviate the pressure of resident storage and computation. However, storing local data in an outsourced database is bound to face the danger of tampering. To handle the above problem, a verifiable database (VDB), which was initially suggested in 2011, has garnered sustained interest from researchers. 
The concept of VDB enables resource-limited clients to securely outsource extremely large databases to untrusted servers, where users can retrieve database records and modify them by allocating new values, and any attempts at tampering will be detected. This paper provides a systematic summary of VDB. First, a definition of VDB is given, along with correctness and security proofs. And the VDB based on commitment constructions is introduced separately, mainly divided into vector commitments and polynomial commitments. Then VDB schemes based on delegated polynomial functions are introduced, mainly in combination with Merkle trees and forward-secure symmetric searchable encryption. We then classify the current VDB schemes relying on four different assumptions. Besides, we classify the established VDB schemes built upon two different groups. Finally, we introduce the applications and future development of VDB. To our knowledge, this is the first VDB review paper to date.}, } @article {pmid38585837, year = {2024}, author = {Mimar, S and Paul, AS and Lucarelli, N and Border, S and Santo, BA and Naglah, A and Barisoni, L and Hodgin, J and Rosenberg, AZ and Clapp, W and Sarder, P and , }, title = {ComPRePS: An Automated Cloud-based Image Analysis tool to democratize AI in Digital Pathology.}, journal = {bioRxiv : the preprint server for biology}, volume = {}, number = {}, pages = {}, pmid = {38585837}, support = {R01 DK118431/DK/NIDDK NIH HHS/United States ; R21 DK128668/DK/NIDDK NIH HHS/United States ; R01 DK114485/DK/NIDDK NIH HHS/United States ; U01 DK133090/DK/NIDDK NIH HHS/United States ; R01 DK129541/DK/NIDDK NIH HHS/United States ; OT2 OD033753/OD/NIH HHS/United States ; }, abstract = {Artificial intelligence (AI) has extensive applications in a wide range of disciplines including healthcare and clinical practice. 
Advances in high-resolution whole-slide brightfield microscopy allow for the digitization of histologically stained tissue sections, producing gigapixel-scale whole-slide images (WSI). The significant improvement in computing and revolution of deep neural network (DNN)-based AI technologies over the last decade allow us to integrate massively parallelized computational power, cutting-edge AI algorithms, and big data storage, management, and processing. Applied to WSIs, AI has created opportunities for improved disease diagnostics and prognostics with the ultimate goal of enhancing precision medicine and resulting patient care. The National Institutes of Health (NIH) has recognized the importance of developing standardized principles for data management and discovery for the advancement of science and proposed the Findable, Accessible, Interoperable, Reusable (FAIR) Data Principles[1] with the goal of building a modernized biomedical data resource ecosystem to establish collaborative research communities. In line with this mission and to democratize AI-based image analysis in digital pathology, we propose ComPRePS: an end-to-end automated Computational Renal Pathology Suite which combines massive scalability, on-demand cloud computing, and an easy-to-use web-based user interface for data upload, storage, management, slide-level visualization, and domain expert interaction. 
Moreover, our platform is equipped with both in-house and collaborator developed sophisticated AI algorithms in the back-end server for image analysis to identify clinically relevant micro-anatomic functional tissue units (FTU) and to extract image features.}, } @article {pmid38584872, year = {2024}, author = {Copeland, CJ and Roddy, JW and Schmidt, AK and Secor, PR and Wheeler, TJ}, title = {VIBES: a workflow for annotating and visualizing viral sequences integrated into bacterial genomes.}, journal = {NAR genomics and bioinformatics}, volume = {6}, number = {2}, pages = {lqae030}, pmid = {38584872}, issn = {2631-9268}, support = {R01 AI138981/AI/NIAID NIH HHS/United States ; R01 GM132600/GM/NIGMS NIH HHS/United States ; }, abstract = {Bacteriophages are viruses that infect bacteria. Many bacteriophages integrate their genomes into the bacterial chromosome and become prophages. Prophages may substantially burden or benefit host bacteria fitness, acting in some cases as parasites and in others as mutualists. Some prophages have been demonstrated to increase host virulence. The increasing ease of bacterial genome sequencing provides an opportunity to deeply explore prophage prevalence and insertion sites. Here we present VIBES (Viral Integrations in Bacterial genomES), a workflow intended to automate prophage annotation in complete bacterial genome sequences. VIBES provides additional context to prophage annotations by annotating bacterial genes and viral proteins in user-provided bacterial and viral genomes. The VIBES pipeline is implemented as a Nextflow-driven workflow, providing a simple, unified interface for execution on local, cluster and cloud computing environments. For each step of the pipeline, a container including all necessary software dependencies is provided. VIBES produces results in simple tab-separated format and generates intuitive and interactive visualizations for data exploration. 
Despite VIBES's primary emphasis on prophage annotation, its generic alignment-based design allows it to be deployed as a general-purpose sequence similarity search manager. We demonstrate the utility of the VIBES prophage annotation workflow by searching for 178 Pf phage genomes across 1072 Pseudomonas spp. genomes.}, } @article {pmid38578775, year = {2024}, author = {Nawaz Tareen, F and Alvi, AN and Alsamani, B and Alkhathami, M and Alsadie, D and Alosaimi, N}, title = {EOTE-FSC: An efficient offloaded task execution for fog enabled smart cities.}, journal = {PloS one}, volume = {19}, number = {4}, pages = {e0298363}, pmid = {38578775}, issn = {1932-6203}, mesh = {Cities ; *Algorithms ; *Communication ; Health Facilities ; Information Science ; }, abstract = {Smart cities provide ease in lifestyle to their community members with the help of Information and Communication Technology (ICT). It provides better water, waste and energy management, enhances the security and safety of its citizens and offers better health facilities. Most of these applications are based on IoT-based sensor networks, that are deployed in different areas of applications according to their demand. Due to limited processing capabilities, sensor nodes cannot process multiple tasks simultaneously and need to offload some of their tasks to remotely placed cloud servers, which may cause delays. To reduce the delay, computing nodes are placed in different vicinitys acting as fog-computing nodes are used, to execute the offloaded tasks. It has been observed that the offloaded tasks are not uniformly received by fog computing nodes and some fog nodes may receive more tasks as some may receive less number of tasks. This may cause an increase in overall task execution time. Furthermore, these tasks comprise different priority levels and must be executed before their deadline. In this work, an Efficient Offloaded Task Execution for Fog enabled Smart cities (EOTE - FSC) is proposed. 
EOTE - FSC proposes a load balancing mechanism by modifying the greedy algorithm to efficiently distribute the offloaded tasks to its attached fog nodes to reduce the overall task execution time. This results in the successful execution of most of the tasks within their deadline. In addition, EOTE - FSC modifies the task sequencing with a deadline algorithm for the fog node to optimally execute the offloaded tasks in such a way that most of the high-priority tasks are entertained. The load balancing results of EOTE - FSC are compared with state-of-the-art well-known Round Robin, Greedy, Round Robin with longest job first, and Round Robin with shortest job first algorithms. However, fog computing results of EOTE - FSC are compared with the First Come First Serve algorithm. The results show that the EOTE - FSC effectively offloaded the tasks on fog nodes and the maximum load on the fog computing nodes is reduced up to 29%, 27.3%, 23%, and 24.4% as compared to Round Robin, Greedy, Round Robin with LJF and Round Robin with SJF algorithms respectively. However, task execution in the proposed EOTE - FSC executes a maximum number of offloaded high-priority tasks as compared to the FCFS algorithm within the same computing capacity of fog nodes.}, } @article {pmid38568312, year = {2024}, author = {Khan, NS and Roy, SK and Talukdar, S and Billah, M and Iqbal, A and Zzaman, RU and Chowdhury, A and Mahtab, SB and Mallick, J}, title = {Empowering real-time flood impact assessment through the integration of machine learning and Google Earth Engine: a comprehensive approach.}, journal = {Environmental science and pollution research international}, volume = {}, number = {}, pages = {}, pmid = {38568312}, issn = {1614-7499}, abstract = {Floods cause substantial losses to life and property, especially in flood-prone regions like northwestern Bangladesh. Timely and precise evaluation of flood impacts is critical for effective flood management and decision-making. 
This research demonstrates an integrated approach utilizing machine learning and Google Earth Engine to enable real-time flood assessment. Synthetic aperture radar (SAR) data from Sentinel-1 and the Google Earth Engine platform were employed to generate near real-time flood maps of the 2020 flood in Kurigram and Lalmonirhat. An automatic thresholding technique quantified flooded areas. For land use/land cover (LULC) analysis, Sentinel-2's high resolution and machine learning models like artificial neural networks (ANN), random forests (RF) and support vector machines (SVM) were leveraged. ANN delivered the best LULC mapping with 0.94 accuracy based on metrics like accuracy, kappa, mean F1 score, mean sensitivity, mean specificity, mean positive predictive value, mean negative value, mean precision, mean recall, mean detection rate and mean balanced accuracy. Results showed over 600,000 people exposed at peak inundation in July-about 17% of the population. The machine learning-enabled LULC maps reliably identified vulnerable areas to prioritize flood management. Over half of croplands flooded in July. This research demonstrates the potential of integrating SAR, machine learning and cloud computing to empower authorities through real-time monitoring and accurate LULC mapping essential for effective flood response. The proposed comprehensive methodology can assist stakeholders in developing data-driven flood management strategies to reduce impacts.}, } @article {pmid38560228, year = {2024}, author = {Gheni, HM and AbdulRahaim, LA and Abdellatif, A}, title = {Real-time driver identification in IoV: A deep learning and cloud integration approach.}, journal = {Heliyon}, volume = {10}, number = {7}, pages = {e28109}, pmid = {38560228}, issn = {2405-8440}, abstract = {The Internet of Vehicles (IoV) emerges as a pivotal extension of the Internet of Things (IoT), specifically geared towards transforming the automotive landscape. 
In this evolving ecosystem, the demand for a seamless end-to-end system becomes paramount for enhancing operational efficiency and safety. Hence, this study introduces an innovative method for real-time driver identification by integrating cloud computing with deep learning. Utilizing the integrated capabilities of Google Cloud, Thingsboard, and Apache Kafka, the developed solution tailored for IoV technology is adept at managing real-time data collection, processing, prediction, and visualization, with resilience against sensor data anomalies. Also, this research suggests an appropriate method for driver identification by utilizing a combination of Convolutional Neural Networks (CNN) and multi-head self-attention in the proposed approach. The proposed model is validated on two datasets: Security and collected. Moreover, the results show that the proposed model surpassed the previous works by achieving an accuracy and F1 score of 99.95%. Even when challenged with data anomalies, this model maintains a high accuracy of 96.2%. By achieving accurate driver identification results, the proposed end-to-end IoV system can aid in optimizing fleet management, vehicle security, personalized driving experiences, insurance, and risk assessment. This emphasizes its potential for road safety and managing transportation more effectively.}, } @article {pmid38559152, year = {2024}, author = {Li, Y and Xue, F and Li, B and Yang, Y and Fan, Z and Shu, J and Yang, X and Wang, X and Lin, J and Copana, C and Zhao, B}, title = {Analyzing bivariate cross-trait genetic architecture in GWAS summary statistics with the BIGA cloud computing platform.}, journal = {bioRxiv : the preprint server for biology}, volume = {}, number = {}, pages = {}, pmid = {38559152}, abstract = {As large-scale biobanks provide increasing access to deep phenotyping and genomic data, genome-wide association studies (GWAS) are rapidly uncovering the genetic architecture behind various complex traits and diseases. 
GWAS publications typically make their summary-level data (GWAS summary statistics) publicly available, enabling further exploration of genetic overlaps between phenotypes gathered from different studies and cohorts. However, systematically analyzing high-dimensional GWAS summary statistics for thousands of phenotypes can be both logistically challenging and computationally demanding. In this paper, we introduce BIGA (https://bigagwas.org/), a website that aims to offer unified data analysis pipelines and processed data resources for cross-trait genetic architecture analyses using GWAS summary statistics. We have developed a framework to implement statistical genetics tools on a cloud computing platform, combined with extensive curated GWAS data resources. Through BIGA, users can upload data, submit jobs, and share results, providing the research community with a convenient tool for consolidating GWAS data and generating new insights.}, } @article {pmid38559026, year = {2024}, author = {Marini, S and Barquero, A and Wadhwani, AA and Bian, J and Ruiz, J and Boucher, C and Prosperi, M}, title = {OCTOPUS: Disk-based, Multiplatform, Mobile-friendly Metagenomics Classifier.}, journal = {bioRxiv : the preprint server for biology}, volume = {}, number = {}, pages = {}, pmid = {38559026}, support = {R01 AI141810/AI/NIAID NIH HHS/United States ; R01 AI145552/AI/NIAID NIH HHS/United States ; R01 AI170187/AI/NIAID NIH HHS/United States ; }, abstract = {Portable genomic sequencers such as Oxford Nanopore's MinION enable real-time applications in both clinical and environmental health, e.g., detection of bacterial outbreaks. However, there is a bottleneck in the downstream analytics when bioinformatics pipelines are unavailable, e.g., when cloud processing is unreachable due to absence of Internet connection, or only low-end computing devices can be carried on site. 
For instance, metagenomics classifiers usually require a large amount of memory or specific operating systems/libraries. In this work, we present a platform-friendly software for portable metagenomic analysis of Nanopore data, the Oligomer-based Classifier of Taxonomic Operational and Pan-genome Units via Singletons (OCTOPUS). OCTOPUS is written in Java, reimplements several features of the popular Kraken2 and KrakenUniq software, with original components for improving metagenomics classification on incomplete/sampled reference databases (e.g., selection of bacteria of public health priority), making it ideal for running on smartphones or tablets. We indexed both OCTOPUS and Kraken2 on a bacterial database with ~4,000 reference genomes, then simulated a positive (bacterial genomes from the same species, but different genomes) and two negative (viral, mammalian) Nanopore test sets. On the bacterial test set OCTOPUS yielded sensitivity and precision comparable to Kraken2 (94.4% and 99.8% versus 94.5% and 99.1%, respectively). On non-bacterial sequences (mammals and viral), OCTOPUS dramatically decreased (4- to 16-fold) the false positive rate when compared to Kraken2 (2.1% and 0.7% versus 8.2% and 11.2%, respectively). We also developed customized databases including viruses, and the World Health Organization's set of bacteria of concern for drug resistance, tested with real Nanopore data on an Android smartphone. 
OCTOPUS is publicly available at https://github.com/DataIntellSystLab/OCTOPUS and https://github.com/Ruiz-HCI-Lab/OctopusMobile.}, } @article {pmid38555378, year = {2024}, author = {Du, J and Dong, G and Ning, J and Xu, Z and Yang, R}, title = {Identity-based controlled delegated outsourcing data integrity auditing scheme.}, journal = {Scientific reports}, volume = {14}, number = {1}, pages = {7582}, pmid = {38555378}, issn = {2045-2322}, support = {2023SKY007//Yunnan Minzu University Graduate Research Innovation Fund Project/ ; 61662089//National Natural Science Foundation of China/ ; }, abstract = {With the continuous development of cloud computing, the application of cloud storage has become more and more popular. To ensure the integrity and availability of cloud data, scholars have proposed several cloud data auditing schemes. Still, most need help with outsourced data integrity, controlled outsourcing, and source file auditing. Therefore, we propose a controlled delegation outsourcing data integrity auditing scheme based on the identity-based encryption model. Our proposed scheme allows users to specify a dedicated agent to assist in uploading data to the cloud. These authorized proxies use recognizable identities for authentication and authorization, thus avoiding the need for cumbersome certificate management in a secure distributed computing system. While solving the above problems, our scheme adopts a bucket-based red-black tree structure to efficiently realize the dynamic updating of data, which can complete the updating of data and rebalancing of structural updates constantly and realize the high efficiency of data operations. We define the security model of the scheme in detail and prove the scheme's security under the difficult problem assumption. 
In the performance analysis section, the proposed scheme is analyzed experimentally in comparison with other schemes, and the results show that the proposed scheme is efficient and secure.}, } @article {pmid38546988, year = {2024}, author = {Chen, X and Xu, G and Xu, X and Jiang, H and Tian, Z and Ma, T}, title = {Multicenter Hierarchical Federated Learning With Fault-Tolerance Mechanisms for Resilient Edge Computing Networks.}, journal = {IEEE transactions on neural networks and learning systems}, volume = {PP}, number = {}, pages = {}, doi = {10.1109/TNNLS.2024.3362974}, pmid = {38546988}, issn = {2162-2388}, abstract = {In the realm of federated learning (FL), the conventional dual-layered architecture, comprising a central parameter server and peripheral devices, often encounters challenges due to its significant reliance on the central server for communication and security. This dependence becomes particularly problematic in scenarios involving potential malfunctions of devices and servers. While existing device-edge-cloud hierarchical FL (HFL) models alleviate some dependence on central servers and reduce communication overheads, they primarily focus on load balancing within edge computing networks and fall short of achieving complete decentralization and edge-centric model aggregation. Addressing these limitations, we introduce the multicenter HFL (MCHFL) framework. This innovative framework replaces the traditional single central server architecture with a distributed network of robust global aggregation centers located at the edge, inherently enhancing fault tolerance crucial for maintaining operational integrity amidst edge network disruptions. Our comprehensive experiments with the MNIST, FashionMNIST, and CIFAR-10 datasets demonstrate the MCHFL's superior performance. 
Notably, even under high paralysis ratios of up to 50%, the MCHFL maintains high accuracy levels, with maximum accuracy reductions of only 2.60%, 5.12%, and 16.73% on these datasets, respectively. This performance significantly surpasses the notable accuracy declines observed in traditional single-center models under similar conditions. To the best of our knowledge, the MCHFL is the first edge multicenter FL framework with theoretical underpinnings. Our extensive experimental results across various datasets validate the MCHFL's effectiveness, showcasing its higher accuracy, faster convergence speed, and stronger robustness compared to single-center models, thereby establishing it as a pioneering paradigm in edge multicenter FL.}, } @article {pmid38545518, year = {2024}, author = {Lock, C and Toh, EMS and Keong, NC}, title = {Structural volumetric and Periodic Table DTI patterns in Complex Normal Pressure Hydrocephalus-Toward the principles of a translational taxonomy.}, journal = {Frontiers in human neuroscience}, volume = {18}, number = {}, pages = {1188533}, pmid = {38545518}, issn = {1662-5161}, abstract = {INTRODUCTION: We previously proposed a novel taxonomic framework to describe the diffusion tensor imaging (DTI) profiles of white matter tracts by their diffusivity and neural properties. We have shown the relevance of this strategy toward interpreting brain tissue signatures in Classic Normal Pressure Hydrocephalus vs. comparator cohorts of mild traumatic brain injury and Alzheimer's disease. In this iteration of the Periodic Table of DTI Elements, we examined patterns of tissue distortion in Complex NPH (CoNPH) and validated the methodology against an open-access dataset of healthy subjects, to expand its accessibility to a larger community.

METHODS: DTI measures for 12 patients with CoNPH with multiple comorbidities and 45 cognitively normal controls from the ADNI database were derived using the image processing pipeline on the brainlife.io open cloud computing platform. Using the Periodic Table algorithm, DTI profiles for CoNPH vs. controls were mapped according to injury patterns.

RESULTS: Structural volumes in most structures tested were significantly lower and the lateral ventricles higher in CoNPH vs. controls. In CoNPH, significantly lower fractional anisotropy (FA) and higher mean, axial, and radial diffusivities (MD, L1, and L2 and 3, respectively) were observed in white matter related to the lateral ventricles. Most diffusivity measures across supratentorial and infratentorial structures were significantly higher in CoNPH, with the largest differences in the cerebellum cortex. In subcortical deep gray matter structures, CoNPH and controls differed most significantly in the hippocampus, with the CoNPH group having a significantly lower FA and higher MD, L1, and L2 and 3. Cerebral and cerebellar white matter demonstrated more potential reversibility of injury compared to cerebral and cerebellar cortices.

DISCUSSION: The findings of widespread and significant reductions in subcortical deep gray matter structures, in comparison to healthy controls, support the hypothesis that Complex NPH cohorts retain imaging features associated with Classic NPH. The use of the algorithm of the Periodic Table allowed for greater consistency in the interpretation of DTI results by focusing on patterns of injury rather than an over-reliance on the interrogation of individual measures by statistical significance alone. Our aim is to provide a prototype that could be refined for an approach toward the concept of a "translational taxonomy."}, } @article {pmid38544154, year = {2024}, author = {Kang, S and Lee, S and Jung, Y}, title = {Design of Network-on-Chip-Based Restricted Coulomb Energy Neural Network Accelerator on FPGA Device.}, journal = {Sensors (Basel, Switzerland)}, volume = {24}, number = {6}, pages = {}, pmid = {38544154}, issn = {1424-8220}, support = {00144288, 00144290//Ministry of Trade, Industry and Energy/ ; }, abstract = {Sensor applications in internet of things (IoT) systems, coupled with artificial intelligence (AI) technology, are becoming an increasingly significant part of modern life. For low-latency AI computation in IoT systems, there is a growing preference for edge-based computing over cloud-based alternatives. The restricted coulomb energy neural network (RCE-NN) is a machine learning algorithm well-suited for implementation on edge devices due to its simple learning and recognition scheme. In addition, because the RCE-NN generates neurons as needed, it is easy to adjust the network structure and learn additional data. Therefore, the RCE-NN can provide edge-based real-time processing for various sensor applications. However, previous RCE-NN accelerators have limited scalability when the number of neurons increases. 
In this paper, we propose a network-on-chip (NoC)-based RCE-NN accelerator and present the results of implementation on a field-programmable gate array (FPGA). NoC is an effective solution for managing massive interconnections. The proposed RCE-NN accelerator utilizes a hierarchical-star (H-star) topology, which efficiently handles a large number of neurons, along with routers specifically designed for the RCE-NN. These approaches result in only a slight decrease in the maximum operating frequency as the number of neurons increases. Consequently, the maximum operating frequency of the proposed RCE-NN accelerator with 512 neurons increased by 126.1% compared to a previous RCE-NN accelerator. This enhancement was verified with two datasets for gas and sign language recognition, achieving accelerations of up to 54.8% in learning time and up to 45.7% in recognition time. The NoC scheme of the proposed RCE-NN accelerator is an appropriate solution to ensure the scalability of the neural network while providing high-performance on-chip learning and recognition.}, } @article {pmid38544035, year = {2024}, author = {Zhan, Y and Xie, W and Shi, R and Huang, Y and Zheng, X}, title = {Dynamic Privacy-Preserving Anonymous Authentication Scheme for Condition-Matching in Fog-Cloud-Based VANETs.}, journal = {Sensors (Basel, Switzerland)}, volume = {24}, number = {6}, pages = {}, pmid = {38544035}, issn = {1424-8220}, support = {61872091//National Natural Science Foundation of China/ ; JCKY20 19102C001//National Defense Basic Research Program of China/ ; 62372110//National Natural Science Foundation of China/ ; 2023J02008//Fujian Provincial Natural Science of Foundation/ ; 2020B0101090005//Key-Area Research and Development Program of Guangdong Province/ ; YSPTZX202145//The specific research fund of The Innovation Platform for Academician of Hainan Province/ ; 2022HZ022022//Major Special Project for Industrial Science and Technology in Fujian Province/ ; 2022H0012//Industrial 
Guiding Project in Fujian/ ; 2022L3003//Special Project of Central Finance Guiding Local Development/ ; }, abstract = {Secure group communication in Vehicle Ad hoc Networks (VANETs) over open channels remains a challenging task. To enable secure group communications with conditional privacy, it is necessary to establish a secure session using Authenticated Key Agreement (AKA). However, existing AKAs suffer from problems such as cross-domain dynamic group session key negotiation and heavy computational burdens on the Trusted Authority (TA) and vehicles. To address these challenges, we propose a dynamic privacy-preserving anonymous authentication scheme for condition matching in fog-cloud-based VANETs. The scheme employs general Elliptic Curve Cryptosystem (ECC) technology and fog-cloud computing methods to decrease computational overhead for On-Board Units (OBUs) and supports multiple TAs for improved service quality and robustness. Furthermore, certificateless technology alleviates TAs of key management burdens. The security analysis indicates that our solution satisfies the communication security and privacy requirements. 
Experimental simulations verify that our method achieves optimal overall performance with lower computational costs and smaller communication overhead compared to state-of-the-art solutions.}, } @article {pmid38540411, year = {2024}, author = {Yuan, DY and Park, JH and Li, Z and Thomas, R and Hwang, DM and Fu, L}, title = {A New Cloud-Native Tool for Pharmacogenetic Analysis.}, journal = {Genes}, volume = {15}, number = {3}, pages = {}, pmid = {38540411}, issn = {2073-4425}, support = {LMMD Strategic Innovation Fund//Sunnybrook Health Sciences Centre/ ; }, mesh = {Humans ; *Pharmacogenomic Testing ; *Pharmacogenetics/methods ; High-Throughput Nucleotide Sequencing/methods ; Genomics/methods ; Computational Biology ; }, abstract = {BACKGROUND: The advancement of next-generation sequencing (NGS) technologies provides opportunities for large-scale Pharmacogenetic (PGx) studies and pre-emptive PGx testing to cover a wide range of genotypes present in diverse populations. However, NGS-based PGx testing is limited by the lack of comprehensive computational tools to support genetic data analysis and clinical decisions.

METHODS: Bioinformatics utilities specialized for human genomics and the latest cloud-based technologies were used to develop a bioinformatics pipeline for analyzing the genomic sequence data and reporting PGx genotypes. A database was created and integrated in the pipeline for filtering the actionable PGx variants and clinical interpretations. Strict quality verification procedures were conducted on variant calls with the whole genome sequencing (WGS) dataset of the 1000 Genomes Project (G1K). The accuracy of PGx allele identification was validated using the WGS dataset of the Pharmacogenetics Reference Materials from the Centers for Disease Control and Prevention (CDC).

RESULTS: The newly created bioinformatics pipeline, Pgxtools, can analyze genomic sequence data, identify actionable variants in 13 PGx relevant genes, and generate reports annotated with specific interpretations and recommendations based on clinical practice guidelines. Verified with two independent methods, we have found that Pgxtools consistently identifies variants more accurately than the results in the G1K dataset on GRCh37 and GRCh38.

CONCLUSIONS: Pgxtools provides an integrated workflow for large-scale genomic data analysis and PGx clinical decision support. Implemented with cloud-native technologies, it is highly portable in a wide variety of environments from a single laptop to High-Performance Computing (HPC) clusters and cloud platforms for different production scales and requirements.}, } @article {pmid38535044, year = {2024}, author = {Kukkar, A and Kumar, Y and Sandhu, JK and Kaur, M and Walia, TS and Amoon, M}, title = {DengueFog: A Fog Computing-Enabled Weighted Random Forest-Based Smart Health Monitoring System for Automatic Dengue Prediction.}, journal = {Diagnostics (Basel, Switzerland)}, volume = {14}, number = {6}, pages = {}, pmid = {38535044}, issn = {2075-4418}, abstract = {Dengue is a distinctive and fatal infectious disease that spreads through female mosquitoes called Aedes aegypti. It is a notable concern for developing countries due to its low diagnosis rate. Dengue has the most astounding mortality level as compared to other diseases due to tremendous platelet depletion. Hence, it can be categorized as a life-threatening fever as compared to the same class of fevers. Additionally, it has been shown that dengue fever shares many of the same symptoms as other flu-based fevers. On the other hand, the research community is closely monitoring the popular research fields related to IoT, fog, and cloud computing for the diagnosis and prediction of diseases. IoT, fog, and cloud-based technologies are used for constructing a number of health care systems. Accordingly, in this study, a DengueFog monitoring system was created based on fog computing for prediction and detection of dengue sickness. Additionally, the proposed DengueFog system includes a weighted random forest (WRF) classifier to monitor and predict the dengue infection. The proposed system's efficacy was evaluated using data on dengue infection. 
This dataset was gathered between 2016 and 2018 from several hospitals in the Delhi-NCR region. The accuracy, F-value, recall, precision, error rate, and specificity metrics were used to assess the simulation results of the suggested monitoring system. It was demonstrated that the proposed DengueFog monitoring system with WRF outperforms the traditional classifiers.}, } @article {pmid38531975, year = {2024}, author = {Ali, I and Wassif, K and Bayomi, H}, title = {Dimensionality reduction for images of IoT using machine learning.}, journal = {Scientific reports}, volume = {14}, number = {1}, pages = {7205}, pmid = {38531975}, issn = {2045-2322}, abstract = {Sensors, wearables, mobile devices, and other Internet of Things (IoT) devices are becoming increasingly integrated into all aspects of our lives. They are capable of gathering enormous amounts of data, such as image data, which can then be sent to the cloud for processing. However, this results in an increase in network traffic and latency. To overcome these difficulties, edge computing has been proposed as a paradigm for computing that brings processing closer to the location where data is produced. This paper explores the merging of cloud and edge computing for IoT and investigates approaches using machine learning for dimensionality reduction of images on the edge, employing the autoencoder deep learning-based approach and principal component analysis (PCA). The encoded data is then sent to the cloud server, where it is used directly for any machine learning task without significantly impacting the accuracy of the data processed in the cloud. The proposed approach has been evaluated on an object detection task using a set of 4000 images randomly chosen from three datasets: COCO, human detection, and HDA datasets. 
Results show that a 77% reduction in data did not have a significant impact on the object detection task's accuracy.}, } @article {pmid38531933, year = {2024}, author = {Huettmann, F and Andrews, P and Steiner, M and Das, AK and Philip, J and Mi, C and Bryans, N and Barker, B}, title = {A super SDM (species distribution model) 'in the cloud' for better habitat-association inference with a 'big data' application of the Great Gray Owl for Alaska.}, journal = {Scientific reports}, volume = {14}, number = {1}, pages = {7213}, pmid = {38531933}, issn = {2045-2322}, support = {-EWHALE lab//University of Alaska Fairbanks/ ; -EWHALE lab//University of Alaska Fairbanks/ ; }, abstract = {The currently available distribution and range maps for the Great Grey Owl (GGOW; Strix nebulosa) are ambiguous, contradictory, imprecise, outdated, often hand-drawn and thus not quantified, not based on data or scientific. In this study, we present a proof of concept with a biological application for technical and biological workflow progress on latest global open access 'Big Data' sharing, Open-source methods of R and geographic information systems (OGIS and QGIS) assessed with six recent multi-evidence citizen-science sightings of the GGOW. This proposed workflow can be applied for quantified inference for any species-habitat model such as typically applied with species distribution models (SDMs). Using Random Forest-an ensemble-type model of Machine Learning following Leo Breiman's approach of inference from predictions-we present a Super SDM for GGOWs in Alaska running on Oracle Cloud Infrastructure (OCI). These Super SDMs were based on best publicly available data (410 occurrences + 1% new assessment sightings) and over 100 environmental GIS habitat predictors ('Big Data'). The compiled global open access data and the associated workflow overcome for the first time the limitations of traditionally used PC and laptops. 
It breaks new ground and has real-world implications for conservation and land management for GGOW, for Alaska, and for other species worldwide as a 'new' baseline. As this research field remains dynamic, Super SDMs can have limits, are not the ultimate and final statement on species-habitat associations yet, but they summarize all publicly available data and information on a topic in a quantified and testable fashion allowing fine-tuning and improvements as needed. At minimum, they allow for low-cost rapid assessment and a great leap forward to be more ecological and inclusive of all information at-hand. Using GGOWs, here we aim to correct the perception of this species towards a more inclusive, holistic, and scientifically correct assessment of this urban-adapted owl in the Anthropocene, rather than a mysterious wilderness-inhabiting species (aka 'Phantom of the North'). Such a Super SDM was never created for any bird species before and opens new perspectives for impact assessment policy and global sustainability.}, } @article {pmid38528619, year = {2024}, author = {Budge, J and Carrell, T and Yaqub, M and Wafa, H and Waltham, M and Pilecka, I and Kelly, J and Murphy, C and Palmer, S and Wang, Y and Clough, RE}, title = {The ARIA trial protocol: a randomised controlled trial to assess the clinical, technical, and cost-effectiveness of a cloud-based, ARtificially Intelligent image fusion system in comparison to standard treatment to guide endovascular Aortic aneurysm repair.}, journal = {Trials}, volume = {25}, number = {1}, pages = {214}, pmid = {38528619}, issn = {1745-6215}, support = {NIHR201004//Invention for Innovation Programme/ ; }, mesh = {Humans ; *Aortic Aneurysm, Abdominal/diagnostic imaging/surgery ; Cost-Benefit Analysis ; Cloud Computing ; *Endovascular Procedures/methods ; *Blood Vessel Prosthesis Implantation/adverse effects ; Treatment Outcome ; Retrospective Studies ; Randomized Controlled Trials as Topic ; Multicenter Studies as Topic ; }, 
abstract = {BACKGROUND: Endovascular repair of aortic aneurysmal disease is established due to perceived advantages in patient survival, reduced postoperative complications, and shorter hospital lengths of stay. High spatial and contrast resolution 3D CT angiography images are used to plan the procedures and inform device selection and manufacture, but in standard care, the surgery is performed using image-guidance from 2D X-ray fluoroscopy with injection of nephrotoxic contrast material to visualise the blood vessels. This study aims to assess the benefit to patients, practitioners, and the health service of a novel image fusion medical device (Cydar EV), which allows this high-resolution 3D information to be available to operators at the time of surgery.

METHODS: The trial is a multi-centre, open label, two-armed randomised controlled clinical trial of 340 patients, randomised 1:1 to either standard treatment in endovascular aneurysm repair or treatment using Cydar EV, a CE-marked medical device comprising cloud computing, augmented intelligence, and computer vision. The primary outcome is procedural time, with secondary outcomes of procedural efficiency, technical effectiveness, patient outcomes, and cost-effectiveness. Patients with a clinical diagnosis of AAA or TAAA suitable for endovascular repair and able to provide written informed consent will be invited to participate.

DISCUSSION: This trial is the first randomised controlled trial evaluating advanced image fusion technology in endovascular aortic surgery and is well placed to evaluate the effect of this technology on patient outcomes and cost to the NHS.

TRIAL REGISTRATION: ISRCTN13832085. Dec. 3, 2021.}, } @article {pmid38528564, year = {2024}, author = {Zhang, S and Li, H and Jing, Q and Shen, W and Luo, W and Dai, R}, title = {Anesthesia decision analysis using a cloud-based big data platform.}, journal = {European journal of medical research}, volume = {29}, number = {1}, pages = {201}, pmid = {38528564}, issn = {2047-783X}, support = {2022JJ70061//Natural Science Foundation of Hunan Province/ ; 22A0011//Key Fund Project of Hunan Provincial Department of Education/ ; W20243113//Health Commission of Hunan Province/ ; 82103641 and 82071347//National Natural Science Foundation of China/ ; }, mesh = {Humans ; Big Data ; *Anesthesiology ; Cloud Computing ; *Anesthesia ; *Anesthetics ; Decision Support Techniques ; }, abstract = {Big data technologies have proliferated since the dawn of the cloud-computing era. Traditional data storage, extraction, transformation, and analysis technologies have thus become unsuitable for the large volume, diversity, high processing speed, and low value density of big data in medical strategies, which require the development of novel big data application technologies. In this regard, we investigated the most recent big data platform breakthroughs in anesthesiology and designed an anesthesia decision model based on a cloud system for storing and analyzing massive amounts of data from anesthetic records. The presented Anesthesia Decision Analysis Platform performs distributed computing on medical records via several programming tools, and provides services such as keyword search, data filtering, and basic statistics to reduce inaccurate and subjective judgments by decision-makers. 
Importantly, it can potentially improve anesthetic strategy and create individualized anesthesia decisions, lowering the likelihood of perioperative complications.}, } @article {pmid38524844, year = {2024}, author = {Mukuka, A}, title = {Data on mathematics teacher educators' proficiency and willingness to use technology: A structural equation modelling analysis.}, journal = {Data in brief}, volume = {54}, number = {}, pages = {110307}, pmid = {38524844}, issn = {2352-3409}, abstract = {The role of Mathematics Teacher Educators (MTEs) in preparing future teachers to effectively integrate technology into their mathematics instruction is of paramount importance yet remains an underexplored domain. Technology has the potential to enhance the development of 21st-century skills, such as problem-solving and critical thinking, which are essential for students in the era of the fourth industrial revolution. However, the rapid evolution of technology and the emergence of new trends like data analytics, the Internet of Things, machine learning, cloud computing, and artificial intelligence present new challenges in the realm of mathematics teaching and learning. Consequently, MTEs need to equip prospective teachers with the knowledge and skills to harness technology in innovative ways within their future mathematics classrooms. This paper presents and describes data from a survey of 104 MTEs in Zambia. The study focuses on MTEs' proficiency, perceived usefulness, perceived ease of use, and willingness to incorporate technology in their classrooms. This data-driven article aims to unveil patterns and trends within the dataset, with the objective of offering insights rather than drawing definitive conclusions. The article also highlights the data collection process and outlines the procedure for assessing the measurement model of the hypothesised relationships among variables through structural equation modelling analysis. 
The data described in this article not only sheds light on the current landscape but also serves as a valuable resource for mathematics teacher training institutions and other stakeholders seeking to understand the requisites for MTEs to foster technological skills among prospective teachers of mathematics.}, } @article {pmid38520921, year = {2024}, author = {Tadi, AA and Alhadidi, D and Rueda, L}, title = {PPPCT: Privacy-Preserving framework for Parallel Clustering Transcriptomics data.}, journal = {Computers in biology and medicine}, volume = {173}, number = {}, pages = {108351}, doi = {10.1016/j.compbiomed.2024.108351}, pmid = {38520921}, issn = {1879-0534}, mesh = {Humans ; *Privacy ; *Software ; Algorithms ; Gene Expression Profiling ; Cluster Analysis ; Sequence Analysis, RNA ; }, abstract = {Single-cell transcriptomics data provides crucial insights into patients' health, yet poses significant privacy concerns. Genomic data privacy attacks can have deep implications, encompassing not only the patients' health information but also extending widely to compromise their families'. Moreover, the permanence of leaked data exacerbates the challenges, making retraction an impossibility. While extensive efforts have been directed towards clustering single-cell transcriptomics data, addressing critical challenges, especially in the realm of privacy, remains pivotal. This paper introduces an efficient, fast, privacy-preserving approach for clustering single-cell RNA-sequencing (scRNA-seq) datasets. The key contributions include ensuring data privacy, achieving high-quality clustering, accommodating the high dimensionality inherent in the datasets, and maintaining reasonable computation time for big-scale datasets. Our proposed approach utilizes the map-reduce scheme to parallelize clustering, addressing intensive calculation challenges. Intel Software Guard eXtension (SGX) processors are used to ensure the security of sensitive code and data during processing. 
Additionally, the approach incorporates a logarithm transformation as a preprocessing step, employs non-negative matrix factorization for dimensionality reduction, and utilizes parallel k-means for clustering. The approach fully leverages the computing capabilities of all processing resources within a secure private cloud environment. Experimental results demonstrate the efficacy of our approach in preserving patient privacy while surpassing state-of-the-art methods in both clustering quality and computation time. Our method consistently achieves a minimum of 7% higher Adjusted Rand Index (ARI) than existing approaches, contingent on dataset size. Additionally, due to parallel computations and dimensionality reduction, our approach exhibits efficiency, converging to very good results in less than 10 seconds for a scRNA-seq dataset with 5000 genes and 6000 cells when prioritizing privacy and under two seconds without privacy considerations. Availability and implementation Code and datasets availability: https://github.com/University-of-Windsor/PPPCT.}, } @article {pmid38514837, year = {2024}, author = {Hajiaghabozorgi, M and Fischbach, M and Albrecht, M and Wang, W and Myers, CL}, title = {BridGE: a pathway-based analysis tool for detecting genetic interactions from GWAS.}, journal = {Nature protocols}, volume = {}, number = {}, pages = {}, pmid = {38514837}, issn = {1750-2799}, support = {R21CA235352//U.S. Department of Health & Human Services | NIH | Center for Information Technology (Center for Information Technology, National Institutes of Health)/ ; R01HG005084//U.S. Department of Health & Human Services | NIH | Center for Information Technology (Center for Information Technology, National Institutes of Health)/ ; R01HG005853//U.S. 
Department of Health & Human Services | NIH | Center for Information Technology (Center for Information Technology, National Institutes of Health)/ ; BAND-19-615151//Weston Brain Institute/ ; }, abstract = {Genetic interactions have the potential to modulate phenotypes, including human disease. In principle, genome-wide association studies (GWAS) provide a platform for detecting genetic interactions; however, traditional methods for identifying them, which tend to focus on testing individual variant pairs, lack statistical power. In this protocol, we describe a novel computational approach, called Bridging Gene sets with Epistasis (BridGE), for discovering genetic interactions between biological pathways from GWAS data. We present a Python-based implementation of BridGE along with instructions for its application to a typical human GWAS cohort. The major stages include initial data processing and quality control, construction of a variant-level genetic interaction network, measurement of pathway-level genetic interactions, evaluation of statistical significance using sample permutations and generation of results in a standardized output format. The BridGE software pipeline includes options for running the analysis on multiple cores and multiple nodes for users who have access to computing clusters or a cloud computing environment. In a cluster computing environment with 10 nodes and 100 GB of memory per node, the method can be run in less than 24 h for typical human GWAS cohorts. 
Using BridGE requires knowledge of running Python programs and basic shell script programming experience.}, } @article {pmid38506901, year = {2024}, author = {Sahu, KS and Dubin, JA and Majowicz, SE and Liu, S and Morita, PP}, title = {Revealing the Mysteries of Population Mobility Amid the COVID-19 Pandemic in Canada: Comparative Analysis With Internet of Things-Based Thermostat Data and Google Mobility Insights.}, journal = {JMIR public health and surveillance}, volume = {10}, number = {}, pages = {e46903}, pmid = {38506901}, issn = {2369-2960}, mesh = {Humans ; Pandemics ; *Internet of Things ; Search Engine ; *COVID-19/epidemiology ; Alberta/epidemiology ; Health Policy ; }, abstract = {BACKGROUND: The COVID-19 pandemic necessitated public health policies to limit human mobility and curb infection spread. Human mobility, which is often underestimated, plays a pivotal role in health outcomes, impacting both infectious and chronic diseases. Collecting precise mobility data is vital for understanding human behavior and informing public health strategies. Google's GPS-based location tracking, which is compiled in Google Mobility Reports, became the gold standard for monitoring outdoor mobility during the pandemic. However, indoor mobility remains underexplored.

OBJECTIVE: This study investigates in-home mobility data from ecobee's smart thermostats in Canada (February 2020 to February 2021) and compares it directly with Google's residential mobility data. By assessing the suitability of smart thermostat data, we aim to shed light on indoor mobility patterns, contributing valuable insights to public health research and strategies.

METHODS: Motion sensor data were acquired from the ecobee "Donate Your Data" initiative via Google's BigQuery cloud platform. Concurrently, residential mobility data were sourced from the Google Mobility Report. This study centered on 4 Canadian provinces-Ontario, Quebec, Alberta, and British Columbia-during the period from February 15, 2020, to February 14, 2021. Data processing, analysis, and visualization were conducted on the Microsoft Azure platform using Python (Python Software Foundation) and R programming languages (R Foundation for Statistical Computing). Our investigation involved assessing changes in mobility relative to the baseline in both data sets, with the strength of this relationship assessed using Pearson and Spearman correlation coefficients. We scrutinized daily, weekly, and monthly variations in mobility patterns across the data sets and performed anomaly detection for further insights.

RESULTS: The results revealed noteworthy week-to-week and month-to-month shifts in population mobility within the chosen provinces, aligning with pandemic-driven policy adjustments. Notably, the ecobee data exhibited a robust correlation with Google's data set. Examination of Google's daily patterns detected more pronounced mobility fluctuations during weekdays, a trend not mirrored in the ecobee data. Anomaly detection successfully identified substantial mobility deviations coinciding with policy modifications and cultural events.

CONCLUSIONS: This study's findings illustrate the substantial influence of the Canadian stay-at-home and work-from-home policies on population mobility. This impact was discernible through both Google's out-of-house residential mobility data and ecobee's in-house smart thermostat data. As such, we deduce that smart thermostats represent a valid tool for facilitating intelligent monitoring of population mobility in response to policy-driven shifts.}, } @article {pmid38495592, year = {2024}, author = {Wang, H and Chen, H and Wang, Y}, title = {Analysis of Hot Topics Regarding Global Smart Elderly Care Research - 1997-2021.}, journal = {China CDC weekly}, volume = {6}, number = {9}, pages = {157-161}, pmid = {38495592}, issn = {2096-7071}, abstract = {With the assistance of the internet, big data, cloud computing, and other technologies, the concept of smart elderly care has emerged.

WHAT IS ADDED BY THIS REPORT?: This study presents information on the countries or regions that have conducted research on smart elderly care, as well as identifies global hotspots and development trends in this field.

The results of this study suggest that future research should focus on fall detection, health monitoring, and guidance systems that are user-friendly and contribute to the creation of smarter safer communities for the well-being of the elderly.}, } @article {pmid38495055, year = {2024}, author = {Li, J and Xiong, Y and Feng, S and Pan, C and Guo, X}, title = {CloudProteoAnalyzer: scalable processing of big data from proteomics using cloud computing.}, journal = {Bioinformatics advances}, volume = {4}, number = {1}, pages = {vbae024}, pmid = {38495055}, issn = {2635-0041}, support = {R01 AT011618/AT/NCCIH NIH HHS/United States ; }, abstract = {SUMMARY: Shotgun proteomics is widely used in many system biology studies to determine the global protein expression profiles of tissues, cultures, and microbiomes. Many non-distributed computer algorithms have been developed for users to process proteomics data on their local computers. However, the amount of data acquired in a typical proteomics study has grown rapidly in recent years, owing to the increasing throughput of mass spectrometry and the expanding scale of study designs. This presents a big data challenge for researchers to process proteomics data in a timely manner. To overcome this challenge, we developed a cloud-based parallel computing application to offer end-to-end proteomics data analysis software as a service (SaaS). A web interface was provided to users to upload mass spectrometry-based proteomics data, configure parameters, submit jobs, and monitor job status. The data processing was distributed across multiple nodes in a supercomputer to achieve scalability for large datasets. Our study demonstrated SaaS for proteomics as a viable solution for the community to scale up the data processing using cloud computing.

This application is available online at https://sipros.oscer.ou.edu/ or https://sipros.unt.edu for free use. The source code is available at https://github.com/Biocomputing-Research-Group/CloudProteoAnalyzer under the GPL version 3.0 license.}, } @article {pmid38491365, year = {2024}, author = {Clements, J and Goina, C and Hubbard, PM and Kawase, T and Olbris, DJ and Otsuna, H and Svirskas, R and Rokicki, K}, title = {NeuronBridge: an intuitive web application for neuronal morphology search across large data sets.}, journal = {BMC bioinformatics}, volume = {25}, number = {1}, pages = {114}, pmid = {38491365}, issn = {1471-2105}, mesh = {Animals ; *Software ; Neurons ; *Connectome ; Microscopy, Electron ; Drosophila ; }, abstract = {BACKGROUND: Neuroscience research in Drosophila is benefiting from large-scale connectomics efforts using electron microscopy (EM) to reveal all the neurons in a brain and their connections. To exploit this knowledge base, researchers relate a connectome's structure to neuronal function, often by studying individual neuron cell types. Vast libraries of fly driver lines expressing fluorescent reporter genes in sets of neurons have been created and imaged using confocal light microscopy (LM), enabling the targeting of neurons for experimentation. However, creating a fly line for driving gene expression within a single neuron found in an EM connectome remains a challenge, as it typically requires identifying a pair of driver lines where only the neuron of interest is expressed in both. This task and other emerging scientific workflows require finding similar neurons across large data sets imaged using different modalities.

RESULTS: Here, we present NeuronBridge, a web application for easily and rapidly finding putative morphological matches between large data sets of neurons imaged using different modalities. We describe the functionality and construction of the NeuronBridge service, including its user-friendly graphical user interface (GUI), extensible data model, serverless cloud architecture, and massively parallel image search engine.

CONCLUSIONS: NeuronBridge fills a critical gap in the Drosophila research workflow and is used by hundreds of neuroscience researchers around the world. We offer our software code, open APIs, and processed data sets for integration and reuse, and provide the application as a service at http://neuronbridge.janelia.org .}, } @article {pmid38475170, year = {2024}, author = {Tripathi, A and Waqas, A and Venkatesan, K and Yilmaz, Y and Rasool, G}, title = {Building Flexible, Scalable, and Machine Learning-Ready Multimodal Oncology Datasets.}, journal = {Sensors (Basel, Switzerland)}, volume = {24}, number = {5}, pages = {}, pmid = {38475170}, issn = {1424-8220}, support = {2234836//National Science Foundation/ ; 2234468//National Science Foundation/ ; 1903466//National Science Foundation/ ; }, mesh = {Humans ; Reproducibility of Results ; *Neoplasms ; }, abstract = {The advancements in data acquisition, storage, and processing techniques have resulted in the rapid growth of heterogeneous medical data. Integrating radiological scans, histopathology images, and molecular information with clinical data is essential for developing a holistic understanding of the disease and optimizing treatment. The need for integrating data from multiple sources is further pronounced in complex diseases such as cancer for enabling precision medicine and personalized treatments. This work proposes Multimodal Integration of Oncology Data System (MINDS)-a flexible, scalable, and cost-effective metadata framework for efficiently fusing disparate data from public sources such as the Cancer Research Data Commons (CRDC) into an interconnected, patient-centric framework. MINDS consolidates over 41,000 cases from across repositories while achieving a high compression ratio relative to the 3.78 PB source data size. It offers sub-5-s query response times for interactive exploration. 
MINDS offers an interface for exploring relationships across data types and building cohorts for developing large-scale multimodal machine learning models. By harmonizing multimodal data, MINDS aims to potentially empower researchers with greater analytical ability to uncover diagnostic and prognostic insights and enable evidence-based personalized care. MINDS tracks granular end-to-end data provenance, ensuring reproducibility and transparency. The cloud-native architecture of MINDS can handle exponential data growth in a secure, cost-optimized manner while ensuring substantial storage optimization, replication avoidance, and dynamic access capabilities. Auto-scaling, access controls, and other mechanisms guarantee pipelines' scalability and security. MINDS overcomes the limitations of existing biomedical data silos via an interoperable metadata-driven approach that represents a pivotal step toward the future of oncology data integration.}, } @article {pmid38475051, year = {2024}, author = {Gaba, P and Raw, RS and Kaiwartya, O and Aljaidi, M}, title = {B-SAFE: Blockchain-Enabled Security Architecture for Connected Vehicle Fog Environment.}, journal = {Sensors (Basel, Switzerland)}, volume = {24}, number = {5}, pages = {}, pmid = {38475051}, issn = {1424-8220}, support = {00//nottingham trent university/ ; }, abstract = {Vehicles are no longer stand-alone mechanical entities due to the advancements in vehicle-to-vehicle (V2V) and vehicle-to-infrastructure (V2I) communication-centric Internet of Connected Vehicles (IoV) frameworks. However, the advancement in connected vehicles leads to another serious security threat, online vehicle hijacking, where the steering control of vehicles can be hacked online. The feasibility of traditional security solutions in IoV environments is very limited, considering the intermittent network connectivity to cloud servers and vehicle-centric computing capability constraints. 
In this context, this paper presents a Blockchain-enabled Security Architecture for a connected vehicular Fog networking Environment (B-SAFE). Firstly, blockchain security and vehicular fog networking are introduced as preliminaries of the framework. Secondly, a three-layer architecture of B-SAFE is presented, focusing on vehicular communication, blockchain at fog nodes, and the cloud as trust and reward management for vehicles. Thirdly, details of the blockchain implementation at fog nodes are presented, along with a flowchart and algorithm. The performance of the evaluation of the proposed framework B-SAFE attests to the benefits in terms of trust, reward points, and threshold calculation.}, } @article {pmid38474954, year = {2024}, author = {Vercheval, N and Royen, R and Munteanu, A and Pižurica, A}, title = {PCGen: A Fully Parallelizable Point Cloud Generative Model.}, journal = {Sensors (Basel, Switzerland)}, volume = {24}, number = {5}, pages = {}, pmid = {38474954}, issn = {1424-8220}, support = {174B0911//Flanders AI Research Programme/ ; G094122N//Fonds Wetenschappelijk Onderzoek (FWO) project/ ; }, abstract = {Generative models have the potential to revolutionize 3D extended reality. A primary obstacle is that augmented and virtual reality need real-time computing. Current state-of-the-art point cloud random generation methods are not fast enough for these applications. We introduce a vector-quantized variational autoencoder model (VQVAE) that can synthesize high-quality point clouds in milliseconds. Unlike previous work in VQVAEs, our model offers a compact sample representation suitable for conditional generation and data exploration with potential applications in rapid prototyping. We achieve this result by combining architectural improvements with an innovative approach for probabilistic random generation. 
First, we rethink current parallel point cloud autoencoder structures, and we propose several solutions to improve robustness, efficiency and reconstruction quality. Notable contributions in the decoder architecture include an innovative computation layer to process the shape semantic information, an attention mechanism that helps the model focus on different areas and a filter to cover possible sampling errors. Secondly, we introduce a parallel sampling strategy for VQVAE models consisting of a double encoding system, where a variational autoencoder learns how to generate the complex discrete distribution of the VQVAE, not only allowing quick inference but also describing the shape with a few global variables. We compare the proposed decoder and our VQVAE model with established and concurrent work, and we prove, one by one, the validity of the single contributions.}, } @article {pmid38474952, year = {2024}, author = {AlSaleh, I and Al-Samawi, A and Nissirat, L}, title = {Novel Machine Learning Approach for DDoS Cloud Detection: Bayesian-Based CNN and Data Fusion Enhancements.}, journal = {Sensors (Basel, Switzerland)}, volume = {24}, number = {5}, pages = {}, pmid = {38474952}, issn = {1424-8220}, support = {GRANT5,340//King Faisal University/ ; }, abstract = {Cloud computing has revolutionized the information technology landscape, offering businesses the flexibility to adapt to diverse business models without the need for costly on-site servers and network infrastructure. A recent survey reveals that 95% of enterprises have already embraced cloud technology, with 79% of their workloads migrating to cloud environments. However, the deployment of cloud technology introduces significant cybersecurity risks, including network security vulnerabilities, data access control challenges, and the ever-looming threat of cyber-attacks such as Distributed Denial of Service (DDoS) attacks, which pose substantial risks to both cloud and network security. 
While Intrusion Detection Systems (IDS) have traditionally been employed for DDoS attack detection, prior studies have been constrained by various limitations. In response to these challenges, we present an innovative machine learning approach for DDoS cloud detection, known as the Bayesian-based Convolutional Neural Network (BaysCNN) model. Leveraging the CICDDoS2019 dataset, which encompasses 88 features, we employ Principal Component Analysis (PCA) for dimensionality reduction. Our BaysCNN model comprises 19 layers of analysis, forming the basis for training and validation. Our experimental findings conclusively demonstrate that the BaysCNN model significantly enhances the accuracy of DDoS cloud detection, achieving an impressive average accuracy rate of 99.66% across 13 multi-class attacks. To further elevate the model's performance, we introduce the Data Fusion BaysFusCNN approach, encompassing 27 layers. By leveraging Bayesian methods to estimate uncertainties and integrating features from multiple sources, this approach attains an even higher average accuracy of 99.79% across the same 13 multi-class attacks. Our proposed methodology not only offers valuable insights for the development of robust machine learning-based intrusion detection systems but also enhances the reliability and scalability of IDS in cloud computing environments. This empowers organizations to proactively mitigate security risks and fortify their defenses against malicious cyber-attacks.}, } @article {pmid38469580, year = {2024}, author = {Yakubu, B and Appiah, EM and Adu, AF}, title = {Pangenome Analysis of Helicobacter pylori Isolates from Selected Areas of Africa Indicated Diverse Antibiotic Resistance and Virulence Genes.}, journal = {International journal of genomics}, volume = {2024}, number = {}, pages = {5536117}, pmid = {38469580}, issn = {2314-4378}, abstract = {The challenge facing Helicobacter pylori (H. 
pylori) infection management in some parts of Africa is the evolution of drug-resistant species, the lack of gold standard in diagnostic methods, and the ineffectiveness of current vaccines against the bacteria. It is being established that even though clinical consequences linked to the bacteria vary geographically, there is rather a generic approach to treatment. This situation has remained problematic in the successful fight against the bacteria in parts of Africa. As a result, this study compared the genomes of selected H. pylori isolates from selected areas of Africa and evaluated their virulence and antibiotic drug resistance, those that are highly pathogenic and are associated with specific clinical outcomes and those that are less virulent and rarely associated with clinical outcomes. 146 genomes of H. pylori isolated from selected locations of Africa were sampled, and bioinformatic tools such as Abricate, CARD RGI, MLST, Prokka, Roary, Phandango, Google Sheets, and iTOLS were used to compare the isolates and their antibiotic resistance or susceptibility. Over 20 k virulence and AMR genes were observed. About 95% of the isolates were genetically diverse, 90% of the isolates harbored shell genes, and 50% harbored cloud and core genes. Some isolates did not retain the cagA and vacA genes. Clarithromycin, metronidazole, amoxicillin, and tinidazole were resistant to most AMR genes (vacA, cagA, oip, and bab). Conclusion. This study found both virulence and AMR genes in all H. pylori strains in all the selected geographies around Africa with differing quantities. MLST, Pangenome, and ORF analyses showed disparities among the isolates. This in general could imply diversities in terms of genetics, evolution, and protein production. 
Therefore, generic administration of antibiotics such as clarithromycin, amoxicillin, and erythromycin as treatment methods in the African subregion could be contributing to the spread of the bacterium's antibiotic resistance.}, } @article {pmid38468957, year = {2024}, author = {Tripathy, SS and Bebortta, S and Chowdhary, CL and Mukherjee, T and Kim, S and Shafi, J and Ijaz, MF}, title = {FedHealthFog: A federated learning-enabled approach towards healthcare analytics over fog computing platform.}, journal = {Heliyon}, volume = {10}, number = {5}, pages = {e26416}, pmid = {38468957}, issn = {2405-8440}, abstract = {The emergence of federated learning (FL) technique in fog-enabled healthcare system has leveraged enhanced privacy towards safeguarding sensitive patient information over heterogeneous computing platforms. In this paper, we introduce the FedHealthFog framework, which was meticulously developed to overcome the difficulties of distributed learning in resource-constrained IoT-enabled healthcare systems, particularly those sensitive to delays and energy efficiency. Conventional federated learning approaches face challenges stemming from substantial compute requirements and significant communication costs. This is primarily due to their reliance on a singular server for the aggregation of global data, which results in inefficient training models. We present a transformational approach to address these problems by elevating strategically placed fog nodes to the position of local aggregators within the federated learning architecture. A sophisticated greedy heuristic technique is used to optimize the choice of a fog node as the global aggregator in each communication cycle between edge devices and the cloud. The FedHealthFog system notably accounts for drop in communication latency of 87.01%, 26.90%, and 71.74%, and energy consumption of 57.98%, 34.36%, and 35.37% respectively, for three benchmark algorithms analyzed in this study. 
The effectiveness of FedHealthFog is strongly supported by outcomes of our experiments compared to cutting-edge alternatives while simultaneously reducing the number of global aggregation cycles. These findings highlight FedHealthFog's potential to transform federated learning in resource-constrained IoT environments for delay-sensitive applications.}, } @article {pmid38466691, year = {2024}, author = {Shafi, I and Din, S and Farooq, S and Díez, IT and Breñosa, J and Espinosa, JCM and Ashraf, I}, title = {Design and development of patient health tracking, monitoring and big data storage using Internet of Things and real time cloud computing.}, journal = {PloS one}, volume = {19}, number = {3}, pages = {e0298582}, pmid = {38466691}, issn = {1932-6203}, mesh = {Humans ; *Cloud Computing ; *Internet of Things ; Pandemics ; Monitoring, Physiologic ; Information Storage and Retrieval ; }, abstract = {With the outbreak of the COVID-19 pandemic, social isolation and quarantine have become commonplace across the world. IoT health monitoring solutions eliminate the need for regular doctor visits and interactions among patients and medical personnel. Many patients in wards or intensive care units require continuous monitoring of their health. Continuous patient monitoring is a hectic practice in hospitals with limited staff; in a pandemic situation like COVID-19, it becomes a much more difficult practice when hospitals are working at full capacity and there is still a risk of medical workers being infected. In this study, we propose an Internet of Things (IoT)-based patient health monitoring system that collects real-time data on important health indicators such as pulse rate, blood oxygen saturation, and body temperature but can be expanded to include more parameters. Our system is comprised of a hardware component that collects and transmits data from sensors to a cloud-based storage system, where it can be accessed and analyzed by healthcare specialists. 
The ESP-32 microcontroller interfaces with the multiple sensors and wirelessly transmits the collected data to the cloud storage system. A pulse oximeter is utilized in our system to measure blood oxygen saturation and body temperature, as well as a heart rate monitor to measure pulse rate. A web-based interface is also implemented, allowing healthcare practitioners to access and visualize the collected data in real-time, making remote patient monitoring easier. Overall, our IoT-based patient health monitoring system represents a significant advancement in remote patient monitoring, allowing healthcare practitioners to access real-time data on important health metrics and detect potential health issues before they escalate.}, } @article {pmid38460568, year = {2024}, author = {Ghiandoni, GM and Evertsson, E and Riley, DJ and Tyrchan, C and Rathi, PC}, title = {Augmenting DMTA using predictive AI modelling at AstraZeneca.}, journal = {Drug discovery today}, volume = {29}, number = {4}, pages = {103945}, doi = {10.1016/j.drudis.2024.103945}, pmid = {38460568}, issn = {1878-5832}, mesh = {*Artificial Intelligence ; *Biological Assay ; Drug Discovery ; }, abstract = {Design-Make-Test-Analyse (DMTA) is the discovery cycle through which molecules are designed, synthesised, and assayed to produce data that in turn are analysed to inform the next iteration. The process is repeated until viable drug candidates are identified, often requiring many cycles before reaching a sweet spot. The advent of artificial intelligence (AI) and cloud computing presents an opportunity to innovate drug discovery to reduce the number of cycles needed to yield a candidate. Here, we present the Predictive Insight Platform (PIP), a cloud-native modelling platform developed at AstraZeneca. 
The impact of PIP in each step of DMTA, as well as its architecture, integration, and usage, is discussed and used to provide insights into the future of drug discovery.}, } @article {pmid38455562, year = {2024}, author = {Gokool, S and Mahomed, M and Brewer, K and Naiken, V and Clulow, A and Sibanda, M and Mabhaudhi, T}, title = {Crop mapping in smallholder farms using unmanned aerial vehicle imagery and geospatial cloud computing infrastructure.}, journal = {Heliyon}, volume = {10}, number = {5}, pages = {e26913}, pmid = {38455562}, issn = {2405-8440}, abstract = {Smallholder farms are major contributors to agricultural production, food security, and socio-economic growth in many developing countries. However, they generally lack the resources to fully maximize their potential. Subsequently, they require innovative, evidence-based and lower-cost solutions to optimize their productivity. Recently, precision agricultural practices facilitated by unmanned aerial vehicles (UAVs) have gained traction in the agricultural sector and have great potential for smallholder farm applications. Furthermore, advances in geospatial cloud computing have opened new and exciting possibilities in the remote sensing arena. In light of these recent developments, the focus of this study was to explore and demonstrate the utility of using the advanced image processing capabilities of the Google Earth Engine (GEE) geospatial cloud computing platform to process and analyse a very high spatial resolution multispectral UAV image for mapping land use land cover (LULC) within smallholder farms. The results showed that LULC could be mapped at a 0.50 m spatial resolution with an overall accuracy of 91%. Overall, we found GEE to be an extremely useful platform for conducting advanced image analysis on UAV imagery and rapid communication of results. 
Notwithstanding the limitations of the study, the findings presented herein are quite promising and clearly demonstrate how modern agricultural practices can be implemented to facilitate improved agricultural management in smallholder farmers.}, } @article {pmid38453988, year = {2024}, author = {Inam, S and Kanwal, S and Firdous, R and Hajjej, F}, title = {Blockchain based medical image encryption using Arnold's cat map in a cloud environment.}, journal = {Scientific reports}, volume = {14}, number = {1}, pages = {5678}, pmid = {38453988}, issn = {2045-2322}, abstract = {Improved software for processing medical images has inspired tremendous interest in modern medicine in recent years. Modern healthcare equipment generates huge amounts of data, such as scanned medical images and computerized patient information, which must be secured for future use. Diversity in the healthcare industry, namely in the form of medical data, is one of the largest challenges for researchers. Cloud environment and the Block chain technology have both demonstrated their own use. The purpose of this study is to combine both technologies for safe and secure transaction. Storing or sending medical data through public clouds exposes information into potential eavesdropping, data breaches and unauthorized access. Encrypting data before transmission is crucial to mitigate these security risks. As a result, a Blockchain based Chaotic Arnold's cat map Encryption Scheme (BCAES) is proposed in this paper. The BCAES first encrypts the image using Arnold's cat map encryption scheme and then sends the encrypted image into Cloud Server and stores the signed document of plain image into blockchain. As blockchain is often considered more secure due to its distributed nature and consensus mechanism, data receiver will ensure data integrity and authenticity of image after decryption using signed document stored into the blockchain. Various analysis techniques have been used to examine the proposed scheme. 
The results of analysis like key sensitivity analysis, key space analysis, Information Entropy, histogram correlation of adjacent pixels, Number of Pixel Change Rate, Peak Signal Noise Ratio, Unified Average Changing Intensity, and similarity analysis like Mean Square Error, and Structural Similarity Index Measure illustrated that our proposed scheme is an efficient encryption scheme as compared to some recent literature. Our current achievements surpass all previous endeavors, setting a new standard of excellence.}, } @article {pmid38452470, year = {2024}, author = {Zhong, C and Darbandi, M and Nassr, M and Latifian, A and Hosseinzadeh, M and Jafari Navimipour, N}, title = {A new cloud-based method for composition of healthcare services using deep reinforcement learning and Kalman filtering.}, journal = {Computers in biology and medicine}, volume = {172}, number = {}, pages = {108152}, doi = {10.1016/j.compbiomed.2024.108152}, pmid = {38452470}, issn = {1879-0534}, mesh = {Humans ; *Cloud Computing ; Reproducibility of Results ; *Delivery of Health Care ; }, abstract = {Healthcare has significantly contributed to the well-being of individuals around the globe; nevertheless, further benefits could be derived from a more streamlined healthcare system without incurring additional costs. Recently, the main attributes of cloud computing, such as on-demand service, high scalability, and virtualization, have brought many benefits across many areas, especially in medical services. It is considered an important element in healthcare services, enhancing the performance and efficacy of the services. The current state of the healthcare industry requires the supply of healthcare products and services, increasing its viability for everyone involved. Developing new approaches for discovering and selecting healthcare services in the cloud has become more critical due to the rising popularity of these kinds of services. 
As a result of the diverse array of healthcare services, service composition enables the execution of intricate operations by integrating multiple services' functionalities into a single procedure. However, many methods in this field encounter several issues, such as high energy consumption, cost, and response time. This article introduces a novel layered method for selecting and evaluating healthcare services to find optimal service selection and composition solutions based on Deep Reinforcement Learning (Deep RL), Kalman filtering, and repeated training, addressing the aforementioned issues. The results revealed that the proposed method has achieved acceptable results in terms of availability, reliability, energy consumption, and response time when compared to other methods.}, } @article {pmid38449567, year = {2024}, author = {Wang, J and Yin, J and Nguyen, MH and Wang, J and Xu, W}, title = {Editorial: Big scientific data analytics on HPC and cloud.}, journal = {Frontiers in big data}, volume = {7}, number = {}, pages = {1353988}, doi = {10.3389/fdata.2024.1353988}, pmid = {38449567}, issn = {2624-909X}, } @article {pmid38449564, year = {2024}, author = {Saad, M and Enam, RN and Qureshi, R}, title = {Optimizing multi-objective task scheduling in fog computing with GA-PSO algorithm for big data application.}, journal = {Frontiers in big data}, volume = {7}, number = {}, pages = {1358486}, pmid = {38449564}, issn = {2624-909X}, abstract = {As the volume and velocity of Big Data continue to grow, traditional cloud computing approaches struggle to meet the demands of real-time processing and low latency. Fog computing, with its distributed network of edge devices, emerges as a compelling solution. However, efficient task scheduling in fog computing remains a challenge due to its inherently multi-objective nature, balancing factors like execution time, response time, and resource utilization. 
This paper proposes a hybrid Genetic Algorithm (GA)-Particle Swarm Optimization (PSO) algorithm to optimize multi-objective task scheduling in fog computing environments. The hybrid approach combines the strengths of GA and PSO, achieving effective exploration and exploitation of the search space, leading to improved performance compared to traditional single-algorithm approaches. The proposed hybrid algorithm results improved the execution time by 85.68% when compared with GA algorithm, by 84% when compared with Hybrid PWOA and by 51.03% when compared with PSO algorithm as well as it improved the response time by 67.28% when compared with GA algorithm, by 54.24% when compared with Hybrid PWOA and by 75.40% when compared with PSO algorithm as well as it improved the completion time by 68.69% when compared with GA algorithm, by 98.91% when compared with Hybrid PWOA and by 75.90% when compared with PSO algorithm when various tasks inputs are given. The proposed hybrid algorithm results also improved the execution time by 84.87% when compared with GA algorithm, by 88.64% when compared with Hybrid PWOA and by 85.07% when compared with PSO algorithm it improved the response time by 65.92% when compared with GA algorithm, by 80.51% when compared with Hybrid PWOA and by 85.26% when compared with PSO algorithm as well as it improved the completion time by 67.60% when compared with GA algorithm, by 81.34% when compared with Hybrid PWOA and by 85.23% when compared with PSO algorithm when various fog nodes are given.}, } @article {pmid38435622, year = {2024}, author = {Mehmood, T and Latif, S and Jamail, NSM and Malik, A and Latif, R}, title = {LSTMDD: an optimized LSTM-based drift detector for concept drift in dynamic cloud computing.}, journal = {PeerJ. 
Computer science}, volume = {10}, number = {}, pages = {e1827}, pmid = {38435622}, issn = {2376-5992}, abstract = {This study aims to investigate the problem of concept drift in cloud computing and emphasizes the importance of early detection for enabling optimum resource utilization and offering an effective solution. The analysis includes synthetic and real-world cloud datasets, stressing the need for appropriate drift detectors tailored to the cloud domain. A modified version of Long Short-Term Memory (LSTM) called the LSTM Drift Detector (LSTMDD) is proposed and compared with other top drift detection techniques using prediction error as the primary evaluation metric. LSTMDD is optimized to improve performance in detecting anomalies in non-Gaussian distributed cloud environments. The experiments show that LSTMDD outperforms other methods for gradual and sudden drift in the cloud domain. The findings suggest that machine learning techniques such as LSTMDD could be a promising approach to addressing the problem of concept drift in cloud computing, leading to more efficient resource allocation and improved performance.}, } @article {pmid38429324, year = {2024}, author = {Yin, X and Fang, W and Liu, Z and Liu, D}, title = {A novel multi-scale CNN and Bi-LSTM arbitration dense network model for low-rate DDoS attack detection.}, journal = {Scientific reports}, volume = {14}, number = {1}, pages = {5111}, pmid = {38429324}, issn = {2045-2322}, support = {2021GX056//the Key Technologies R\&D Program of Weifang/ ; 2023GX063//the Key Technologies R\&D Program of Weifang/ ; KJRC2021002//the Foundation for the Talents by the Weifang University of Science and Technology/ ; ZR2021MF086//the Natural Science Foundation of Shandong Province/ ; 2019GNC106034//the Key R\&D Program of Shandong Province under Grant/ ; }, abstract = {Low-rate distributed denial of service attacks, as known as LDDoS attacks, pose the notorious security risks in cloud computing network. 
They overload the cloud servers and degrade network service quality with the stealthy strategy. Furthermore, this kind of small ratio and pulse-like abnormal traffic leads to a serious data scale problem. As a result, the existing models for detecting minority and adversary LDDoS attacks are insufficient in both detection accuracy and time consumption. This paper proposes a novel multi-scale Convolutional Neural Networks (CNN) and bidirectional Long-short Term Memory (bi-LSTM) arbitration dense network model (called MSCBL-ADN) for learning and detecting LDDoS attack behaviors under the condition of limited dataset and time consumption. The MSCBL-ADN incorporates CNN for preliminary spatial feature extraction and embedding-based bi-LSTM for time relationship extraction. And then, it employs arbitration network to re-weigh feature importance for higher accuracy. At last, it uses 2-block dense connection network to perform final classification. The experimental results conducted on popular ISCX-2016-SlowDos dataset have demonstrated that the proposed MSCBL-ADN model has a significant improvement with high detection accuracy and superior time performance over the state-of-the-art models.}, } @article {pmid38421498, year = {2024}, author = {Mahato, T and Parida, BR and Bar, S}, title = {Assessing tea plantations biophysical and biochemical characteristics in Northeast India using satellite data.}, journal = {Environmental monitoring and assessment}, volume = {196}, number = {3}, pages = {327}, pmid = {38421498}, issn = {1573-2959}, support = {F.4-5(209-FRP)/2015/BSR//University Grants Commission/ ; }, mesh = {*Environmental Monitoring ; *Camellia sinensis ; India ; Nitrogen ; Tea ; }, abstract = {Despite advancements in using multi-temporal satellite data to assess long-term changes in Northeast India's tea plantations, a research gap exists in understanding the intricate interplay between biophysical and biochemical characteristics. 
Further exploration is crucial for precise, sustainable monitoring and management. In this study, satellite-derived vegetation indices and near-proximal sensor data were deployed to deduce various physico-chemical characteristics and to evaluate the health conditions of tea plantations in northeast India. The districts, such as Sonitpur, Jorhat, Sibsagar, Dibrugarh, and Tinsukia in Assam were selected, which are the major contributors to the tea industry in India. The Sentinel-2A (2022) data was processed in the Google Earth Engine (GEE) cloud platform and utilized for analyzing tea plantations biochemical and biophysical properties. Leaf chlorophyll (Cab) and nitrogen contents are determined using the Normalized Area Over Reflectance Curve (NAOC) index and flavanol contents, respectively. Biophysical and biochemical parameters of the tea assessed during the spring season (March-April) 2022 revealed that tea plantations located in Tinsukia and Dibrugarh were much healthier than the other districts in Assam which are evident from satellite-derived Enhanced Vegetation Index (EVI), Modified Soil Adjusted Vegetation Index (MSAVI), Leaf Area Index (LAI), and Fraction of Absorbed Photosynthetically Active Radiation (fPAR), including the Cab and nitrogen contents. The Cab of healthy tea plants varied from 25 to 35 µg/cm[2]. Pearson correlation among satellite-derived Cab and nitrogen with field measurements showed R[2] of 0.61-0.62 (p-value < 0.001). 
This study offered vital information about land alternations and tea health conditions, which can be crucial for conservation, monitoring, and management practices.}, } @article {pmid38420486, year = {2024}, author = {Liu, X and Wider, W and Fauzi, MA and Jiang, L and Udang, LN and Hossain, SFA}, title = {The evolution of smart hotels: A bibliometric review of the past, present and future trends.}, journal = {Heliyon}, volume = {10}, number = {4}, pages = {e26472}, pmid = {38420486}, issn = {2405-8440}, abstract = {This study provides a bibliometric analysis of smart hotel research, drawing from 613 publications in the Web of Science (WoS) database to examine scholarly trends and developments in this dynamic field. Smart hotels, characterized by integrating advanced technologies such as AI, IoT, cloud computing, and big data, aim to redefine customer experiences and operational efficiency. Utilizing co-citation and co-word analysis techniques, the research delves into the depth of literature from past to future trends. In co-citation analysis, clusters including "Sustainable Hotel and Green Hotel", "Theories Integration in Smart Hotel Research", and "Consumers' Decisions about Green Hotels" underscore the pivotal areas of past and current research. Co-word analysis further reveals emergent trend clusters: "The New Era of Sustainable Tourism", "Elevating Standards and Guest Loyalty", and "Hotels' New Sustainable Blueprint in Modern Travel". These clusters reflect the industry's evolving focus on sustainability and technology-enhanced guest experiences. Theoretically, this research bridges gaps in smart hotel literature, proposing new frameworks for understanding customer decisions amid technological advancements and environmental responsibilities. 
Practically, it offers valuable insights for hotel managers, guiding technology integration strategies for enhanced efficiency and customer loyalty while underscoring the critical role of green strategies and sustainability.}, } @article {pmid38420393, year = {2024}, author = {Mukred, M and Mokhtar, UA and Hawash, B and AlSalman, H and Zohaib, M}, title = {The adoption and use of learning analytics tools to improve decision making in higher learning institutions: An extension of technology acceptance model.}, journal = {Heliyon}, volume = {10}, number = {4}, pages = {e26315}, pmid = {38420393}, issn = {2405-8440}, abstract = {Learning Analytics Tools (LATs) can be used for informed decision-making regarding teaching strategies and their continuous enhancement. Therefore, LATs must be adopted in higher learning institutions, but several factors hinder its implementation, primarily due to the lack of an implementation model. Therefore, in this study, the focus is directed towards examining LATs adoption in Higher Learning Institutions (HLIs), with emphasis on the determinants of the adoption process. The study mainly aims to design a model of LAT adoption and use it in the above context to improve the institutions' decision-making and accordingly, the study adopted an extended version of Technology Acceptance Model (TAM) as the underpinning theory. Five experts validated the employed survey instrument, and 500 questionnaire copies were distributed through e-mails, from which 275 copies were retrieved from Saudi employees working at public HLIs. Data gathered was exposed to Partial Least Square-Structural Equation Modeling (PLS-SEM) for analysis and to test the proposed conceptual model. Based on the findings, the perceived usefulness of LAT plays a significant role as a determinant of its adoption. Other variables include top management support, financial support, and the government's role in LATs acceptance and adoption among HLIs. 
The findings also supported the contribution of LAT adoption and acceptance towards making informed decisions and highlighted the need for big data facility and cloud computing ability towards LATs usefulness. The findings have significant implications towards LATs implementation success among HLIs, providing clear insights into the factors that can enhance its adoption and acceptance. They also lay the basis for future studies in the area to validate further the effect of LATs on decision-making among HLIs institutions. Furthermore, the obtained findings are expected to serve as practical implications for policy makers and educational leaders in their objective to implement LAT using a multi-layered method that considers other aspects in addition to the perceptions of the individual user.}, } @article {pmid38409183, year = {2024}, author = {Grossman, RL and Boyles, RR and Davis-Dusenbery, BN and Haddock, A and Heath, AP and O'Connor, BD and Resnick, AC and Taylor, DM and Ahalt, S}, title = {A Framework for the Interoperability of Cloud Platforms: Towards FAIR Data in SAFE Environments.}, journal = {Scientific data}, volume = {11}, number = {1}, pages = {241}, pmid = {38409183}, issn = {2052-4463}, support = {HHSN261201400008C/CA/NCI NIH HHS/United States ; }, mesh = {*Cloud Computing ; *Electronic Health Records ; }, abstract = {As the number of cloud platforms supporting scientific research grows, there is an increasing need to support interoperability between two or more cloud platforms. A well accepted core concept is to make data in cloud platforms Findable, Accessible, Interoperable and Reusable (FAIR). We introduce a companion concept that applies to cloud-based computing environments that we call a Secure and Authorized FAIR Environment (SAFE). SAFE environments require data and platform governance structures and are designed to support the interoperability of sensitive or controlled access data, such as biomedical data. 
A SAFE environment is a cloud platform that has been approved through a defined data and platform governance process as authorized to hold data from another cloud platform and exposes appropriate APIs for the two platforms to interoperate.}, } @article {pmid38404043, year = {2024}, author = {Rusinovich, Y and Rusinovich, V and Buhayenka, A and Liashko, V and Sabanov, A and Holstein, DJF and Aldmour, S and Doss, M and Branzan, D}, title = {Classification of anatomic patterns of peripheral artery disease with automated machine learning (AutoML).}, journal = {Vascular}, volume = {}, number = {}, pages = {17085381241236571}, doi = {10.1177/17085381241236571}, pmid = {38404043}, issn = {1708-539X}, abstract = {AIM: The aim of this study was to investigate the potential of novel automated machine learning (AutoML) in vascular medicine by developing a discriminative artificial intelligence (AI) model for the classification of anatomical patterns of peripheral artery disease (PAD).

MATERIAL AND METHODS: Random open-source angiograms of lower limbs were collected using a web-indexed search. An experienced researcher in vascular medicine labelled the angiograms according to the most applicable grade of femoropopliteal disease in the Global Limb Anatomic Staging System (GLASS). An AutoML model was trained using the Vertex AI (Google Cloud) platform to classify the angiograms according to the GLASS grade with a multi-label algorithm. Following deployment, we conducted a test using 25 random angiograms (five from each GLASS grade). Model tuning through incremental training by introducing new angiograms was executed to the limit of the allocated quota following the initial evaluation to determine its effect on the software's performance.

RESULTS: We collected 323 angiograms to create the AutoML model. Among these, 80 angiograms were labelled as grade 0 of femoropopliteal disease in GLASS, 114 as grade 1, 34 as grade 2, 25 as grade 3 and 70 as grade 4. After 4.5 h of training, the AI model was deployed. The AI self-assessed average precision was 0.77 (0 is minimal and 1 is maximal). During the testing phase, the AI model successfully determined the GLASS grade in 100% of the cases. The agreement with the researcher was almost perfect with the number of observed agreements being 22 (88%), Kappa = 0.85 (95% CI 0.69-1.0). The best results were achieved in predicting GLASS grade 0 and grade 4 (initial precision: 0.76 and 0.84). However, the AI model exhibited poorer results in classifying GLASS grade 3 (initial precision: 0.2) compared to other grades. Disagreements between the AI and the researcher were associated with the low resolution of the test images. Incremental training expanded the initial dataset by 23% to a total of 417 images, which improved the model's average precision by 11% to 0.86.

CONCLUSION: After a brief training period with a limited dataset, AutoML has demonstrated its potential in identifying and classifying the anatomical patterns of PAD, operating unhindered by the factors that can affect human analysts, such as fatigue or lack of experience. This technology bears the potential to revolutionize outcome prediction and standardize evidence-based revascularization strategies for patients with PAD, leveraging its adaptability and ability to continuously improve with additional data. The pursuit of further research in AutoML within the field of vascular medicine is both promising and warranted. However, it necessitates additional financial support to realize its full potential.}, } @article {pmid38403304, year = {2024}, author = {Wu, ZF and Yang, SJ and Yang, YQ and Wang, ZQ and Ai, L and Zhu, GH and Zhu, WF}, title = {[Current situation and development trend of digital traditional Chinese medicine pharmacy].}, journal = {Zhongguo Zhong yao za zhi = Zhongguo zhongyao zazhi = China journal of Chinese materia medica}, volume = {49}, number = {2}, pages = {285-293}, doi = {10.19540/j.cnki.cjcmm.20230904.301}, pmid = {38403304}, issn = {1001-5302}, mesh = {Humans ; Medicine, Chinese Traditional ; Artificial Intelligence ; Technology, Pharmaceutical ; Drug Industry ; *Pharmacy ; *Drugs, Chinese Herbal ; }, abstract = {The 21st century is a highly information-driven era, and traditional Chinese medicine(TCM) pharmacy is also moving towards digitization and informatization. New technologies such as artificial intelligence and big data with information technology as the core are being integrated into various aspects of drug research, manufacturing, evaluation, and application, promoting interaction between these stages and improving the quality and efficiency of TCM preparations. This, in turn, provides better healthcare services to the general population. 
The deep integration of emerging technologies such as artificial intelligence, big data, and cloud computing with the TCM pharmaceutical industry will innovate TCM pharmaceutical technology, accelerate the research and industrialization process of TCM pharmacy, provide cutting-edge technological support to the global scientific community, boost the efficiency of the TCM industry, and promote economic and social development. Drawing from recent developments in TCM pharmacy in China, this paper discussed the current research status and future trends in digital TCM pharmacy, aiming to provide a reference for future research in this field.}, } @article {pmid38400504, year = {2024}, author = {Alasmary, H}, title = {ScalableDigitalHealth (SDH): An IoT-Based Scalable Framework for Remote Patient Monitoring.}, journal = {Sensors (Basel, Switzerland)}, volume = {24}, number = {4}, pages = {}, pmid = {38400504}, issn = {1424-8220}, support = {The authors extend their appreciation to the Deanship of Scientific Research at King Khalid University for funding this work through large group Research Project under grant number RGP2/312/44//King Khalid University/ ; }, mesh = {Aged ; Humans ; *Awareness ; *Benchmarking ; Blood Pressure ; Body Temperature ; Monitoring, Physiologic ; }, abstract = {Addressing the increasing demand for remote patient monitoring, especially among the elderly and mobility-impaired, this study proposes the "ScalableDigitalHealth" (SDH) framework. The framework integrates smart digital health solutions with latency-aware edge computing autoscaling, providing a novel approach to remote patient monitoring. By leveraging IoT technology and application autoscaling, the "SDH" enables the real-time tracking of critical health parameters, such as ECG, body temperature, blood pressure, and oxygen saturation. These vital metrics are efficiently transmitted in real time to AWS cloud storage through a layered networking architecture. 
The contributions are two-fold: (1) establishing real-time remote patient monitoring and (2) developing a scalable architecture that features latency-aware horizontal pod autoscaling for containerized healthcare applications. The architecture incorporates a scalable IoT-based architecture and an innovative microservice autoscaling strategy in edge computing, driven by dynamic latency thresholds and enhanced by the integration of custom metrics. This work ensures heightened accessibility, cost-efficiency, and rapid responsiveness to patient needs, marking a significant leap forward in the field. By dynamically adjusting pod numbers based on latency, the system optimizes system responsiveness, particularly in edge computing's proximity-based processing. This innovative fusion of technologies not only revolutionizes remote healthcare delivery but also enhances Kubernetes performance, preventing unresponsiveness during high usage.}, } @article {pmid38400486, year = {2024}, author = {Dhiman, P and Saini, N and Gulzar, Y and Turaev, S and Kaur, A and Nisa, KU and Hamid, Y}, title = {A Review and Comparative Analysis of Relevant Approaches of Zero Trust Network Model.}, journal = {Sensors (Basel, Switzerland)}, volume = {24}, number = {4}, pages = {}, pmid = {38400486}, issn = {1424-8220}, support = {This research was funded by the United Arab Emirates UAEU-ZU Joint Research Grant G00003819 (Fund No.: 12R138) Emirates Center for Mobility Research.//United Arab Emirates University/ ; }, abstract = {The Zero Trust safety architecture emerged as an intriguing approach for overcoming the shortcomings of standard network security solutions. This extensive survey study provides a meticulous explanation of the underlying principles of Zero Trust, as well as an assessment of the many strategies and possibilities for effective implementation. 
The survey begins by examining the role of authentication and access control within Zero Trust Architectures, and subsequently investigates innovative authentication, as well as access control solutions across different scenarios. It more deeply explores traditional techniques for encryption, micro-segmentation, and security automation, emphasizing their importance in achieving a secure Zero Trust environment. Zero Trust Architecture is explained in brief, along with the Taxonomy of Zero Trust Network Features. This review article provides useful insights into the Zero Trust paradigm, its approaches, problems, and future research objectives for scholars, practitioners, and policymakers. This survey contributes to the growth and implementation of secure network architectures in critical infrastructures by developing a deeper knowledge of Zero Trust.}, } @article {pmid38400360, year = {2024}, author = {Li, W and Zhou, H and Lu, Z and Kamarthi, S}, title = {Navigating the Evolution of Digital Twins Research through Keyword Co-Occurrence Network Analysis.}, journal = {Sensors (Basel, Switzerland)}, volume = {24}, number = {4}, pages = {}, pmid = {38400360}, issn = {1424-8220}, abstract = {Digital twin technology has become increasingly popular and has revolutionized data integration and system modeling across various industries, such as manufacturing, energy, and healthcare. This study aims to explore the evolving research landscape of digital twins using Keyword Co-occurrence Network (KCN) analysis. We analyze metadata from 9639 peer-reviewed articles published between 2000 and 2023. The results unfold in two parts. The first part examines trends and keyword interconnection over time, and the second part maps sensing technology keywords to six application areas. This study reveals that research on digital twins is rapidly diversifying, with focused themes such as predictive and decision-making functions. 
Additionally, there is an emphasis on real-time data and point cloud technologies. The advent of federated learning and edge computing also highlights a shift toward distributed computation, prioritizing data privacy. This study confirms that digital twins have evolved into complex systems that can conduct predictive operations through advanced sensing technologies. The discussion also identifies challenges in sensor selection and empirical knowledge integration.}, } @article {pmid38400338, year = {2024}, author = {Wiryasaputra, R and Huang, CY and Lin, YJ and Yang, CT}, title = {An IoT Real-Time Potable Water Quality Monitoring and Prediction Model Based on Cloud Computing Architecture.}, journal = {Sensors (Basel, Switzerland)}, volume = {24}, number = {4}, pages = {}, pmid = {38400338}, issn = {1424-8220}, support = {112-2622-E-029-003,112-2621-M-029-004, and 110-2221-E-029-020-MY3//the National Science and Technology Council (NSTC), Taiwan R.O.C./ ; }, mesh = {Humans ; Artificial Intelligence ; Cloud Computing ; *Drinking Water ; *Internet of Things ; Data Accuracy ; }, abstract = {In order to achieve the Sustainable Development Goals (SDG), it is imperative to ensure the safety of drinking water. The characteristics of each drinkable water, encompassing taste, aroma, and appearance, are unique. Inadequate water infrastructure and treatment can affect these features and may also threaten public health. This study utilizes the Internet of Things (IoT) in developing a monitoring system, particularly for water quality, to reduce the risk of contracting diseases. Water quality components data, such as water temperature, alkalinity or acidity, and contaminants, were obtained through a series of linked sensors. An Arduino microcontroller board acquired all the data and the Narrow Band-IoT (NB-IoT) transmitted them to the web server. 
Due to limited human resources to observe the water quality physically, the monitoring was complemented by real-time notification alerts via a telephone text messaging application. The water quality data were monitored using Grafana in web mode, and the binary classifiers of machine learning techniques were applied to predict whether the water was drinkable or not based on the data collected, which were stored in a database. The non-decision tree, as well as the decision tree, were evaluated based on the improvements of the artificial intelligence framework. With a ratio of 60% for data training, 20% for data validation, and 10% for data testing, the performance of the decision tree (DT) model was more prominent in comparison with the Gradient Boosting (GB), Random Forest (RF), Neural Network (NN), and Support Vector Machine (SVM) modeling approaches. Through the monitoring and prediction of results, the authorities can sample the water sources every two weeks.}, } @article {pmid38400323, year = {2024}, author = {Pan, S and Huang, C and Fan, J and Shi, Z and Tong, J and Wang, H}, title = {Optimizing Internet of Things Fog Computing: Through Lyapunov-Based Long Short-Term Memory Particle Swarm Optimization Algorithm for Energy Consumption Optimization.}, journal = {Sensors (Basel, Switzerland)}, volume = {24}, number = {4}, pages = {}, pmid = {38400323}, issn = {1424-8220}, abstract = {In the era of continuous development in Internet of Things (IoT) technology, smart services are penetrating various facets of societal life, leading to a growing demand for interconnected devices. Many contemporary devices are no longer mere data producers but also consumers of data. As a result, massive amounts of data are transmitted to the cloud, but the latency generated in edge-to-cloud communication is unacceptable for many tasks. 
In response to this, this paper introduces a novel contribution-a layered computing network built on the principles of fog computing, accompanied by a newly devised algorithm designed to optimize user tasks and allocate computing resources within rechargeable networks. The proposed algorithm, a synergy of Lyapunov-based, dynamic Long Short-Term Memory (LSTM) networks, and Particle Swarm Optimization (PSO), allows for predictive task allocation. The fog servers dynamically train LSTM networks to effectively forecast the data features of user tasks, facilitating proper unload decisions based on task priorities. In response to the challenge of slower hardware upgrades in edge devices compared to user demands, the algorithm optimizes the utilization of low-power devices and addresses performance limitations. Additionally, this paper considers the unique characteristics of rechargeable networks, where computing nodes acquire energy through charging. Utilizing Lyapunov functions for dynamic resource control enables nodes with abundant resources to maximize their potential, significantly reducing energy consumption and enhancing overall performance. The simulation results demonstrate that our algorithm surpasses traditional methods in terms of energy efficiency and resource allocation optimization. Despite the limitations of prediction accuracy in Fog Servers (FS), the proposed results significantly promote overall performance. 
The proposed approach improves the efficiency and the user experience of Internet of Things systems in terms of latency and energy consumption.}, } @article {pmid38400319, year = {2024}, author = {Brata, KC and Funabiki, N and Panduman, YYF and Fajrianti, ED}, title = {An Enhancement of Outdoor Location-Based Augmented Reality Anchor Precision through VSLAM and Google Street View.}, journal = {Sensors (Basel, Switzerland)}, volume = {24}, number = {4}, pages = {}, pmid = {38400319}, issn = {1424-8220}, abstract = {Outdoor Location-Based Augmented Reality (LAR) applications require precise positioning for seamless integrations of virtual content into immersive experiences. However, common solutions in outdoor LAR applications rely on traditional smartphone sensor fusion methods, such as the Global Positioning System (GPS) and compasses, which often lack the accuracy needed for precise AR content alignments. In this paper, we introduce an innovative approach to enhance LAR anchor precision in outdoor environments. We leveraged Visual Simultaneous Localization and Mapping (VSLAM) technology, in combination with innovative cloud-based methodologies, and harnessed the extensive visual reference database of Google Street View (GSV), to address the accuracy limitation problems. For the evaluation, 10 Point of Interest (POI) locations were used as anchor point coordinates in the experiments. We compared the accuracies between our approach and the common sensor fusion LAR solution comprehensively involving accuracy benchmarking and running load performance testing. 
The results demonstrate substantial enhancements in overall positioning accuracies compared to conventional GPS-based approaches for aligning AR anchor content in the real world.}, } @article {pmid38376453, year = {2024}, author = {Horstmann, A and Riggs, S and Chaban, Y and Clare, DK and de Freitas, G and Farmer, D and Howe, A and Morris, KL and Hatton, D}, title = {A service-based approach to cryoEM facility processing pipelines at eBIC.}, journal = {Acta crystallographica. Section D, Structural biology}, volume = {80}, number = {Pt 3}, pages = {174-180}, pmid = {38376453}, issn = {2059-7983}, mesh = {*Software ; *Image Processing, Computer-Assisted/methods ; Cryoelectron Microscopy/methods ; Workflow ; Cloud Computing ; }, abstract = {Electron cryo-microscopy image-processing workflows are typically composed of elements that may, broadly speaking, be categorized as high-throughput workloads which transition to high-performance workloads as preprocessed data are aggregated. The high-throughput elements are of particular importance in the context of live processing, where an optimal response is highly coupled to the temporal profile of the data collection. In other words, each movie should be processed as quickly as possible at the earliest opportunity. The high level of disconnected parallelization in the high-throughput problem directly allows a completely scalable solution across a distributed computer system, with the only technical obstacle being an efficient and reliable implementation. The cloud computing frameworks primarily developed for the deployment of high-availability web applications provide an environment with a number of appealing features for such high-throughput processing tasks. 
Here, an implementation of an early-stage processing pipeline for electron cryotomography experiments using a service-based architecture deployed on a Kubernetes cluster is discussed in order to demonstrate the benefits of this approach and how it may be extended to scenarios of considerably increased complexity.}, } @article {pmid38370642, year = {2024}, author = {McMurry, AJ and Gottlieb, DI and Miller, TA and Jones, JR and Atreja, A and Crago, J and Desai, PM and Dixon, BE and Garber, M and Ignatov, V and Kirchner, LA and Payne, PRO and Saldanha, AJ and Shankar, PRV and Solad, YV and Sprouse, EA and Terry, M and Wilcox, AB and Mandl, KD}, title = {Cumulus: A federated EHR-based learning system powered by FHIR and AI.}, journal = {medRxiv : the preprint server for health sciences}, volume = {}, number = {}, pages = {}, pmid = {38370642}, support = {NU38OT000286/OT/OSTLTS CDC HHS/United States ; U01 TR002623/TR/NCATS NIH HHS/United States ; U01 TR002997/TR/NCATS NIH HHS/United States ; U18DP006500/ACL/ACL HHS/United States ; }, abstract = {OBJECTIVE: To address challenges in large-scale electronic health record (EHR) data exchange, we sought to develop, deploy, and test an open source, cloud-hosted app 'listener' that accesses standardized data across the SMART/HL7 Bulk FHIR Access application programming interface (API).

METHODS: We advance a model for scalable, federated data sharing and learning. Cumulus software is designed to address key technology and policy desiderata including local utility, control, and administrative simplicity as well as privacy preservation during robust data sharing, and AI for processing unstructured text.

RESULTS: Cumulus relies on containerized, cloud-hosted software, installed within a healthcare organization's security envelope. Cumulus accesses EHR data via the Bulk FHIR interface and streamlines automated processing and sharing. The modular design enables use of the latest AI and natural language processing tools and supports provider autonomy and administrative simplicity. In an initial test, Cumulus was deployed across five healthcare systems each partnered with public health. Cumulus output is patient counts which were aggregated into a table stratifying variables of interest to enable population health studies. All code is available open source. A policy stipulating that only aggregate data leave the institution greatly facilitated data sharing agreements.

DISCUSSION AND CONCLUSION: Cumulus addresses barriers to data sharing based on (1) federally required support for standard APIs, (2) increasing use of cloud computing, and (3) advances in AI. There is potential for scalability to support learning across myriad network configurations and use cases.}, } @article {pmid38370229, year = {2024}, author = {Yadav, N and Pattabiraman, B and Tummuru, NR and Soundharajan, BS and Kasiviswanathan, KS and Adeloye, AJ and Sen, S and Maurya, M and Vijayalakshmanan, S}, title = {Toward improving water-energy-food nexus through dynamic energy management of solar powered automated irrigation system.}, journal = {Heliyon}, volume = {10}, number = {4}, pages = {e25359}, pmid = {38370229}, issn = {2405-8440}, abstract = {This paper focuses on developing a water and energy-saving reliable irrigation system using state-of-the-art computing, communication, and optimal energy management framework. The framework integrates real-time soil moisture and weather forecasting information to decide the time of irrigation and quantity of water required for potato crops, which is made available to the users across a region through the cloud-based irrigation decision support system. This is accomplished through various modules such as data acquisition, soil moisture forecasting, smart irrigation scheduling, and energy management scheme. The main emphasis is on the electrical segment which demonstrates an energy management scheme for PV-battery based grid-connected system to operate the irrigation system valves and water pump. The proposed scheme is verified through simulation and dSpace-based real-time experiment studies. Overall, the proposed energy management system demonstrates an improvement in the optimal onsite solar power generation and storage capacity to power the solar pump which saves the electrical energy as well as the water in order to establish an improved solar-irrigation system. 
Finally, the proposed system achieved water and energy savings of around 9.24 % for potato crop with full irrigation enhancing the Water-Energy-Food Nexus at field scale.}, } @article {pmid38365804, year = {2024}, author = {Beteri, J and Lyimo, JG and Msinde, JV}, title = {The influence of climatic and environmental variables on sunflower planting season suitability in Tanzania.}, journal = {Scientific reports}, volume = {14}, number = {1}, pages = {3906}, pmid = {38365804}, issn = {2045-2322}, mesh = {Seasons ; *Helianthus ; Tanzania ; Temperature ; Plants ; *Asteraceae ; }, abstract = {Crop survival and growth requires identification of correlations between appropriate suitable planting season and relevant climatic and environmental characteristics. Climatic and environmental conditions may cause water and heat stress at critical stages of crop development and thus affecting planting suitability. Consequently, this may affect crop yield and productivity. This study assesses the influence of climate and environmental variables on rain-fed sunflower planting season suitability in Tanzania. Data on rainfall, temperature, slope, elevation, soil and land use/or cover were accessed from publicly available sources using Google Earth Engine. This is a cloud-based geospatial computing platform for remote sensed datasets. Tanzania sunflower production calendar of 2022 was adopted to mark the start and end limits of planting across the country. The default climate and environmental parameters from FAO database were used. In addition, Pearson correlation was used to evaluate the relationship between rainfall, temperature over Normalized Difference Vegetation Index (NDVI) from 2000 to 2020 at five-year interval for January-April and June-September, for high and poor suitability season. The results showed that planting suitability of sunflower in Tanzania is driven more by rainfall than temperature. 
It was revealed that intra-annual planting suitability increases gradually from short to long-rain season and diminishes towards dry season of the year. January-April planting season window showed the highest suitability (41.65%), whereas June-September indicated the lowest suitability (0.05%). Though, not statistically significant, rainfall and NDVI were positively correlated with r = 0.65 and 0.75 whereas negative correlation existed between temperature and NDVI with r = -0.6 and -0.77. We recommend sunflower subsector interventions that consider appropriate intra-regional and seasonal diversity as an important adaptive mechanism to ensure high sunflower yields.}, } @article {pmid38360949, year = {2024}, author = {Periola, AA and Alonge, AA and Ogudo, KA}, title = {Ocean warming events resilience capability in underwater computing platforms.}, journal = {Scientific reports}, volume = {14}, number = {1}, pages = {3781}, pmid = {38360949}, issn = {2045-2322}, abstract = {Underwater data centers (UDCs) use the ocean's cold-water resources for free cooling and have low cooling costs. However, UDC cooling is affected by marine heat waves, and underwater seismic events thereby affecting UDC functioning continuity. Though feasible, the use of reservoirs for UDC cooling is non-scalable due to the high computing overhead, and inability to support continuity for long duration marine heat waves. The presented research proposes a mobile UDC (capable of migration) to address this challenge. The proposed UDC migrates from high underwater ground displacement ocean regions to regions having no or small underwater ground displacement. It supports multiple client underwater applications without requiring clients to develop, deploy, and launch own UDCs. The manner of resource utilization is influenced by the client's service level agreement. Hence, the proposed UDC provides resilient services to the clients and the requiring applications. 
Analysis shows that using the mobile UDC instead of the existing reservoir UDC approach enhances the operational duration and power usage effectiveness by 8.9-48.5% and 55.6-70.7% on average, respectively. In addition, the overhead is reduced by an average of 95.8-99.4%.}, } @article {pmid38355983, year = {2024}, author = {Kashyap, P and Shivgan, K and Patil, S and Raja, BR and Mahajan, S and Banerjee, S and Tallur, S}, title = {Unsupervised deep learning framework for temperature-compensated damage assessment using ultrasonic guided waves on edge device.}, journal = {Scientific reports}, volume = {14}, number = {1}, pages = {3751}, pmid = {38355983}, issn = {2045-2322}, support = {RD/0118-ISROC00-006//Indian Space Research Organisation/ ; CRG/2021/001959//Science and Engineering Research Board/ ; }, abstract = {Fueled by the rapid development of machine learning (ML) and greater access to cloud computing and graphics processing units, various deep learning based models have been proposed for improving performance of ultrasonic guided wave structural health monitoring (GW-SHM) systems, especially to counter complexity and heterogeneity in data due to varying environmental factors (e.g., temperature) and types of damages. Such models typically comprise of millions of trainable parameters, and therefore add to cost of deployment due to requirements of cloud connectivity and processing, thus limiting the scale of deployment of GW-SHM. In this work, we propose an alternative solution that leverages TinyML framework for development of light-weight ML models that could be directly deployed on embedded edge devices. The utility of our solution is illustrated by presenting an unsupervised learning framework for damage detection in honeycomb composite sandwich structure with disbond and delamination type of damages, validated using data generated by finite element simulations and experiments performed at various temperatures in the range 0-90 °C. 
We demonstrate a fully-integrated solution using a Xilinx Artix-7 FPGA for data acquisition and control, and edge-inference of damage. Despite the limited number of features, the lightweight model shows reasonably high accuracy, thereby enabling detection of small size defects with improved sensitivity on an edge device for online GW-SHM.}, } @article {pmid38351164, year = {2024}, author = {Feng, Q and Niu, B and Ren, Y and Su, S and Wang, J and Shi, H and Yang, J and Han, M}, title = {A 10-m national-scale map of ground-mounted photovoltaic power stations in China of 2020.}, journal = {Scientific data}, volume = {11}, number = {1}, pages = {198}, pmid = {38351164}, issn = {2052-4463}, support = {42001367//National Natural Science Foundation of China (National Science Foundation of China)/ ; }, abstract = {We provide a remote sensing derived dataset for large-scale ground-mounted photovoltaic (PV) power stations in China of 2020, which has high spatial resolution of 10 meters. The dataset is based on the Google Earth Engine (GEE) cloud computing platform via random forest classifier and active learning strategy. Specifically, ground samples are carefully collected across China via both field survey and visual interpretation. Afterwards, spectral and texture features are calculated from publicly available Sentinel-2 imagery. Meanwhile, topographic features consisting of slope and aspect that are sensitive to PV locations are also included, aiming to construct a multi-dimensional and discriminative feature space. Finally, the trained random forest model is adopted to predict PV power stations of China parallelly on GEE. Technical validation has been carefully performed across China which achieved a satisfactory accuracy over 89%. 
Above all, as the first publicly released 10-m national-scale distribution dataset of China's ground-mounted PV power stations, it can provide data references for relevant researchers in fields such as energy, land, remote sensing and environmental sciences.}, } @article {pmid38351065, year = {2024}, author = {Chuntakaruk, H and Hengphasatporn, K and Shigeta, Y and Aonbangkhen, C and Lee, VS and Khotavivattana, T and Rungrotmongkol, T and Hannongbua, S}, title = {FMO-guided design of darunavir analogs as HIV-1 protease inhibitors.}, journal = {Scientific reports}, volume = {14}, number = {1}, pages = {3639}, pmid = {38351065}, issn = {2045-2322}, mesh = {Humans ; Darunavir/pharmacology ; *HIV Protease Inhibitors/pharmacology/chemistry ; *HIV-1/genetics ; Molecular Docking Simulation ; Sulfonamides/pharmacology ; *HIV Infections ; Viral Proteins/genetics ; HIV Protease/metabolism ; Mutation ; Drug Resistance, Viral/genetics ; }, abstract = {The prevalence of HIV-1 infection continues to pose a significant global public health issue, highlighting the need for antiretroviral drugs that target viral proteins to reduce viral replication. One such target is HIV-1 protease (PR), responsible for cleaving viral polyproteins, leading to the maturation of viral proteins. While darunavir (DRV) is a potent HIV-1 PR inhibitor, drug resistance can arise due to mutations in HIV-1 PR. To address this issue, we developed a novel approach using the fragment molecular orbital (FMO) method and structure-based drug design to create DRV analogs. Using combinatorial programming, we generated novel analogs freely accessible via an on-the-cloud mode implemented in Google Colab, Combined Analog generator Tool (CAT). The designed analogs underwent cascade screening through molecular docking with HIV-1 PR wild-type and major mutations at the active site. Molecular dynamics (MD) simulations were performed to assess the ligand binding and susceptibility of screened designed analogs. 
Our findings indicate that the three designed analogs guided by FMO, 19-0-14-3, 19-8-10-0, and 19-8-14-3, are superior to DRV and have the potential to serve as efficient PR inhibitors. These findings demonstrate the effectiveness of our approach and its potential to be used in further studies for developing new antiretroviral drugs.}, } @article {pmid38350039, year = {2024}, author = {Bell, J and Decker, B and Eichmann, A and Palkovich, C and Reji, C}, title = {Effectiveness of Virtual Reality for Upper Extremity Function and Motor Performance of Children With Cerebral Palsy: A Systematic Review.}, journal = {The American journal of occupational therapy : official publication of the American Occupational Therapy Association}, volume = {78}, number = {2}, pages = {}, doi = {10.5014/ajot.2024.050374}, pmid = {38350039}, issn = {0272-9490}, mesh = {Child ; Humans ; Young Adult ; Adult ; *Cerebral Palsy ; Upper Extremity ; *Virtual Reality ; Language ; }, abstract = {IMPORTANCE: Research on the functional and motor performance impact of virtual reality (VR) as an intervention tool for children with cerebral palsy (CP) is limited.

OBJECTIVE: To understand whether VR is an effective intervention to improve upper extremity (UE) function and motor performance of children diagnosed with CP.

DATA SOURCES: Databases used in the search were EBSCOhost, One Search, PubMed, Cloud Source, CINAHL, SPORTDiscus, and Google Scholar.

Studies published from 2006 to 2021 were included if children had a diagnosis of CP and were age 21 yr or younger, VR was used as an intervention, and measures of UE function and motor performance were used.

FINDINGS: Twenty-one studies were included, and the results provided promising evidence for improvements in areas of UE function, motor performance, and fine motor skills when VR is used as an intervention. To yield noticeable UE improvements in children with CP, VR should be implemented for 30 to 60 min/session and for at least 360 min over more than 3 wk. Additional areas of improvement include gross motor skills, functional mobility, occupational performance, and intrinsic factors.

CONCLUSIONS AND RELEVANCE: The use of VR as an intervention for children with CP to improve UE function and motor performance is supported. More randomized controlled trials with larger sample sizes focusing on similar outcomes and intervention frequencies are needed to determine the most effective type of VR for use in clinical occupational therapy. Plain-Language Summary: This systematic review explains how virtual reality (VR) has been used as an intervention with children with cerebral palsy (CP). The review synthesizes the results of 21 research studies of children who had a diagnosis of CP and who were 21 years old or younger. The findings support using VR to improve upper extremity performance, motor performance, and fine motor skills. The findings also show that occupational therapy practitioners should use a VR intervention at a minimum frequency of 30 to 60 minutes per session and for at least 360 minutes over more than 3 weeks to yield noticeable improvements in upper extremity, motor performance, and fine motor skills for children with CP.}, } @article {pmid38347885, year = {2024}, author = {Bhattacharjee, T and Kiwuwa-Muyingo, S and Kanjala, C and Maoyi, ML and Amadi, D and Ochola, M and Kadengye, D and Gregory, A and Kiragga, A and Taylor, A and Greenfield, J and Slaymaker, E and Todd, J and , }, title = {INSPIRE datahub: a pan-African integrated suite of services for harmonising longitudinal population health data using OHDSI tools.}, journal = {Frontiers in digital health}, volume = {6}, number = {}, pages = {1329630}, pmid = {38347885}, issn = {2673-253X}, abstract = {INTRODUCTION: Population health data integration remains a critical challenge in low- and middle-income countries (LMIC), hindering the generation of actionable insights to inform policy and decision-making. This paper proposes a pan-African, Findable, Accessible, Interoperable, and Reusable (FAIR) research architecture and infrastructure named the INSPIRE datahub. 
This cloud-based Platform-as-a-Service (PaaS) and on-premises setup aims to enhance the discovery, integration, and analysis of clinical, population-based surveys, and other health data sources.

METHODS: The INSPIRE datahub, part of the Implementation Network for Sharing Population Information from Research Entities (INSPIRE), employs the Observational Health Data Sciences and Informatics (OHDSI) open-source stack of tools and the Observational Medical Outcomes Partnership (OMOP) Common Data Model (CDM) to harmonise data from African longitudinal population studies. Operating on Microsoft Azure and Amazon Web Services cloud platforms, and on on-premises servers, the architecture offers adaptability and scalability for other cloud providers and technology infrastructure. The OHDSI-based tools enable a comprehensive suite of services for data pipeline development, profiling, mapping, extraction, transformation, loading, documentation, anonymization, and analysis.

RESULTS: The INSPIRE datahub's "On-ramp" services facilitate the integration of data and metadata from diverse sources into the OMOP CDM. The datahub supports the implementation of OMOP CDM across data producers, harmonizing source data semantically with standard vocabularies and structurally conforming to OMOP table structures. Leveraging OHDSI tools, the datahub performs quality assessment and analysis of the transformed data. It ensures FAIR data by establishing metadata flows, capturing provenance throughout the ETL processes, and providing accessible metadata for potential users. The ETL provenance is documented in a machine- and human-readable Implementation Guide (IG), enhancing transparency and usability.

CONCLUSION: The pan-African INSPIRE datahub presents a scalable and systematic solution for integrating health data in LMICs. By adhering to FAIR principles and leveraging established standards like OMOP CDM, this architecture addresses the current gap in generating evidence to support policy and decision-making for improving the well-being of LMIC populations. The federated research network provisions allow data producers to maintain control over their data, fostering collaboration while respecting data privacy and security concerns. A use-case demonstrated the pipeline using OHDSI and other open-source tools.}, } @article {pmid38345858, year = {2024}, author = {Zandesh, Z}, title = {Privacy, Security, and Legal Issues in the Health Cloud: Structured Review for Taxonomy Development.}, journal = {JMIR formative research}, volume = {8}, number = {}, pages = {e38372}, pmid = {38345858}, issn = {2561-326X}, abstract = {BACKGROUND: Privacy in our digital world is a very complicated topic, especially when meeting cloud computing technological achievements with its multidimensional context. Here, privacy is an extended concept that is sometimes referred to as legal, philosophical, or even technical. Consequently, there is a need to harmonize it with other aspects in health care in order to provide a new ecosystem. This new ecosystem can lead to a paradigm shift involving the reconstruction and redesign of some of the most important and essential requirements like privacy concepts, legal issues, and security services. Cloud computing in the health domain has markedly contributed to other technologies, such as mobile health, health Internet of Things, and wireless body area networks, with their increasing numbers of embedded applications. 
Other dependent applications, which are usually used in health businesses like social networks, or some newly introduced applications have issues regarding privacy transparency boundaries and privacy-preserving principles, which have made policy making difficult in the field.

OBJECTIVE: One way to overcome this challenge is to develop a taxonomy to identify all relevant factors. A taxonomy serves to bring conceptual clarity to the set of alternatives in in-person health care delivery. This study aimed to construct a comprehensive taxonomy for privacy in the health cloud, which also provides a prospective landscape for privacy in related technologies.

METHODS: A search was performed for relevant published English papers in databases, including Web of Science, IEEE Digital Library, Google Scholar, Scopus, and PubMed. A total of 2042 papers were related to the health cloud privacy concept according to predefined keywords and search strings. Taxonomy designing was performed using the deductive methodology.

RESULTS: This taxonomy has 3 layers. The first layer has 4 main dimensions, including cloud, data, device, and legal. The second layer has 15 components, and the final layer has related subcategories (n=57). This taxonomy covers some related concepts, such as privacy, security, confidentiality, and legal issues, which are categorized here and defined by their expansion and distinctive boundaries. The main merits of this taxonomy are its ability to clarify privacy terms for different scenarios and signalize the privacy multidisciplinary objectification in eHealth.

CONCLUSIONS: This taxonomy can cover health industry requirements with its specifications like health data and scenarios, which are considered as the most complicated among businesses and industries. Therefore, the use of this taxonomy could be generalized and customized to other domains and businesses that have less complications. Moreover, this taxonomy has different stakeholders, including people, organizations, and systems. If the antecedent effort in the taxonomy is proven, subject matter experts could enhance the extent of privacy in the health cloud by verifying, evaluating, and revising this taxonomy.}, } @article {pmid38345524, year = {2024}, author = {McCoy, ES and Park, SK and Patel, RP and Ryan, DF and Mullen, ZJ and Nesbitt, JJ and Lopez, JE and Taylor-Blake, B and Vanden, KA and Krantz, JL and Hu, W and Garris, RL and Snyder, MG and Lima, LV and Sotocinal, SG and Austin, JS and Kashlan, AD and Shah, S and Trocinski, AK and Pudipeddi, SS and Major, RM and Bazick, HO and Klein, MR and Mogil, JS and Wu, G and Zylka, MJ}, title = {Development of PainFace software to simplify, standardize, and scale up mouse grimace analyses.}, journal = {Pain}, volume = {}, number = {}, pages = {}, doi = {10.1097/j.pain.0000000000003187}, pmid = {38345524}, issn = {1872-6623}, support = {R01NS114259/NS/NINDS NIH HHS/United States ; }, abstract = {Facial grimacing is used to quantify spontaneous pain in mice and other mammals, but scoring relies on humans with different levels of proficiency. Here, we developed a cloud-based software platform called PainFace (http://painface.net) that uses machine learning to detect 4 facial action units of the mouse grimace scale (orbitals, nose, ears, whiskers) and score facial grimaces of black-coated C57BL/6 male and female mice on a 0 to 8 scale.
Platform accuracy was validated in 2 different laboratories, with 3 conditions that evoke grimacing---laparotomy surgery, bilateral hindpaw injection of carrageenan, and intraplantar injection of formalin. PainFace can generate up to 1 grimace score per second from a standard 30 frames/s video, making it possible to quantify facial grimacing over time, and operates at a speed that scales with computing power. By analyzing the frequency distribution of grimace scores, we found that mice spent 7x more time in a "high grimace" state following laparotomy surgery relative to sham surgery controls. Our study shows that PainFace reproducibly quantifies facial grimaces indicative of nonevoked spontaneous pain and enables laboratories to standardize and scale up facial grimace analyses.}, } @article {pmid38344670, year = {2024}, author = {Simpson, RL and Lee, JA and Li, Y and Kang, YJ and Tsui, C and Cimiotti, JP}, title = {Medicare meets the cloud: the development of a secure platform for the storage and analysis of claims data.}, journal = {JAMIA open}, volume = {7}, number = {1}, pages = {ooae007}, pmid = {38344670}, issn = {2574-2531}, support = {R01 HS026232/HS/AHRQ HHS/United States ; }, abstract = {INTRODUCTION: Cloud-based solutions are a modern-day necessity for data intense computing. This case report describes in detail the development and implementation of Amazon Web Services (AWS) at Emory-a secure, reliable, and scalable platform to store and analyze identifiable research data from the Centers for Medicare and Medicaid Services (CMS).

MATERIALS AND METHODS: Interdisciplinary teams from CMS, MBL Technologies, and Emory University collaborated to ensure compliance with CMS policy that consolidates laws, regulations, and other drivers of information security and privacy.

RESULTS: A dedicated team of individuals ensured successful transition from a physical storage server to a cloud-based environment. This included implementing access controls, vulnerability scanning, and audit logs that are reviewed regularly with a remediation plan. User adaptation required specific training to overcome the challenges of cloud computing.

CONCLUSION: Challenges created opportunities for lessons learned through the creation of an end-product accepted by CMS and shared across disciplines university-wide.}, } @article {pmid38339714, year = {2024}, author = {González-Herbón, R and González-Mateos, G and Rodríguez-Ossorio, JR and Domínguez, M and Alonso, S and Fuertes, JJ}, title = {An Approach to Develop Digital Twins in Industry.}, journal = {Sensors (Basel, Switzerland)}, volume = {24}, number = {3}, pages = {}, pmid = {38339714}, issn = {1424-8220}, support = {Grant PID2020-117890RB-I00//Ministerio de Ciencia e Innovación/ ; }, abstract = {The industry is currently undergoing a digital revolution driven by the integration of several enabling technologies. These include automation, robotics, cloud computing, industrial cybersecurity, systems integration, digital twins, etc. Of particular note is the increasing use of digital twins, which offer significant added value by providing realistic and fully functional process simulations. This paper proposes an approach for developing digital twins in industrial environments. The novelty lies in not only focusing on obtaining the model of the industrial system and integrating virtual reality and/or augmented reality but also in emphasizing the importance of incorporating other enabled technologies of Industry 4.0, such as system integration, connectivity with standard and specific industrial protocols, cloud services, or new industrial automation systems, to enhance the capabilities of the digital twin. Furthermore, a proposal of the software tools that can be used to achieve this incorporation is made. Unity is chosen as the real-time 3D development tool for its cross-platform capability and streamlined industrial system modeling. The integration of augmented reality is facilitated by the Vuforia SDK. Node-RED is selected as the system integration option, and communications are carried out with MQTT protocol. 
Finally, cloud-based services are recommended for effective data storage and processing. Furthermore, this approach has been used to develop a digital twin of a robotic electro-pneumatic cell.}, } @article {pmid38339672, year = {2024}, author = {Lu, Y and Zhou, L and Zhang, A and Zha, S and Zhuo, X and Ge, S}, title = {Application of Deep Learning and Intelligent Sensing Analysis in Smart Home.}, journal = {Sensors (Basel, Switzerland)}, volume = {24}, number = {3}, pages = {}, pmid = {38339672}, issn = {1424-8220}, abstract = {Deep learning technology can improve sensing efficiency and has the ability to discover potential patterns in data; the efficiency of user behavior recognition in the field of smart homes has been further improved, making the recognition process more intelligent and humanized. This paper analyzes the optical sensors commonly used in smart homes and their working principles through case studies and explores the technical framework of user behavior recognition based on optical sensors. At the same time, CiteSpace (Basic version 6.2.R6) software is used to visualize and analyze the related literature, elaborate the main research hotspots and evolutionary changes of optical sensor-based smart home user behavior recognition, and summarize the future research trends. Finally, fully utilizing the advantages of cloud computing technology, such as scalability and on-demand services, combining typical life situations and the requirements of smart home users, a smart home data collection and processing technology framework based on elderly fall monitoring scenarios is designed. 
Based on the comprehensive research results, the application and positive impact of optical sensors in smart home user behavior recognition were analyzed, and inspiration was provided for future smart home user experience research.}, } @article {pmid38339591, year = {2024}, author = {Ehtisham, M and Hassan, MU and Al-Awady, AA and Ali, A and Junaid, M and Khan, J and Abdelrahman Ali, YA and Akram, M}, title = {Internet of Vehicles (IoV)-Based Task Scheduling Approach Using Fuzzy Logic Technique in Fog Computing Enables Vehicular Ad Hoc Network (VANET).}, journal = {Sensors (Basel, Switzerland)}, volume = {24}, number = {3}, pages = {}, pmid = {38339591}, issn = {1424-8220}, support = {NU/IFC/2/SERC/-/47//Najran University/ ; Authors would like to acknowledge the support of the Deputy for Research and Innovation Ministry of Education, Kingdom of Saudi Arabia, for this research through a grant (NU/IFC/2/SERC/-/47) under the Institutional Funding Committee at Najran University,//Najran University/ ; }, abstract = {The intelligent transportation system (ITS) relies heavily on the vehicular ad hoc network (VANET) and the internet of vehicles (IoVs), which combine cloud and fog to improve task processing capabilities. As a cloud extension, the fog processes' infrastructure is close to VANET, fostering an environment favorable to smart cars with IT equipment and effective task management oversight. Vehicle processing power, bandwidth, time, and high-speed mobility are all limited in VANET. It is critical to satisfy the vehicles' requirements for minimal latency and fast reaction times while offloading duties to the fog layer. We proposed a fuzzy logic-based task scheduling system in VANET to minimize latency and improve the enhanced response time when offloading tasks in the IoV. The proposed method effectively transfers workloads to the fog computing layer while considering the constrained resources of car nodes. 
After choosing a suitable processing unit, the algorithm sends the job and its associated resources to the fog layer. The dataset is related to crisp values for fog computing for system utilization, latency, and task deadline time for over 5000 values. The task execution, latency, deadline of task, storage, CPU, and bandwidth utilizations are used for fuzzy set values. We proved the effectiveness of our proposed task scheduling framework via simulation tests, outperforming current algorithms in terms of task ratio by 13%, decreasing average turnaround time by 9%, minimizing makespan time by 15%, and effectively overcoming average latency time within the network parameters. The proposed technique shows better results and responses than previous techniques by scheduling the tasks toward fog layers with less response time and minimizing the overall time from task submission to completion.}, } @article {pmid38339582, year = {2024}, author = {Hassan, MU and Al-Awady, AA and Ali, A and Iqbal, MM and Akram, M and Jamil, H}, title = {Smart Resource Allocation in Mobile Cloud Next-Generation Network (NGN) Orchestration with Context-Aware Data and Machine Learning for the Cost Optimization of Microservice Applications.}, journal = {Sensors (Basel, Switzerland)}, volume = {24}, number = {3}, pages = {}, pmid = {38339582}, issn = {1424-8220}, support = {NU/IFC/2/SERC/-/47//Deputy for Research and Innovation Ministry of Education, Kingdom of Saudi Arabia/ ; }, abstract = {Mobile cloud computing (MCC) provides resources to users to handle smart mobile applications. In MCC, task scheduling is the solution for mobile users' context-aware computation resource-rich applications. Most existing approaches have achieved a moderate service reliability rate due to a lack of instance-centric resource estimations and task offloading, a statistical NP-hard problem. The current intelligent scheduling process cannot address NP-hard problems due to traditional task offloading approaches. 
To address this problem, the authors design an efficient context-aware service offloading approach based on instance-centric measurements. The revised machine learning model/algorithm employs task adaptation to make decisions regarding task offloading. The proposed MCVS scheduling algorithm predicts the usage rates of individual microservices for a practical task scheduling scheme, considering mobile device time, cost, network, location, and central processing unit (CPU) power to train data. One notable feature of the microservice software architecture is its capacity to facilitate the scalability, flexibility, and independent deployment of individual components. A series of simulation results show the efficiency of the proposed technique based on offloading, CPU usage, and execution time metrics. The experimental results efficiently show the learning rate in training and testing in comparison with existing approaches, showing efficient training and task offloading phases. The proposed system has lower costs and uses less energy to offload microservices in MCC. Graphical results are presented to define the effectiveness of the proposed model. For a service arrival rate of 80%, the proposed model achieves an average 4.5% service offloading rate and 0.18% CPU usage rate compared with state-of-the-art approaches. 
The proposed method demonstrates efficiency in terms of cost and energy savings for microservice offloading in mobile cloud computing (MCC).}, } @article {pmid38339552, year = {2024}, author = {Parracciani, C and Gigante, D and Bonini, F and Grassi, A and Morbidini, L and Pauselli, M and Valenti, B and Lilli, E and Antonielli, F and Vizzari, M}, title = {Leveraging Google Earth Engine for a More Effective Grassland Management: A Decision Support Application Perspective.}, journal = {Sensors (Basel, Switzerland)}, volume = {24}, number = {3}, pages = {}, pmid = {38339552}, issn = {1424-8220}, mesh = {Animals ; Humans ; *Ecosystem ; *Grassland ; Search Engine ; Biodiversity ; Agriculture ; Livestock ; }, abstract = {Grasslands cover a substantial portion of the earth's surface and agricultural land and is crucial for human well-being and livestock farming. Ranchers and grassland management authorities face challenges in effectively controlling herders' grazing behavior and grassland utilization due to underdeveloped infrastructure and poor communication in pastoral areas. Cloud-based grazing management and decision support systems (DSS) are needed to address this issue, promote sustainable grassland use, and preserve their ecosystem services. These systems should enable rapid and large-scale grassland growth and utilization monitoring, providing a basis for decision-making in managing grazing and grassland areas. In this context, this study contributes to the objectives of the EU LIFE IMAGINE project, aiming to develop a Web-GIS app for conserving and monitoring Umbria's grasslands and promoting more informed decisions for more sustainable livestock management. The app, called "Praterie" and developed in Google Earth Engine, utilizes historical Sentinel-2 satellite data and harmonic modeling of the EVI (Enhanced Vegetation Index) to estimate vegetation growth curves and maturity periods for the forthcoming vegetation cycle. 
The app is updated in quasi-real time and enables users to visualize estimates for the upcoming vegetation cycle, including the maximum greenness, the days remaining to the subsequent maturity period, the accuracy of the harmonic models, and the grassland greenness status in the previous 10 days. Even though future additional developments can improve the informative value of the Praterie app, this platform can contribute to optimizing livestock management and biodiversity conservation by providing timely and accurate data about grassland status and growth curves.}, } @article {pmid38339545, year = {2024}, author = {Gragnaniello, M and Borghese, A and Marrazzo, VR and Maresca, L and Breglio, G and Irace, A and Riccio, M}, title = {Real-Time Myocardial Infarction Detection Approaches with a Microcontroller-Based Edge-AI Device.}, journal = {Sensors (Basel, Switzerland)}, volume = {24}, number = {3}, pages = {}, pmid = {38339545}, issn = {1424-8220}, support = {PNC0000007//Italian Ministry for Universities and Research (MUR)/ ; }, mesh = {Humans ; *Myocardial Infarction/diagnosis ; Heart ; *Heart Diseases ; Myocardium ; Algorithms ; }, abstract = {Myocardial Infarction (MI), commonly known as heart attack, is a cardiac condition characterized by damage to a portion of the heart, specifically the myocardium, due to the disruption of blood flow. Given its recurring and often asymptomatic nature, there is the need for continuous monitoring using wearable devices. This paper proposes a single-microcontroller-based system designed for the automatic detection of MI based on the Edge Computing paradigm. Two solutions for MI detection are evaluated, based on Machine Learning (ML) and Deep Learning (DL) techniques. The developed algorithms are based on two different approaches currently available in the literature, and they are optimized for deployment on low-resource hardware. 
A feasibility assessment of their implementation on a single 32-bit microcontroller with an ARM Cortex-M4 core was examined, and a comparison in terms of accuracy, inference time, and memory usage was detailed. For ML techniques, significant data processing for feature extraction, coupled with a simpler Neural Network (NN) is involved. On the other hand, the second method, based on DL, employs a Spectrogram Analysis for feature extraction and a Convolutional Neural Network (CNN) with a longer inference time and higher memory utilization. Both methods employ the same low power hardware reaching an accuracy of 89.40% and 94.76%, respectively. The final prototype is an energy-efficient system capable of real-time detection of MI without the need to connect to remote servers or the cloud. All processing is performed at the edge, enabling NN inference on the same microcontroller.}, } @article {pmid38332408, year = {2024}, author = {Huang, Z and Herbozo Contreras, LF and Yu, L and Truong, ND and Nikpour, A and Kavehei, O}, title = {S4D-ECG: A Shallow State-of-the-Art Model for Cardiac Abnormality Classification.}, journal = {Cardiovascular engineering and technology}, volume = {}, number = {}, pages = {}, pmid = {38332408}, issn = {1869-4098}, abstract = {PURPOSE: This study introduces an algorithm specifically designed for processing unprocessed 12-lead electrocardiogram (ECG) data, with the primary aim of detecting cardiac abnormalities.

METHODS: The proposed model integrates Diagonal State Space Sequence (S4D) model into its architecture, leveraging its effectiveness in capturing dynamics within time-series data. The S4D model is designed with stacked S4D layers for processing raw input data and a simplified decoder using a dense layer for predicting abnormality types. Experimental optimization determines the optimal number of S4D layers, striking a balance between computational efficiency and predictive performance. This comprehensive approach ensures the model's suitability for real-time processing on hardware devices with limited capabilities, offering a streamlined yet effective solution for heart monitoring.

RESULTS: Among the notable features of this algorithm is its strong resilience to noise, enabling the algorithm to achieve an average F1-score of 81.2% and an AUROC of 95.5% in generalization. The model underwent testing specifically on the lead II ECG signal, exhibiting consistent performance with an F1-score of 79.5% and an AUROC of 95.7%.

CONCLUSION: It is characterized by the elimination of pre-processing features and the availability of a low-complexity architecture that makes it suitable for implementation on numerous computing devices because it is easily implementable. Consequently, this algorithm exhibits considerable potential for practical applications in analyzing real-world ECG data. This model can be placed on the cloud for diagnosis. The model was also tested on lead II of the ECG alone and has demonstrated promising results, supporting its potential for on-device application.}, } @article {pmid38327871, year = {2024}, author = {Schönherr, S and Schachtl-Riess, JF and Di Maio, S and Filosi, M and Mark, M and Lamina, C and Fuchsberger, C and Kronenberg, F and Forer, L}, title = {Performing highly parallelized and reproducible GWAS analysis on biobank-scale data.}, journal = {NAR genomics and bioinformatics}, volume = {6}, number = {1}, pages = {lqae015}, pmid = {38327871}, issn = {2631-9268}, abstract = {Genome-wide association studies (GWAS) are transforming genetic research and enable the detection of novel genotype-phenotype relationships. In the last two decades, over 60 000 genetic associations across thousands of traits have been discovered using a GWAS approach. Due to increasing sample sizes, researchers are increasingly faced with computational challenges. A reproducible, modular and extensible pipeline with a focus on parallelization is essential to simplify data analysis and to allow researchers to devote their time to other essential tasks. Here we present nf-gwas, a Nextflow pipeline to run biobank-scale GWAS analysis. The pipeline automatically performs numerous pre- and post-processing steps, integrates regression modeling from the REGENIE package and supports single-variant, gene-based and interaction testing. It includes an extensive reporting functionality that allows to inspect thousands of phenotypes and navigate interactive Manhattan plots directly in the web browser. 
The pipeline is tested using the unit-style testing framework nf-test, a crucial requirement in clinical and pharmaceutical settings. Furthermore, we validated the pipeline against published GWAS datasets and benchmarked the pipeline on high-performance computing and cloud infrastructures to provide cost estimations to end users. nf-gwas is a highly parallelized, scalable and well-tested Nextflow pipeline to perform GWAS analysis in a reproducible manner.}, } @article {pmid38324613, year = {2024}, author = {Swetnam, TL and Antin, PB and Bartelme, R and Bucksch, A and Camhy, D and Chism, G and Choi, I and Cooksey, AM and Cosi, M and Cowen, C and Culshaw-Maurer, M and Davey, R and Davey, S and Devisetty, U and Edgin, T and Edmonds, A and Fedorov, D and Frady, J and Fonner, J and Gillan, JK and Hossain, I and Joyce, B and Lang, K and Lee, T and Littin, S and McEwen, I and Merchant, N and Micklos, D and Nelson, A and Ramsey, A and Roberts, S and Sarando, P and Skidmore, E and Song, J and Sprinkle, MM and Srinivasan, S and Stanzione, D and Strootman, JD and Stryeck, S and Tuteja, R and Vaughn, M and Wali, M and Wall, M and Walls, R and Wang, L and Wickizer, T and Williams, J and Wregglesworth, J and Lyons, E}, title = {CyVerse: Cyberinfrastructure for open science.}, journal = {PLoS computational biology}, volume = {20}, number = {2}, pages = {e1011270}, pmid = {38324613}, issn = {1553-7358}, mesh = {Humans ; *Artificial Intelligence ; *Software ; Cloud Computing ; Publishing ; }, abstract = {CyVerse, the largest publicly-funded open-source research cyberinfrastructure for life sciences, has played a crucial role in advancing data-driven research since the 2010s. As the technology landscape evolved with the emergence of cloud computing platforms, machine learning and artificial intelligence (AI) applications, CyVerse has enabled access by providing interfaces, Software as a Service (SaaS), and cloud-native Infrastructure as Code (IaC) to leverage new technologies. 
CyVerse services enable researchers to integrate institutional and private computational resources and custom software, perform analyses, and publish data in accordance with open science principles. Over the past 13 years, CyVerse has registered more than 124,000 verified accounts from 160 countries and was used for over 1,600 peer-reviewed publications. Since 2011, 45,000 students and researchers have been trained to use CyVerse. The platform has been replicated and deployed in three countries outside the US, with additional private deployments on commercial clouds for US government agencies and multinational corporations. In this manuscript, we present a strategic blueprint for creating and managing SaaS cyberinfrastructure and IaC as free and open-source software.

METHODS: In this paper, we describe the technical (I) front-end design and (II) back-end development process of the Baltimore Urban food Distribution (BUD) application (app). We identify and detail four main phases of the process: (I) information architecture; (II) low and high-fidelity wireframes; (III) prototype; and (IV) back-end components, while considering formative research and a pre-pilot test of a preliminary version of the BUD app.

RESULTS: Our lessons learned provide valuable insight into developing a stable app with a user-friendly experience and interface, and accessible cloud computing services for advanced technical features.

CONCLUSIONS: Next steps will involve a pilot trial of the app in Baltimore, and eventually, other urban and rural settings nationwide. Once iterative feedback is incorporated into the app, all code will be made publicly available via an open source repository to encourage adaptation for desired communities.

TRIAL REGISTRATION: ClinicalTrials.gov NCT05010018.}, } @article {pmid38321247, year = {2024}, author = {Pacios, D and Vázquez-Poletti, JL and Dhuri, DB and Atri, D and Moreno-Vozmediano, R and Lillis, RJ and Schetakis, N and Gómez-Sanz, J and Iorio, AD and Vázquez, L}, title = {A serverless computing architecture for Martian aurora detection with the Emirates Mars Mission.}, journal = {Scientific reports}, volume = {14}, number = {1}, pages = {3029}, pmid = {38321247}, issn = {2045-2322}, support = {101007638//Horizon 2020 Framework Programme/ ; G1502//New York University Abu Dhabi/ ; S1560//Advanced Technology Research Council/ ; }, abstract = {Remote sensing technologies are experiencing a surge in adoption for monitoring Earth's environment, demanding more efficient and scalable methods for image analysis. This paper presents a new approach for the Emirates Mars Mission (Hope probe); A serverless computing architecture designed to analyze images of Martian auroras, a key aspect in understanding the Martian atmosphere. Harnessing the power of OpenCV and machine learning algorithms, our architecture offers image classification, object detection, and segmentation in a swift and cost-effective manner. Leveraging the scalability and elasticity of cloud computing, this innovative system is capable of managing high volumes of image data, adapting to fluctuating workloads. 
This technology, applied to the study of Martian auroras within the HOPE Mission, not only solves a complex problem but also paves the way for future applications in the broad field of remote sensing.}, } @article {pmid38315519, year = {2024}, author = {Xu, J}, title = {The Current Status and Promotional Strategies for Cloud Migration of Hospital Information Systems in China: Strengths, Weaknesses, Opportunities, and Threats Analysis.}, journal = {JMIR medical informatics}, volume = {12}, number = {}, pages = {e52080}, pmid = {38315519}, issn = {2291-9694}, abstract = {BACKGROUND: In the 21st century, Chinese hospitals have witnessed innovative medical business models, such as online diagnosis and treatment, cross-regional multidepartment consultation, and real-time sharing of medical test results, that surpass traditional hospital information systems (HISs). The introduction of cloud computing provides an excellent opportunity for hospitals to address these challenges. However, there is currently no comprehensive research assessing the cloud migration of HISs in China. This lack may hinder the widespread adoption and secure implementation of cloud computing in hospitals.

OBJECTIVE: The objective of this study is to comprehensively assess external and internal factors influencing the cloud migration of HISs in China and propose promotional strategies.

METHODS: Academic articles from January 1, 2007, to February 21, 2023, on the topic were searched in PubMed and HuiyiMd databases, and relevant documents such as national policy documents, white papers, and survey reports were collected from authoritative sources for analysis. A systematic assessment of factors influencing cloud migration of HISs in China was conducted by combining a Strengths, Weaknesses, Opportunities, and Threats (SWOT) analysis and literature review methods. Then, various promotional strategies based on different combinations of external and internal factors were proposed.

RESULTS: After conducting a thorough search and review, this study included 94 academic articles and 37 relevant documents. The analysis of these documents reveals the increasing application of and research on cloud computing in Chinese hospitals, and that it has expanded to 22 disciplinary domains. However, more than half (n=49, 52%) of the documents primarily focused on task-specific cloud-based systems in hospitals, while only 22% (n=21 articles) discussed integrated cloud platforms shared across the entire hospital, medical alliance, or region. The SWOT analysis showed that cloud computing adoption in Chinese hospitals benefits from policy support, capital investment, and social demand for new technology. However, it also faces threats like loss of digital sovereignty, supplier competition, cyber risks, and insufficient supervision. Factors driving cloud migration for HISs include medical big data analytics and use, interdisciplinary collaboration, health-centered medical service provision, and successful cases. Barriers include system complexity, security threats, lack of strategic planning and resource allocation, relevant personnel shortages, and inadequate investment. This study proposes 4 promotional strategies: encouraging more hospitals to migrate, enhancing hospitals' capabilities for migration, establishing a provincial-level unified medical hybrid multi-cloud platform, strengthening legal frameworks, and providing robust technical support.

CONCLUSIONS: Cloud computing is an innovative technology that has gained significant attention from both the Chinese government and the global community. In order to effectively support the rapid growth of a novel, health-centered medical industry, it is imperative for Chinese health authorities and hospitals to seize this opportunity by implementing comprehensive strategies aimed at encouraging hospitals to migrate their HISs to the cloud.}, } @article {pmid38312948, year = {2024}, author = {Ssekagiri, A and Jjingo, D and Bbosa, N and Bugembe, DL and Kateete, DP and Jordan, IK and Kaleebu, P and Ssemwanga, D}, title = {HIVseqDB: a portable resource for NGS and sample metadata integration for HIV-1 drug resistance analysis.}, journal = {Bioinformatics advances}, volume = {4}, number = {1}, pages = {vbae008}, pmid = {38312948}, issn = {2635-0041}, support = {MC_UU_00027/5/MRC_/Medical Research Council/United Kingdom ; }, abstract = {SUMMARY: Human immunodeficiency virus (HIV) remains a public health threat, with drug resistance being a major concern in HIV treatment. Next-generation sequencing (NGS) is a powerful tool for identifying low-abundance drug resistance mutations (LA-DRMs) that conventional Sanger sequencing cannot reliably detect. To fully understand the significance of LA-DRMs, it is necessary to integrate NGS data with clinical and demographic data. However, freely available tools for NGS-based HIV-1 drug resistance analysis do not integrate these data. This poses a challenge in interpretation of the impact of LA-DRMs, mainly for resource-limited settings due to the shortage of bioinformatics expertise. To address this challenge, we present HIVseqDB, a portable, secure, and user-friendly resource for integrating NGS data with associated clinical and demographic data for analysis of HIV drug resistance. 
HIVseqDB currently supports uploading of NGS data and associated sample data, HIV-1 drug resistance data analysis, browsing of uploaded data, and browsing and visualizing of analysis results. Each function of HIVseqDB corresponds to an individual Django application. This ensures efficient incorporation of additional features with minimal effort. HIVseqDB can be deployed on various computing environments, such as on-premises high-performance computing facilities and cloud-based platforms.

HIVseqDB is available at https://github.com/AlfredUg/HIVseqDB. A deployed instance of HIVseqDB is available at https://hivseqdb.org.}, } @article {pmid38308984, year = {2024}, author = {Lan, L and Wang, YG and Chen, HS and Gao, XR and Wang, XK and Yan, XF}, title = {Improving on mapping long-term surface water with a novel framework based on the Landsat imagery series.}, journal = {Journal of environmental management}, volume = {353}, number = {}, pages = {120202}, doi = {10.1016/j.jenvman.2024.120202}, pmid = {38308984}, issn = {1095-8630}, mesh = {*Water ; *Environmental Monitoring/methods ; Satellite Imagery ; Environment ; Algorithms ; }, abstract = {Surface water plays a crucial role in the ecological environment and societal development. Remote sensing detection serves as a significant approach to understand the temporal and spatial change in surface water series (SWS) and to directly construct long-term SWS. Limited by various factors such as cloud, cloud shadow, and problematic satellite sensor monitoring, the existent surface water mapping datasets might be short and incomplete due to losing raw information on certain dates. Improved algorithms are desired to increase the completeness and quality of SWS datasets. The present study proposes an automated framework to detect SWS, based on the Google Earth Engine and Landsat satellite imagery. This framework incorporates implementing a raw image filtering algorithm to increase available images, thereby expanding the completeness. It improves OTSU thresholding by replacing anomaly thresholds with the median value, thus enhancing the accuracy of SWS datasets. Gaps caused by Landsat7 ETM + SLC-off are repaired with the random forest algorithm and morphological operations. The results show that this novel framework effectively expands the long-term series of SWS for three surface water bodies with distinct geomorphological patterns. 
The evaluation of confusion matrices suggests the good performance of extracting surface water, with the overall accuracy ranging from 0.96 to 0.97, and user's accuracy between 0.96 and 0.98, producer's accuracy ranging from 0.83 to 0.89, and Matthews correlation coefficient ranging from 0.87 to 0.9 for several spectral water indices (NDWI, MNDWI, ANNDWI, and AWEI). Compared with the Global Reservoirs Surface Area Dynamics (GRSAD) dataset, our constructed datasets promote greater completeness of SWS datasets by 27.01%-91.89% for the selected water bodies. The proposed framework for detecting SWS shows good potential in enlarging and completing long-term global-scale SWS datasets, capable of supporting assessments of surface-water-related environmental management and disaster prevention.}, } @article {pmid38303478, year = {2024}, author = {Lv, W and Chen, J and Cheng, S and Qiu, X and Li, D}, title = {QoS-driven resource allocation in fog radio access network: A VR service perspective.}, journal = {Mathematical biosciences and engineering : MBE}, volume = {21}, number = {1}, pages = {1573-1589}, doi = {10.3934/mbe.2024068}, pmid = {38303478}, issn = {1551-0018}, abstract = {While immersive media services represented by virtual reality (VR) are booming, they are facing fundamental challenges, i.e., soaring multimedia applications, large operation costs and scarce spectrum resources. It is difficult to simultaneously address these service challenges in a conventional radio access network (RAN) system. These problems motivated us to explore a quality-of-service (QoS)-driven resource allocation framework from VR service perspective based on the fog radio access network (F-RAN) architecture. We elaborated details of deployment on the caching allocation, dynamic base station (BS) clustering, statistical beamforming and cost strategy under the QoS constraints in the F-RAN architecture. 
The key solutions aimed to break through the bottleneck of the network design and to deeply integrate the network-computing resources from different perspectives of cloud, network, edge, terminal and use of collaboration and integration. Accordingly, we provided a tailored algorithm to solve the corresponding formulation problem. This is the first design of VR services based on caching and statistical beamforming under the F-RAN. A case study is provided to demonstrate the advantage of our proposed framework compared with existing schemes. Finally, we concluded the article and discussed possible open research problems.}, } @article {pmid38303438, year = {2024}, author = {Niu, Q and Li, H and Liu, Y and Qin, Z and Zhang, LB and Chen, J and Lyu, Z}, title = {Toward the Internet of Medical Things: Architecture, trends and challenges.}, journal = {Mathematical biosciences and engineering : MBE}, volume = {21}, number = {1}, pages = {650-678}, doi = {10.3934/mbe.2024028}, pmid = {38303438}, issn = {1551-0018}, mesh = {*Artificial Intelligence ; Big Data ; Cloud Computing ; Internet ; *Internet of Things ; }, abstract = {In recent years, the growing pervasiveness of wearable technology has created new opportunities for medical and emergency rescue operations to protect users' health and safety, such as cost-effective medical solutions, more convenient healthcare and quick hospital treatments, which make it easier for the Internet of Medical Things (IoMT) to evolve. The study first presents an overview of the IoMT before introducing the IoMT architecture. Later, it portrays an overview of the core technologies of the IoMT, including cloud computing, big data and artificial intelligence, and it elucidates their utilization within the healthcare system. 
Further, several emerging challenges, such as cost-effectiveness, security, privacy, accuracy and power consumption, are discussed, and potential solutions for these challenges are also suggested.}, } @article {pmid38301786, year = {2024}, author = {Shrestha, N and Kolarik, NE and Brandt, JS}, title = {Mesic vegetation persistence: A new approach for monitoring spatial and temporal changes in water availability in dryland regions using cloud computing and the sentinel and Landsat constellations.}, journal = {The Science of the total environment}, volume = {917}, number = {}, pages = {170491}, doi = {10.1016/j.scitotenv.2024.170491}, pmid = {38301786}, issn = {1879-1026}, abstract = {Climate change and anthropogenic activity pose severe threats to water availability in drylands. A better understanding of water availability response to these threats could improve our ability to adapt and mitigate climate and anthropogenic effects. Here, we present a Mesic Vegetation Persistence (MVP) workflow that takes every usable image in the Sentinel (10-m) and Landsat (30-m) archives to generate a dense time-series of water availability that is continuously updated as new images become available in Google Earth Engine. MVP takes advantage of the fact that mesic vegetation can be used as a proxy of available water in drylands. Our MVP workflow combines a novel moisture-based index (moisture change index - MCI) with a vegetation index (Modified Chlorophyll Absorption Ratio Vegetation Index (MCARI2)). MCI is the difference in soil moisture condition between an individual pixel's state and the dry and wet reference reflectance in the image, derived using 5th and 95th percentiles of the visible and shortwave infra-red drought index (VSDI). We produced and validated our MVP products across drylands of the western U.S., covering a broad range of elevation, land use, and ecoregions. 
MVP outperforms NDVI, a commonly-employed index for mesic ecosystem health, in both rangeland and forested ecosystems, and in mesic habitats with particularly high and low vegetation cover. We applied our MVP product at case study sites and found that MVP more accurately characterizes differences in mesic persistence, late-season water availability, and restoration success compared to NDVI. MVP could be applied as an indicator of change in a variety of contexts to provide a greater understanding of how water availability changes as a result of climate and management. Our MVP product for the western U.S. is freely available within a Google Earth Engine Web App, and the MVP workflow is replicable for other dryland regions.}, } @article {pmid38293581, year = {2024}, author = {Zurqani, HA}, title = {The first generation of a regional-scale 1-m forest canopy cover dataset using machine learning and google earth engine cloud computing platform: A case study of Arkansas, USA.}, journal = {Data in brief}, volume = {52}, number = {}, pages = {109986}, pmid = {38293581}, issn = {2352-3409}, abstract = {Forest canopy cover (FCC) is essential in forest assessment and management, affecting ecosystem services such as carbon sequestration, wildlife habitat, and water regulation. Ongoing advancements in techniques for accurately and efficiently mapping and extracting FCC information require a thorough evaluation of their validity and reliability. The primary objectives of this study are to: (1) create a large-scale forest FCC dataset with a 1-meter spatial resolution, (2) assess the regional spatial distribution of FCC at a regional scale, and (3) investigate differences in FCC areas among the Global Forest Change (Hansen et al., 2013) and U.S. Forest Service Tree Canopy Cover products at various spatial scales in Arkansas (i.e., county and city levels). 
This study utilized high-resolution aerial imagery and a machine learning algorithm processed and analyzed using the Google Earth Engine cloud computing platform to produce the FCC dataset. The accuracy of this dataset was validated using one-third of the reference locations obtained from the Global Forest Change (Hansen et al., 2013) dataset and the National Agriculture Imagery Program (NAIP) aerial imagery with a 0.6-m spatial resolution. The results showed that the dataset successfully identified FCC at a 1-m resolution in the study area, with overall accuracy ranging between 83.31% and 94.35% per county. Spatial comparison results between the produced FCC dataset and the Hansen et al., 2013 and USFS products indicated a strong positive correlation, with R[2] values ranging between 0.94 and 0.98 for county and city levels. This dataset provides valuable information for monitoring, forecasting, and managing forest resources in Arkansas and beyond. The methodology followed in this study enhances efficiency, cost-effectiveness, and scalability, as it enables the processing of large-scale datasets with high computational demands in a cloud-based environment. It also demonstrates that machine learning and cloud computing technologies can generate high-resolution forest cover datasets, which might be helpful in other regions of the world.}, } @article {pmid38292471, year = {2024}, author = {Li, W and Zhang, Z and Xie, B and He, Y and He, K and Qiu, H and Lu, Z and Jiang, C and Pan, X and He, Y and Hu, W and Liu, W and Que, T and Hu, Y}, title = {HiOmics: A cloud-based one-stop platform for the comprehensive analysis of large-scale omics data.}, journal = {Computational and structural biotechnology journal}, volume = {23}, number = {}, pages = {659-668}, pmid = {38292471}, issn = {2001-0370}, abstract = {Analyzing the vast amount of omics data generated comprehensively by high-throughput sequencing technology is of utmost importance for scientists. 
In this context, we propose HiOmics, a cloud-based platform equipped with nearly 300 plugins designed for the comprehensive analysis and visualization of omics data. HiOmics utilizes the Element Plus framework to craft a user-friendly interface and harnesses Docker container technology to ensure the reliability and reproducibility of data analysis results. Furthermore, HiOmics employs the Workflow Description Language and Cromwell engine to construct workflows, ensuring the portability of data analysis and simplifying the examination of intricate data. Additionally, HiOmics has developed DataCheck, a tool based on Golang, which verifies and converts data formats. Finally, by leveraging the object storage technology and batch computing capabilities of public cloud platforms, HiOmics enables the storage and processing of large-scale data while maintaining resource independence among users.}, } @article {pmid38289970, year = {2024}, author = {Abbasi, IA and Jan, SU and Alqahtani, AS and Khan, AS and Algarni, F}, title = {A lightweight and robust authentication scheme for the healthcare system using public cloud server.}, journal = {PloS one}, volume = {19}, number = {1}, pages = {e0294429}, pmid = {38289970}, issn = {1932-6203}, mesh = {Humans ; *Confidentiality ; *Telemedicine ; Computer Security ; Delivery of Health Care ; Privacy ; }, abstract = {Cloud computing is vital in various applications, such as healthcare, transportation, governance, and mobile computing. When using a public cloud server, it is mandatory to be secured from all known threats because a minor attacker's disturbance severely threatens the whole system. A public cloud server is posed with numerous threats; an adversary can easily enter the server to access sensitive information, especially for the healthcare industry, which offers services to patients, researchers, labs, and hospitals in a flexible way with minimal operational costs. 
It is challenging to make it a reliable system and ensure the privacy and security of a cloud-enabled healthcare system. In this regard, numerous security mechanisms have been proposed in past decades. These protocols either suffer from replay attacks, are completed in three to four round trips or have maximum computation, which means the security doesn't balance with performance. Thus, this work uses a fuzzy extractor method to propose a robust security method for a cloud-enabled healthcare system based on Elliptic Curve Cryptography (ECC). The proposed scheme's security analysis has been examined formally with BAN logic, ROM and ProVerif and informally using pragmatic illustration and different attacks' discussions. The proposed security mechanism is analyzed in terms of communication and computation costs. Upon comparing the proposed protocol with prior work, it has been demonstrated that our scheme is 33.91% better in communication costs and 35.39% superior to its competitors in computation costs.}, } @article {pmid38289917, year = {2024}, author = {Sun, Y and Du, X and Niu, S and Zhou, S}, title = {A lightweight attribute-based signcryption scheme based on cloud-fog assisted in smart healthcare.}, journal = {PloS one}, volume = {19}, number = {1}, pages = {e0297002}, pmid = {38289917}, issn = {1932-6203}, mesh = {Humans ; *Computer Security ; *Algorithms ; Big Data ; Cloud Computing ; Delivery of Health Care ; }, abstract = {In the environment of big data of the Internet of Things, smart healthcare is developed in combination with cloud computing. However, with the generation of massive data in smart healthcare systems and the need for real-time data processing, traditional cloud computing is no longer suitable for resources-constrained devices in the Internet of Things. In order to address this issue, we combine the advantages of fog computing and propose a cloud-fog assisted attribute-based signcryption for smart healthcare. 
In the constructed "cloud-fog-terminal" three-layer model, before the patient (data owner) signcryption, it first offloads some heavy computation burden to fog nodes and the doctor (data user) also outsources some complicated operations to fog nodes before unsigncryption by providing a blinded private key, which greatly reduces the calculation overhead of resource-constrained devices of patient and doctor and improves the calculation efficiency. Thus it implements a lightweight signcryption algorithm. Security analysis confirms that the proposed scheme achieves indistinguishability under chosen ciphertext attack and existential unforgeability under chosen message attack if the computational bilinear Diffie-Hellman problem and the decisional bilinear Diffie-Hellman problem hold. Furthermore, performance analysis demonstrates that our new scheme has less computational overhead for both doctors and patients, so it offers higher computational efficiency and is well-suited for application scenarios of smart healthcare.}, } @article {pmid38283301, year = {2024}, author = {Amjad, S and Akhtar, A and Ali, M and Afzal, A and Shafiq, B and Vaidya, J and Shamail, S and Rana, O}, title = {Orchestration and Management of Adaptive IoT-centric Distributed Applications.}, journal = {IEEE internet of things journal}, volume = {11}, number = {3}, pages = {3779-3791}, pmid = {38283301}, issn = {2327-4662}, support = {R35 GM134927/GM/NIGMS NIH HHS/United States ; }, abstract = {Current Internet of Things (IoT) devices provide a diverse range of functionalities, ranging from measurement and dissemination of sensory data observation, to computation services for real-time data stream processing. In extreme situations such as emergencies, a significant benefit of IoT devices is that they can help gain a more complete situational understanding of the environment. 
However, this requires the ability to utilize IoT resources while taking into account location, battery life, and other constraints of the underlying edge and IoT devices. A dynamic approach is proposed for orchestration and management of distributed workflow applications using services available in cloud data centers, deployed on servers, or IoT devices at the network edge. Our proposed approach is specifically designed for knowledge-driven business process workflows that are adaptive, interactive, evolvable and emergent. A comprehensive empirical evaluation shows that the proposed approach is effective and resilient to situational changes.}, } @article {pmid38273718, year = {2024}, author = {Wu, Y and Sanati, O and Uchimiya, M and Krishnamurthy, K and Wedell, J and Hoch, JC and Edison, AS and Delaglio, F}, title = {SAND: Automated Time-Domain Modeling of NMR Spectra Applied to Metabolite Quantification.}, journal = {Analytical chemistry}, volume = {96}, number = {5}, pages = {1843-1851}, pmid = {38273718}, issn = {1520-6882}, support = {P41 GM111135/GM/NIGMS NIH HHS/United States ; }, mesh = {*Algorithms ; Magnetic Resonance Spectroscopy ; *Magnetic Resonance Imaging ; Software ; Metabolomics ; }, abstract = {Developments in untargeted nuclear magnetic resonance (NMR) metabolomics enable the profiling of thousands of biological samples. The exploitation of this rich source of information requires a detailed quantification of spectral features. However, the development of a consistent and automatic workflow has been challenging because of extensive signal overlap. To address this challenge, we introduce the software Spectral Automated NMR Decomposition (SAND). SAND follows on from the previous success of time-domain modeling and automatically quantifies entire spectra without manual interaction. The SAND approach uses hybrid optimization with Markov chain Monte Carlo methods, employing subsampling in both time and frequency domains. 
In particular, SAND randomly divides the time-domain data into training and validation sets to help avoid overfitting. We demonstrate the accuracy of SAND, which provides a correlation of ∼0.9 with ground truth on cases including highly overlapped simulated data sets, a two-compound mixture, and a urine sample spiked with different amounts of a four-compound mixture. We further demonstrate an automated annotation using correlation networks derived from SAND decomposed peaks, and on average, 74% of peaks for each compound can be recovered in single clusters. SAND is available in NMRbox, the cloud computing environment for NMR software hosted by the Network for Advanced NMR (NAN). Since the SAND method uses time-domain subsampling (i.e., random subset of time-domain points), it has the potential to be extended to a higher dimensionality and nonuniformly sampled data.}, } @article {pmid38270978, year = {2024}, author = {Dral, PO and Ge, F and Hou, YF and Zheng, P and Chen, Y and Barbatti, M and Isayev, O and Wang, C and Xue, BX and Pinheiro, M and Su, Y and Dai, Y and Chen, Y and Zhang, L and Zhang, S and Ullah, A and Zhang, Q and Ou, Y}, title = {MLatom 3: A Platform for Machine Learning-Enhanced Computational Chemistry Simulations and Workflows.}, journal = {Journal of chemical theory and computation}, volume = {20}, number = {3}, pages = {1193-1213}, pmid = {38270978}, issn = {1549-9626}, abstract = {Machine learning (ML) is increasingly becoming a common tool in computational chemistry. At the same time, the rapid development of ML methods requires a flexible software framework for designing custom workflows. MLatom 3 is a program package designed to leverage the power of ML to enhance typical computational chemistry simulations and to create complex workflows. 
This open-source package provides plenty of choice to the users who can run simulations with the command-line options, input files, or with scripts using MLatom as a Python package, both on their computers and on the online XACS cloud computing service at XACScloud.com. Computational chemists can calculate energies and thermochemical properties, optimize geometries, run molecular and quantum dynamics, and simulate (ro)vibrational, one-photon UV/vis absorption, and two-photon absorption spectra with ML, quantum mechanical, and combined models. The users can choose from an extensive library of methods containing pretrained ML models and quantum mechanical approximations such as AIQM1 approaching coupled-cluster accuracy. The developers can build their own models using various ML algorithms. The great flexibility of MLatom is largely due to the extensive use of the interfaces to many state-of-the-art software packages and libraries.}, } @article {pmid38269892, year = {2024}, author = {Renato, A and Luna, D and Benítez, S}, title = {Development of an ASR System for Medical Conversations.}, journal = {Studies in health technology and informatics}, volume = {310}, number = {}, pages = {664-668}, doi = {10.3233/SHTI231048}, pmid = {38269892}, issn = {1879-8365}, mesh = {Humans ; *Communication ; Language ; Speech ; Acoustics ; *Physicians ; }, abstract = {In this work we document the development of an ASR system for the transcription of conversations between patient and doctor and we will point out the critical aspects of the domain. The system was trained with an acoustic base of spontaneous speech that has a domain language model and a supervised phonetic dictionary. Its performance was compared with two systems: a) NeMo End-to-End Conformers in Spanish and b) Google API ASR (Automatic Speech Recognition) Cloud. The evaluation was carried out on a set of 208 teleconsultations recorded during the year 2020. 
The WER (Word Error Rate) was evaluated in ASR, and Recall and F1 for recognized medical entities. In conclusion, the developed system performed better, reaching 72.5% accuracy in the domain of teleconsultations and an F1 for entity recognition of 0.80.}, } @article {pmid38257526, year = {2024}, author = {Malik, AW and Bhatti, DS and Park, TJ and Ishtiaq, HU and Ryou, JC and Kim, KI}, title = {Cloud Digital Forensics: Beyond Tools, Techniques, and Challenges.}, journal = {Sensors (Basel, Switzerland)}, volume = {24}, number = {2}, pages = {}, pmid = {38257526}, issn = {1424-8220}, support = {RS-2022-00144000//National Research Foundation of Korea/ ; 2022-0-01200//Information & Communications Technology Planning & Evaluation/ ; }, abstract = {Cloud computing technology is rapidly becoming ubiquitous and indispensable. However, its widespread adoption also exposes organizations and individuals to a broad spectrum of potential threats. Despite the multiple advantages the cloud offers, organizations remain cautious about migrating their data and applications to the cloud due to fears of data breaches and security compromises. In light of these concerns, this study has conducted an in-depth examination of a variety of articles to enhance the comprehension of the challenges related to safeguarding and fortifying data within the cloud environment. Furthermore, the research has scrutinized several well-documented data breaches, analyzing the financial consequences they inflicted. Additionally, it scrutinizes the distinctions between conventional digital forensics and the forensic procedures specific to cloud computing. As a result of this investigation, the study has concluded by proposing potential opportunities for further research in this critical domain. 
By doing so, it contributes to our collective understanding of the complex panorama of cloud data protection and security, while acknowledging the evolving nature of technology and the need for ongoing exploration and innovation in this field. This study also helps in understanding the compound annual growth rate (CAGR) of cloud digital forensics, which is found to be quite high at ≈16.53% from 2023 to 2031. Moreover, its market is expected to reach ≈USD 36.9 billion by the year 2031; presently, it is ≈USD 11.21 billion, which shows that there are great opportunities for investment in this area. This study also strategically addresses emerging challenges in cloud digital forensics, providing a comprehensive approach to navigating and overcoming the complexities associated with the evolving landscape of cloud computing.}, } @article {pmid38248999, year = {2024}, author = {Molnár, T and Király, G}, title = {Forest Disturbance Monitoring Using Cloud-Based Sentinel-2 Satellite Imagery and Machine Learning.}, journal = {Journal of imaging}, volume = {10}, number = {1}, pages = {}, pmid = {38248999}, issn = {2313-433X}, support = {TKP2021-NKTA-43//Ministry of Innovation and Technology of Hungary/ ; }, abstract = {Forest damage has become more frequent in Hungary in the last decades, and remote sensing offers a powerful tool for monitoring them rapidly and cost-effectively. A combined approach was developed to utilise high-resolution ESA Sentinel-2 satellite imagery and Google Earth Engine cloud computing and field-based forest inventory data. Maps and charts were derived from vegetation indices (NDVI and Z∙NDVI) of satellite images to detect forest disturbances in the Hungarian study site for the period of 2017-2020. The NDVI maps were classified to reveal forest disturbances, and the cloud-based method successfully showed drought and frost damage in the oak-dominated Nagyerdő forest of Debrecen. 
Differences in the reactions to damage between tree species were visible on the index maps; therefore, a random forest machine learning classifier was applied to show the spatial distribution of dominant species. An accuracy assessment was accomplished with confusion matrices that compared classified index maps to field-surveyed data, demonstrating 99.1% producer, 71% user, and 71% total accuracies for forest damage and 81.9% for tree species. Based on the results of this study and the resilience of Google Earth Engine, the presented method has the potential to be extended to monitor all of Hungary in a faster, more accurate way using systematically collected field-data, the latest satellite imagery, and artificial intelligence.}, } @article {pmid38248542, year = {2024}, author = {Willingham, TB and Stowell, J and Collier, G and Backus, D}, title = {Leveraging Emerging Technologies to Expand Accessibility and Improve Precision in Rehabilitation and Exercise for People with Disabilities.}, journal = {International journal of environmental research and public health}, volume = {21}, number = {1}, pages = {}, pmid = {38248542}, issn = {1660-4601}, support = {90REGE0011/ACL/ACL HHS/United States ; }, mesh = {Humans ; Artificial Intelligence ; Quality of Life ; *Medicine ; Exercise ; *Disabled Persons ; }, abstract = {Physical rehabilitation and exercise training have emerged as promising solutions for improving health, restoring function, and preserving quality of life in populations that face disparate health challenges related to disability. Despite the immense potential for rehabilitation and exercise to help people with disabilities live longer, healthier, and more independent lives, people with disabilities can experience physical, psychosocial, environmental, and economic barriers that limit their ability to participate in rehabilitation, exercise, and other physical activities. 
Together, these barriers contribute to health inequities in people with disabilities, by disproportionately limiting their ability to participate in health-promoting physical activities, relative to people without disabilities. Therefore, there is great need for research and innovation focusing on the development of strategies to expand accessibility and promote participation in rehabilitation and exercise programs for people with disabilities. Here, we discuss how cutting-edge technologies related to telecommunications, wearables, virtual and augmented reality, artificial intelligence, and cloud computing are providing new opportunities to improve accessibility in rehabilitation and exercise for people with disabilities. In addition, we highlight new frontiers in digital health technology and emerging lines of scientific research that will shape the future of precision care strategies for people with disabilities.}, } @article {pmid38248199, year = {2024}, author = {Yan, Z and Lin, X and Zhang, X and Xu, J and Qu, H}, title = {Identity-Based Matchmaking Encryption with Equality Test.}, journal = {Entropy (Basel, Switzerland)}, volume = {26}, number = {1}, pages = {}, pmid = {38248199}, issn = {1099-4300}, abstract = {The identity-based encryption with equality test (IBEET) has become a hot research topic in cloud computing as it provides an equality test for ciphertexts generated under different identities while preserving the confidentiality. Subsequently, for the sake of the confidentiality and authenticity of the data, the identity-based signcryption with equality test (IBSC-ET) has been put forward. Nevertheless, the existing schemes do not consider the anonymity of the sender and the receiver, which leads to the potential leakage of sensitive personal information. How to ensure confidentiality, authenticity, and anonymity in the IBEET setting remains a significant challenge. 
In this paper, we put forward the concept of the identity-based matchmaking encryption with equality test (IBME-ET) to address this issue. We formalized the system model, the definition, and the security models of the IBME-ET and, then, put forward a concrete scheme. Furthermore, our scheme was confirmed to be secure and practical by proving its security and evaluating its performance.}, } @article {pmid38247937, year = {2024}, author = {Kim, J and Jang, H and Koh, H}, title = {MiMultiCat: A Unified Cloud Platform for the Analysis of Microbiome Data with Multi-Categorical Responses.}, journal = {Bioengineering (Basel, Switzerland)}, volume = {11}, number = {1}, pages = {}, pmid = {38247937}, issn = {2306-5354}, support = {2021R1C1C1013861//National Research Foundation of Korea/ ; }, abstract = {The field of the human microbiome is rapidly growing due to the recent advances in high-throughput sequencing technologies. Meanwhile, there have also been many new analytic pipelines, methods and/or tools developed for microbiome data preprocessing and analytics. They are usually focused on microbiome data with continuous (e.g., body mass index) or binary responses (e.g., diseased vs. healthy), yet multi-categorical responses that have more than two categories are also common in reality. In this paper, we introduce a new unified cloud platform, named MiMultiCat, for the analysis of microbiome data with multi-categorical responses. The two main distinguishing features of MiMultiCat are as follows: First, MiMultiCat streamlines a long sequence of microbiome data preprocessing and analytic procedures on user-friendly web interfaces; as such, it is easy to use for many people in various disciplines (e.g., biology, medicine, public health). Second, MiMultiCat performs both association testing and prediction modeling extensively. 
For association testing, MiMultiCat handles both ecological (e.g., alpha and beta diversity) and taxonomical (e.g., phylum, class, order, family, genus, species) contexts through covariate-adjusted or unadjusted analysis. For prediction modeling, MiMultiCat employs the random forest and gradient boosting algorithms that are well suited to microbiome data while providing nice visual interpretations. We demonstrate its use through the reanalysis of gut microbiome data on obesity with body mass index categories. MiMultiCat is freely available on our web server.}, } @article {pmid38235187, year = {2024}, author = {Xun, D and Wang, R and Zhang, X and Wang, Y}, title = {Microsnoop: A generalist tool for microscopy image representation.}, journal = {Innovation (Cambridge (Mass.))}, volume = {5}, number = {1}, pages = {100541}, pmid = {38235187}, issn = {2666-6758}, abstract = {Accurate profiling of microscopy images from small scale to high throughput is an essential procedure in basic and applied biological research. Here, we present Microsnoop, a novel deep learning-based representation tool trained on large-scale microscopy images using masked self-supervised learning. Microsnoop can process various complex and heterogeneous images, and we classified images into three categories: single-cell, full-field, and batch-experiment images. Our benchmark study on 10 high-quality evaluation datasets, containing over 2,230,000 images, demonstrated Microsnoop's robust and state-of-the-art microscopy image representation ability, surpassing existing generalist and even several custom algorithms. Microsnoop can be integrated with other pipelines to perform tasks such as superresolution histopathology image and multimodal analysis. Furthermore, Microsnoop can be adapted to various hardware and can be easily deployed on local or cloud computing platforms. 
We will regularly retrain and reevaluate the model using community-contributed data to consistently improve Microsnoop.}, } @article {pmid38235176, year = {2024}, author = {Putra, IMS and Siahaan, D and Saikhu, A}, title = {SNLI Indo: A recognizing textual entailment dataset in Indonesian derived from the Stanford Natural Language Inference dataset.}, journal = {Data in brief}, volume = {52}, number = {}, pages = {109998}, pmid = {38235176}, issn = {2352-3409}, abstract = {Recognizing textual entailment (RTE) is an essential task in natural language processing (NLP). It is the task of determining the inference relationship between text fragments (premise and hypothesis), of which the inference relationship is either entailment (true), contradiction (false), or neutral (undetermined). The most popular approach for RTE is neural networks, which has resulted in the best RTE models. Neural network approaches, in particular deep learning, are data-driven and, consequently, the quantity and quality of the data significantly influences the performance of these approaches. Therefore, we introduce SNLI Indo, a large-scale RTE dataset in the Indonesian language, which was derived from the Stanford Natural Language Inference (SNLI) corpus by translating the original sentence pairs. SNLI is a large-scale dataset that contains premise-hypothesis pairs that were generated using a crowdsourcing framework. The SNLI dataset is comprised of a total of 569,027 sentence pairs with the distribution of sentence pairs as follows: 549,365 pairs for training, 9,840 pairs for model validation, and 9,822 pairs for testing. We translated the original sentence pairs of the SNLI dataset from English to Indonesian using the Google Cloud Translation API. The existence of SNLI Indo addresses the resource gap in the field of NLP for the Indonesian language. 
Even though large datasets are available in other languages, in particular English, the SNLI Indo dataset enables a more optimal development of deep learning models for RTE in the Indonesian language.}, } @article {pmid38235174, year = {2024}, author = {Koulgi, P and Jumani, S}, title = {Dataset of temporal trends of surface water area across India's rivers and basins.}, journal = {Data in brief}, volume = {52}, number = {}, pages = {109991}, pmid = {38235174}, issn = {2352-3409}, abstract = {This dataset [1] quantifies the extent and rate of annual change in surface water area (SWA) across India's rivers and basins over a period of 30 years spanning 1991 to 2020. This data has been derived from the Global Surface Water Explorer, which maps historical terrestrial surface water occurrence globally using the Landsat satellite image archive since 1984, at a spatial resolution of 30 m/pixel and a temporal resolution of once a month. This monthly time-series was used to create annual composites of wet-season (October, November, December), dry-season (February, March, April), and permanent (October, November, December, February, March, April) surface water extent, which were then used to estimate annual rates of change. To estimate SWA trends for both river networks and their basins, we conducted our analysis at two spatial scales - (1) cross-sectional reaches (transects) across river networks, and (2) sub-basins within river catchments. For each reach and sub-basin (henceforth basin), temporal trends in wet-season, dry-season, and permanent SWA were estimated using the non-parametric Sen's slope estimator. For every valid reach and basin, the temporal timeseries of invalid or missing data was also computed as a fractional area to inform the level of certainty associated with reported SWA trends estimates. 
In addition to a Zenodo data repository, this data [1] is presented as an interactive web application (https://sites.google.com/view/surface-water-trends-india/; henceforth Website) to allow users to visualize the trends of permanent, wet-season, and dry-season water along with the extent of missing data for individual transects or basins across India. The Website provides a simple user interface to enable users to download seasonal time-series of SWA for any region of interest at the scale of the river network or basin. The Website also provides details about accessing the annual permanent, dry and wet season composites, which are stored as publicly accessible cloud assets on the Google Earth Engine platform. The spatial (basin and reach) and temporal (wet season, dry season, and permanent water scenarios) scales of information provided in this dataset yield a granular understanding of water systems in India. We envision this dataset to serve as a baseline information layer that can be used in combination with other data sources to support regional analysis of hydrologic trends, watershed-based analysis, and conservation planning. Specific applications include, but are not limited to, monitoring and identifying at-risk wetlands, visualizing and measuring changes to surface water extent before and after water infrastructure projects (such as dams and water abstraction projects), mapping drought prone regions, and mapping natural and anthropogenic changes to SWA along river networks. 
Intended users include, but are not limited to, students, academics, decision-makers, planners, policymakers, activists, and others interested in water-related issues.}, } @article {pmid38231538, year = {2024}, author = {Gheisari, M and Ghaderzadeh, M and Li, H and Taami, T and Fernández-Campusano, C and Sadeghsalehi, H and Afzaal Abbasi, A}, title = {Mobile Apps for COVID-19 Detection and Diagnosis for Future Pandemic Control: Multidimensional Systematic Review.}, journal = {JMIR mHealth and uHealth}, volume = {12}, number = {}, pages = {e44406}, pmid = {38231538}, issn = {2291-5222}, mesh = {Humans ; *COVID-19 ; Pandemics/prevention & control ; Artificial Intelligence ; SARS-CoV-2 ; *Mobile Applications ; COVID-19 Testing ; }, abstract = {BACKGROUND: In the modern world, mobile apps are essential for human advancement, and pandemic control is no exception. The use of mobile apps and technology for the detection and diagnosis of COVID-19 has been the subject of numerous investigations, although no thorough analysis of COVID-19 pandemic prevention has been conducted using mobile apps, creating a gap.

OBJECTIVE: With the intention of helping software companies and clinical researchers, this study provides comprehensive information regarding the different fields in which mobile apps were used to diagnose COVID-19 during the pandemic.

METHODS: In this systematic review, 535 studies were found after searching 5 major research databases (ScienceDirect, Scopus, PubMed, Web of Science, and IEEE). Of these, only 42 (7.9%) studies concerned with diagnosing and detecting COVID-19 were chosen after applying inclusion and exclusion criteria using the PRISMA (Preferred Reporting Items for Systematic Reviews and Meta-Analyses) protocol.

RESULTS: Mobile apps were categorized into 6 areas based on the content of these 42 studies: contact tracing, data gathering, data visualization, artificial intelligence (AI)-based diagnosis, rule- and guideline-based diagnosis, and data transformation. Patients with COVID-19 were identified via mobile apps using a variety of clinical, geographic, demographic, radiological, serological, and laboratory data. Most studies concentrated on using AI methods to identify people who might have COVID-19. Additionally, symptoms, cough sounds, and radiological images were used more frequently compared to other data types. Deep learning techniques, such as convolutional neural networks, performed comparatively better in the processing of health care data than other types of AI techniques, which improved the diagnosis of COVID-19.

CONCLUSIONS: Mobile apps could soon play a significant role as a powerful tool for data collection, epidemic health data analysis, and the early identification of suspected cases. These technologies can work with the internet of things, cloud storage, 5th-generation technology, and cloud computing. Processing pipelines can be moved to mobile device processing cores using new deep learning methods, such as lightweight neural networks. In the event of future pandemics, mobile apps will play a critical role in rapid diagnosis using various image data and clinical symptoms. Consequently, the rapid diagnosis of these diseases can improve the management of their effects and obtain excellent results in treating patients.}, } @article {pmid38228707, year = {2024}, author = {Simaiya, S and Lilhore, UK and Sharma, YK and Rao, KBVB and Maheswara Rao, VVR and Baliyan, A and Bijalwan, A and Alroobaea, R}, title = {A hybrid cloud load balancing and host utilization prediction method using deep learning and optimization techniques.}, journal = {Scientific reports}, volume = {14}, number = {1}, pages = {1337}, pmid = {38228707}, issn = {2045-2322}, abstract = {Virtual machine (VM) integration methods have effectively proven an optimized load balancing in cloud data centers. The main challenge with VM integration methods is the trade-off among cost effectiveness, quality of service, performance, optimal resource utilization and compliance with service level agreement violations. Deep Learning methods are widely used in existing research on cloud load balancing. However, there is still a problem with acquiring noisy multilayered fluctuations in workload due to the limited resource-level provisioning. The long short-term memory (LSTM) model plays a vital role in the prediction of server load and workload provisioning. 
This research presents a hybrid model using deep learning with Particle Swarm Intelligence and Genetic Algorithm ("DPSO-GA") for dynamic workload provisioning in cloud computing. The proposed model works in two phases. The first phase utilizes a hybrid PSO-GA approach to address the prediction challenge by combining the benefits of these two methods in fine-tuning the Hyperparameters. In the second phase, CNN-LSTM is utilized. Before using the CNN-LSTM approach to forecast the consumption of resources, a hybrid approach, PSO-GA, is used for training it. In the proposed framework, a one-dimensional CNN and LSTM are used to forecast the cloud resource utilization at various subsequent time steps. The LSTM module simulates temporal information that predicts the upcoming VM workload, while a CNN module extracts complicated distinguishing features gathered from VM workload statistics. The proposed model simultaneously integrates the resource utilization in a multi-resource utilization, which helps overcome the load balancing and over-provisioning issues. Comprehensive simulations are carried out utilizing the Google cluster traces benchmarks dataset to verify the efficiency of the proposed DPSO-GA technique in enhancing the distribution of resources and load balancing for the cloud. 
The proposed model achieves outstanding results in terms of better precision, accuracy and load allocation.}, } @article {pmid38218894, year = {2024}, author = {Zhao, Y and Sazlina, SG and Rokhani, FZ and Chinna, K and Su, J and Chew, BH}, title = {The expectations and acceptability of a smart nursing home model among Chinese older adults: a mixed methods study.}, journal = {BMC nursing}, volume = {23}, number = {1}, pages = {40}, pmid = {38218894}, issn = {1472-6955}, abstract = {BACKGROUND: Smart nursing homes (SNHs) integrate advanced technologies, including IoT, digital health, big data, AI, and cloud computing to optimise remote clinical services, monitor abnormal events, enhance decision-making, and support daily activities for older residents, ensuring overall well-being in a safe and cost-effective environment. This study developed and validated a 24-item Expectation and Acceptability of Smart Nursing Homes Questionnaire (EASNH-Q), and examined the levels of expectations and acceptability of SNHs and associated factors among older adults in China.

METHODS: This was an exploratory sequential mixed methods study, where the qualitative case study was conducted in Hainan and Dalian, while the survey was conducted in Xi'an, Nanjing, Shenyang, and Xiamen. The validation of EASNH-Q also included exploratory and confirmatory factor analyses. Multinomial logistic regression analysis was used to estimate the determinants of expectations and acceptability of SNHs.

RESULTS: The newly developed EASNH-Q uses a Likert Scale ranging from 1 (strongly disagree) to 5 (strongly agree), and underwent validation and refinement from 49 items to the final 24 items. The content validity indices for relevance, comprehensibility, and comprehensiveness were all above 0.95. The expectations and acceptability of SNHs exhibited a strong correlation (r = 0.85, p < 0.01), and good test-retest reliability for expectation (0.90) and acceptability (0.81). The highest tertile of expectations (χ²=28.89, p < 0.001) and acceptability (χ²=25.64, p < 0.001) towards SNHs were significantly associated with the willingness to relocate to such facilities. Older adults with self-efficacy in applying smart technologies (OR: 28.0) and those expressing a willingness to move to a nursing home (OR: 3.0) were more likely to have the highest tertile of expectations compared to those in the lowest tertile. Similarly, older adults with self-efficacy in applying smart technologies were more likely to be in the highest tertile of acceptability of SNHs (OR: 13.8).

CONCLUSIONS: EASNH-Q demonstrated commendable validity, reliability, and stability. The majority of Chinese older adults have high expectations for and accept SNHs. Self-efficacy in applying smart technologies and willingness to relocate to a nursing home associated with high expectations and acceptability of SNHs.}, } @article {pmid38218892, year = {2024}, author = {Putzier, M and Khakzad, T and Dreischarf, M and Thun, S and Trautwein, F and Taheri, N}, title = {Implementation of cloud computing in the German healthcare system.}, journal = {NPJ digital medicine}, volume = {7}, number = {1}, pages = {12}, pmid = {38218892}, issn = {2398-6352}, abstract = {With the advent of artificial intelligence and Big Data - projects, the necessity for a transition from analog medicine to modern-day solutions such as cloud computing becomes unavoidable. Even though this need is now common knowledge, the process is not always easy to start. Legislative changes, for example at the level of the European Union, are helping the respective healthcare systems to take the necessary steps. This article provides an overview of how a German university hospital is dealing with European data protection laws on the integration of cloud computing into everyday clinical practice. By describing our model approach, we aim to identify opportunities and possible pitfalls to sustainably influence digitization in Germany.}, } @article {pmid38218746, year = {2024}, author = {Chen, M and Wei, Z and Li, L and Zhang, K}, title = {Edge computing-based proactive control method for industrial product manufacturing quality prediction.}, journal = {Scientific reports}, volume = {14}, number = {1}, pages = {1288}, pmid = {38218746}, issn = {2045-2322}, abstract = {With the emergence of intelligent manufacturing, new-generation information technologies such as big data and artificial intelligence are rapidly integrating with the manufacturing industry. 
One of the primary applications is to assist manufacturing plants in predicting product quality. Traditional predictive models primarily focus on establishing high-precision classification or regression models, with less emphasis on imbalanced data. This is a specific but common scenario in practical industrial environments concerning quality prediction. A SMOTE-XGboost quality prediction active control method based on joint optimization hyperparameters is proposed to address the problem of imbalanced data classification in product quality prediction. In addition, edge computing technology is introduced to address issues in industrial manufacturing, such as the large bandwidth load and resource limitations associated with traditional cloud computing models. Finally, the practicality and effectiveness of the proposed method are validated through a case study of the brake disc production line. Experimental results indicate that the proposed method outperforms other classification methods in brake disc quality prediction.}, } @article {pmid38215330, year = {2024}, author = {Zhao, B and Chen, WN and Wei, FF and Liu, X and Pei, Q and Zhang, J}, title = {PEGA: A Privacy-Preserving Genetic Algorithm for Combinatorial Optimization.}, journal = {IEEE transactions on cybernetics}, volume = {PP}, number = {}, pages = {}, doi = {10.1109/TCYB.2023.3346863}, pmid = {38215330}, issn = {2168-2275}, abstract = {Evolutionary algorithms (EAs), such as the genetic algorithm (GA), offer an elegant way to handle combinatorial optimization problems (COPs). However, limited by expertise and resources, most users lack the capability to implement EAs for solving COPs. An intuitive and promising solution is to outsource evolutionary operations to a cloud server, however, it poses privacy concerns. 
To this end, this article proposes a novel computing paradigm called evolutionary computation as a service (ECaaS), where a cloud server renders evolutionary computation services for users while ensuring their privacy. Following the concept of ECaaS, this article presents privacy-preserving genetic algorithm (PEGA), a privacy-preserving GA designed specifically for COPs. PEGA enables users, regardless of their domain expertise or resource availability, to outsource COPs to the cloud server that holds a competitive GA and approximates the optimal solution while safeguarding privacy. Notably, PEGA features the following characteristics. First, PEGA empowers users without domain expertise or sufficient resources to solve COPs effectively. Second, PEGA protects the privacy of users by preventing the leakage of optimization problem details. Third, PEGA performs comparably to the conventional GA when approximating the optimal solution. To realize its functionality, we implement PEGA falling in a twin-server architecture and evaluate it on two widely known COPs: 1) the traveling salesman problem (TSP) and 2) the 0/1 knapsack problem (KP). Particularly, we utilize encryption cryptography to protect users' privacy and carefully design a suite of secure computing protocols to support evolutionary operators of GA on encrypted chromosomes. Privacy analysis demonstrates that PEGA successfully preserves the confidentiality of COP contents. 
Experimental evaluation results on several TSP datasets and KP datasets reveal that PEGA performs equivalently to the conventional GA in approximating the optimal solution.}, } @article {pmid38215168, year = {2024}, author = {Sun, X and Sun, W and Wang, Z}, title = {Novel enterprises digital transformation influence empirical study.}, journal = {PloS one}, volume = {19}, number = {1}, pages = {e0296693}, pmid = {38215168}, issn = {1932-6203}, mesh = {China ; *Big Data ; *Cloud Computing ; Commerce ; Empirical Research ; }, abstract = {With the rapid development of technologies such as cloud computing and big data, various levels of government departments in the country have successively introduced digital subsidy policies to promote enterprises' digital transformation. However, the effectiveness of these policies and their ability to truly achieve policy objectives have become pressing concerns across society. Against this backdrop, this paper employs a moderated mediation effects model to empirically analyze the incentive effects of financial subsidies on the digital transformation of A-share listed manufacturing companies in the Shanghai and Shenzhen stock markets from 2013 to 2022. The research findings indicate a significant promotion effect of financial subsidies on the digital transformation of manufacturing enterprises, especially demonstrating a notable incentive impact on the digital transformation of large enterprises, non-asset-intensive enterprises, technology-intensive enterprises, and non-labor-intensive enterprises. However, the incentive effect on the digital transformation of small and medium-sized enterprises (SMEs), asset-intensive enterprises, non-technology-intensive enterprises, and labor-intensive enterprises is not significant. Notably, the expansion of financial subsidies positively influences the augmentation of R&D investment within manufacturing enterprises, subsequently providing indirect encouragement for their digital transformation. 
Additionally, the incorporation of the degree of marketization implies its potential to moderate both the direct and indirect impacts of financial subsidies on enterprise digital transformation. This study enriches the research on the mechanism of the role of financial subsidies in digital transformation and provides empirical evidence on how market participation influences the effects of financial subsidies, thereby assisting policymakers in comprehensively understanding the impact of financial subsidy policies on different types of enterprises.}, } @article {pmid38215070, year = {2024}, author = {Fan, Y}, title = {Load balance -aware dynamic cloud-edge-end collaborative offloading strategy.}, journal = {PloS one}, volume = {19}, number = {1}, pages = {e0296897}, pmid = {38215070}, issn = {1932-6203}, mesh = {*Awareness ; *Cloud Computing ; }, abstract = {Cloud-edge-end (CEE) computing is a hybrid computing paradigm that converges the principles of edge and cloud computing. In the design of CEE systems, a crucial challenge is to develop efficient offloading strategies to achieve the collaboration of edge and cloud offloading. Although CEE offloading problems have been widely studied under various backgrounds and methodologies, load balance, which is an indispensable scheme in CEE systems to ensure the full utilization of edge resources, is still a factor that has not yet been accounted for. To fill this research gap, we are devoted to developing a dynamic load balance -aware CEE offloading strategy. First, we propose a load evolution model to characterize the influences of offloading strategies on the system load dynamics and, on this basis, establish a latency model as a performance metric of different offloading strategies. Then, we formulate an optimal control model to seek the optimal offloading strategy that minimizes the latency. 
Second, we analyze the feasibility of typical optimal control numerical methods in solving our proposed model, and develop a numerical method based on the framework of genetic algorithm. Third, through a series of numerical experiments, we verify our proposed method. Results show that our method is effective.}, } @article {pmid38212989, year = {2024}, author = {Peltzer, A and Mohr, C and Stadermann, KB and Zwick, M and Schmid, R}, title = {nf-core/nanostring: a pipeline for reproducible NanoString nCounter analysis.}, journal = {Bioinformatics (Oxford, England)}, volume = {40}, number = {1}, pages = {}, pmid = {38212989}, issn = {1367-4811}, support = {//Boehringer Ingelheim Pharma GmbH & Co/ ; }, mesh = {*Software ; *Language ; Cloud Computing ; Workflow ; Quality Control ; }, abstract = {MOTIVATION: The NanoString™ nCounter® technology platform is a widely used targeted quantification platform for the analysis of gene expression of up to ∼800 genes. Whereas the software tools by the manufacturer can perform the analysis in an interactive and GUI driven approach, there is no portable and user-friendly workflow available that can be used to perform reproducible analysis of multiple samples simultaneously in a scalable fashion on different computing infrastructures.

RESULTS: Here, we present the nf-core/nanostring open-source pipeline to perform a comprehensive analysis including quality control and additional features such as expression visualization, annotation with additional metadata and input creation for differential gene expression analysis. The workflow features an easy installation, comprehensive documentation, open-source code with the possibility for further extensions, a strong portability across multiple computing environments and detailed quality metrics reporting covering all parts of the pipeline. nf-core/nanostring has been implemented in the Nextflow workflow language and supports Docker, Singularity, Podman container technologies as well as Conda environments, enabling easy deployment on any Nextflow supported compatible system, including most widely used cloud computing environments such as Google GCP or Amazon AWS.

The source code, documentation and installation instructions as well as results for continuous tests are freely available at https://github.com/nf-core/nanostring and https://nf-co.re/nanostring.}, } @article {pmid38212192, year = {2024}, author = {Ayeni, KI and Berry, D and Ezekiel, CN and Warth, B}, title = {Enhancing microbiome research in sub-Saharan Africa.}, journal = {Trends in microbiology}, volume = {32}, number = {2}, pages = {111-115}, doi = {10.1016/j.tim.2023.11.003}, pmid = {38212192}, issn = {1878-4380}, mesh = {Humans ; Africa South of the Sahara ; *Microbiota ; }, abstract = {While there are lighthouse examples of microbiome research in sub-Saharan Africa (SSA), a significant proportion of local researchers face several challenges. Here, we highlight prevailing issues limiting microbiome research in SSA and suggest potential technological, societal, and research-based solutions. We emphasize the need for considerable investment in infrastructures, training, and appropriate funding to democratize modern technologies with a view to providing useful data to improve human health.}, } @article {pmid38203138, year = {2024}, author = {An, X and Cai, B and Chai, L}, title = {Research on Over-the-Horizon Perception Distance Division of Optical Fiber Communication Based on Intelligent Roadways.}, journal = {Sensors (Basel, Switzerland)}, volume = {24}, number = {1}, pages = {}, pmid = {38203138}, issn = {1424-8220}, abstract = {With the construction and application of more and more intelligent networking demonstration projects, a large number of advanced roadside digital infrastructures are deployed on both sides of the intelligent road. These devices sense the road situation in real time through algorithms and transmit it to edge computing units and cloud control platforms through high-speed optical fiber transmission networks. 
This article proposes a cloud edge terminal architecture system based on cloud edge cooperation, as well as a data exchange protocol for cloud control basic platforms. The over-the-horizon scene division and optical fiber network communication model are verified by deploying intelligent roadside devices on the intelligent highway. At the same time, this article uses the optical fiber network communication algorithm and ModelScope large model to model inference on real-time video data. The actual data results show that the StreamYOLO (Stream You Only Look Once) model can use the Streaming Perception method to detect and continuously track target vehicles in real-time videos. Finally, the method proposed in this article was experimentally validated in an actual smart highway digital infrastructure construction project. The experimental results demonstrate the high application value and promotion prospects of the fiber optic network in the division of over the horizon perception distance in intelligent roadways construction.}, } @article {pmid38203103, year = {2023}, author = {Sheik, AT and Maple, C and Epiphaniou, G and Dianati, M}, title = {Securing Cloud-Assisted Connected and Autonomous Vehicles: An In-Depth Threat Analysis and Risk Assessment.}, journal = {Sensors (Basel, Switzerland)}, volume = {24}, number = {1}, pages = {}, doi = {10.3390/s24010241}, pmid = {38203103}, issn = {1424-8220}, abstract = {As threat vectors and adversarial capabilities evolve, Cloud-Assisted Connected and Autonomous Vehicles (CCAVs) are becoming more vulnerable to cyberattacks. Several established threat analysis and risk assessment (TARA) methodologies are publicly available to address the evolving threat landscape. However, these methodologies inadequately capture the threat data of CCAVs, resulting in poorly defined threat boundaries or the reduced efficacy of the TARA. 
This is due to multiple factors, including complex hardware-software interactions, rapid technological advancements, outdated security frameworks, heterogeneous standards and protocols, and human errors in CCAV systems. To address these factors, this study begins by systematically evaluating TARA methods and applying the Spoofing, Tampering, Repudiation, Information disclosure, Denial of service, and Elevation of privileges (STRIDE) threat model and Damage, Reproducibility, Exploitability, Affected Users, and Discoverability (DREAD) risk assessment to target system architectures. This study identifies vulnerabilities, quantifies risks, and methodically examines defined data processing components. In addition, this study offers an attack tree to delineate attack vectors and provides a novel defense taxonomy against identified risks. This article demonstrates the efficacy of the TARA in systematically capturing compromised security requirements, threats, limits, and associated risks with greater precision. By doing so, we further discuss the challenges in protecting hardware-software assets against multi-staged attacks due to emerging vulnerabilities. 
As a result, this research informs advanced threat analyses and risk management strategies for enhanced security engineering of cyberphysical CCAV systems.}, } @article {pmid38203078, year = {2023}, author = {Suo, L and Ma, H and Jiao, W and Liu, X}, title = {Job-Deadline-Guarantee-Based Joint Flow Scheduling and Routing Scheme in Data Center Networks.}, journal = {Sensors (Basel, Switzerland)}, volume = {24}, number = {1}, pages = {}, pmid = {38203078}, issn = {1424-8220}, support = {62101415//National Natural Science Foundation of China/ ; }, abstract = {Many emerging Internet of Things (IoT) applications deployed on cloud platforms have strict latency requirements or deadline constraints, and thus meeting the deadlines is crucial to ensure the quality of service for users and the revenue for service providers in these delay-stringent IoT applications. Efficient flow scheduling in data center networks (DCNs) plays a major role in reducing the execution time of jobs and has garnered significant attention in recent years. However, only few studies have attempted to combine job-level flow scheduling and routing to guarantee meeting the deadlines of multi-stage jobs. In this paper, an efficient heuristic joint flow scheduling and routing (JFSR) scheme is proposed. First, targeting maximizing the number of jobs for which the deadlines have been met, we formulate the joint flow scheduling and routing optimization problem for multiple multi-stage jobs. Second, due to its mathematical intractability, this problem is decomposed into two sub-problems: inter-coflow scheduling and intra-coflow scheduling. In the first sub-problem, coflows from different jobs are scheduled according to their relative remaining times; in the second sub-problem, an iterative coflow scheduling and routing (ICSR) algorithm is designed to alternately optimize the routing path and bandwidth allocation for each scheduled coflow. 
Finally, simulation results demonstrate that the proposed JFSR scheme can significantly increase the number of jobs for which the deadlines have been met in DCNs.}, } @article {pmid38203015, year = {2023}, author = {Oyucu, S and Polat, O and Türkoğlu, M and Polat, H and Aksöz, A and Ağdaş, MT}, title = {Ensemble Learning Framework for DDoS Detection in SDN-Based SCADA Systems.}, journal = {Sensors (Basel, Switzerland)}, volume = {24}, number = {1}, pages = {}, pmid = {38203015}, issn = {1424-8220}, support = {101084323//European Union's Horizon Europe research and innovation programme/ ; }, abstract = {Supervisory Control and Data Acquisition (SCADA) systems play a crucial role in overseeing and controlling renewable energy sources like solar, wind, hydro, and geothermal resources. Nevertheless, with the expansion of conventional SCADA network infrastructures, there arise significant challenges in managing and scaling due to increased size, complexity, and device diversity. Using Software Defined Networking (SDN) technology in traditional SCADA network infrastructure offers management, scaling and flexibility benefits. However, as the integration of SDN-based SCADA systems with modern technologies such as the Internet of Things, cloud computing, and big data analytics increases, cybersecurity becomes a major concern for these systems. Therefore, cyber-physical energy systems (CPES) should be considered together with all energy systems. One of the most dangerous types of cyber-attacks against SDN-based SCADA systems is Distributed Denial of Service (DDoS) attacks. DDoS attacks disrupt the management of energy resources, causing service interruptions and increasing operational costs. Therefore, the first step to protect against DDoS attacks in SDN-based SCADA systems is to develop an effective intrusion detection system. 
This paper proposes a Decision Tree-based Ensemble Learning technique to detect DDoS attacks in SDN-based SCADA systems by accurately distinguishing between normal and DDoS attack traffic. For training and testing the ensemble learning models, normal and DDoS attack traffic data are obtained over a specific simulated experimental network topology. Techniques based on feature selection and hyperparameter tuning are used to optimize the performance of the decision tree ensemble models. Experimental results show that feature selection, combination of different decision tree ensemble models, and hyperparameter tuning can lead to a more accurate machine learning model with better performance detecting DDoS attacks against SDN-based SCADA systems.}, } @article {pmid38203012, year = {2023}, author = {Rodríguez-Azar, PI and Mejía-Muñoz, JM and Cruz-Mejía, O and Torres-Escobar, R and López, LVR}, title = {Fog Computing for Control of Cyber-Physical Systems in Industry Using BCI.}, journal = {Sensors (Basel, Switzerland)}, volume = {24}, number = {1}, pages = {}, pmid = {38203012}, issn = {1424-8220}, abstract = {Brain-computer interfaces use signals from the brain, such as EEG, to determine brain states, which in turn can be used to issue commands, for example, to control industrial machinery. While Cloud computing can aid in the creation and operation of industrial multi-user BCI systems, the vast amount of data generated from EEG signals can lead to slow response time and bandwidth problems. Fog computing reduces latency in high-demand computation networks. Hence, this paper introduces a fog computing solution for BCI processing. The solution consists in using fog nodes that incorporate machine learning algorithms to convert EEG signals into commands to control a cyber-physical system. The machine learning module uses a deep learning encoder to generate feature images from EEG signals that are subsequently classified into commands by a random forest. 
The classification scheme is compared using various classifiers, being the random forest the one that obtained the best performance. Additionally, a comparison was made between the fog computing approach and using only cloud computing through the use of a fog computing simulator. The results indicate that the fog computing method resulted in less latency compared to the solely cloud computing approach.}, } @article {pmid38202896, year = {2023}, author = {Feng, YC and Zeng, SY and Liang, TY}, title = {Part2Point: A Part-Oriented Point Cloud Reconstruction Framework.}, journal = {Sensors (Basel, Switzerland)}, volume = {24}, number = {1}, pages = {}, pmid = {38202896}, issn = {1424-8220}, support = {NSTC112-2221-E992-068//National Science and Technology Council in Taiwan/ ; }, abstract = {Three-dimensional object modeling is necessary for developing virtual and augmented reality applications. Traditionally, application engineers must manually use art software to edit object shapes or exploit LIDAR to scan physical objects for constructing 3D models. This is very time-consuming and costly work. Fortunately, GPU recently provided a cost-effective solution for massive data computation. With GPU support, many studies have proposed 3D model generators based on different learning architectures, which can automatically convert 2D object pictures into 3D object models with good performance. However, as the demand for model resolution increases, the required computing time and memory space increase as significantly as the parameters of the learning architecture, which seriously degrades the efficiency of 3D model construction and the feasibility of resolution improvement. To resolve this problem, this paper proposes a part-oriented point cloud reconstruction framework called Part2Point. This framework segments the object's parts, reconstructs the point cloud for individual object parts, and combines the part point clouds into the complete object point cloud. 
Therefore, it can reduce the number of learning network parameters at the exact resolution, effectively minimizing the calculation time cost and the required memory space. Moreover, it can improve the resolution of the reconstructed point cloud so that the reconstructed model can present more details of object parts.}, } @article {pmid38200074, year = {2024}, author = {Chen, C and Gong, L and Luo, X and Wang, F}, title = {Research on a new management model of distribution Internet of Things.}, journal = {Scientific reports}, volume = {14}, number = {1}, pages = {995}, pmid = {38200074}, issn = {2045-2322}, support = {2020-KJLH-PH-006//Science and Technology Project of Zhejiang Electric Power Company/ ; }, abstract = {Based on the characteristics of controllable intelligence of the Internet of Things (IoT) and the requirements of the new distribution Network for function and transmission delay, this study proposes a method of combining edge collaborative computing and distribution Network station area, and builds a distribution Network management structure model by combining the Packet Transport Network (PTN) Network structure. The multi-terminal node distribution model of distributed IoT is established. Finally, a distribution IoT management model is constructed based on the edge multi-node cooperative reasoning algorithm and collaborative computing architecture model. The purpose of this paper is to solve the problem of large reasoning delay caused by heavy computing tasks in distribution cloud servers. 
The final results show that the model reduces the inference delay of cloud computing when a large number of smart device terminals of distribution IoT are connected to the network.}, } @article {pmid38197934, year = {2024}, author = {Cheong, RCT and Jawad, S and Adams, A and Campion, T and Lim, ZH and Papachristou, N and Unadkat, S and Randhawa, P and Joseph, J and Andrews, P and Taylor, P and Kunz, H}, title = {Enhancing paranasal sinus disease detection with AutoML: efficient AI development and evaluation via magnetic resonance imaging.}, journal = {European archives of oto-rhino-laryngology : official journal of the European Federation of Oto-Rhino-Laryngological Societies (EUFOS) : affiliated with the German Society for Oto-Rhino-Laryngology - Head and Neck Surgery}, volume = {281}, number = {4}, pages = {2153-2158}, pmid = {38197934}, issn = {1434-4726}, mesh = {Humans ; *Artificial Intelligence ; Machine Learning ; Magnetic Resonance Imaging ; Head ; *Paranasal Sinus Diseases/diagnostic imaging ; }, abstract = {PURPOSE: Artificial intelligence (AI) in the form of automated machine learning (AutoML) offers a new potential breakthrough to overcome the barrier of entry for non-technically trained physicians. A Clinical Decision Support System (CDSS) for screening purposes using AutoML could be beneficial to ease the clinical burden in the radiological workflow for paranasal sinus diseases.

METHODS: The main target of this work was the usage of automated evaluation of model performance and the feasibility of the Vertex AI image classification model on the Google Cloud AutoML platform to be trained to automatically classify the presence or absence of sinonasal disease. The dataset is a consensus labelled Open Access Series of Imaging Studies (OASIS-3) MRI head dataset by three specialised head and neck consultant radiologists. A total of 1313 unique non-TSE T2w MRI head sessions were used from the OASIS-3 repository.

RESULTS: The best-performing image classification model achieved a precision of 0.928. Demonstrating the feasibility and high performance of the Vertex AI image classification model to automatically detect the presence or absence of sinonasal disease on MRI.

CONCLUSION: AutoML allows for potential deployment to optimise diagnostic radiology workflows and lay the foundation for further AI research in radiology and otolaryngology. The usage of AutoML could serve as a formal requirement for a feasibility study.}, } @article {pmid38195165, year = {2024}, author = {Chen, J and Yin, D and Wong, HYH and Duan, X and Yu, KHO and Ho, JWK}, title = {Vulture: cloud-enabled scalable mining of microbial reads in public scRNA-seq data.}, journal = {GigaScience}, volume = {13}, number = {}, pages = {}, pmid = {38195165}, issn = {2047-217X}, support = {//Innovation and Technology Commission - Hong Kong/ ; }, mesh = {Humans ; Benchmarking ; *Carcinoma, Hepatocellular/genetics ; DNA Copy Number Variations ; Hepatitis B virus ; *Liver Neoplasms ; Single-Cell Gene Expression Analysis ; }, abstract = {The rapidly growing collection of public single-cell sequencing data has become a valuable resource for molecular, cellular, and microbial discovery. Previous studies mostly overlooked detecting pathogens in human single-cell sequencing data. Moreover, existing bioinformatics tools lack the scalability to deal with big public data. We introduce Vulture, a scalable cloud-based pipeline that performs microbial calling for single-cell RNA sequencing (scRNA-seq) data, enabling meta-analysis of host-microbial studies from the public domain. In our benchmarking experiments, Vulture is 66% to 88% faster than local tools (PathogenTrack and Venus) and 41% faster than the state-of-the-art cloud-based tool Cumulus, while achieving comparable microbial read identification. In terms of the cost on cloud computing systems, Vulture also shows a cost reduction of 83% ($12 vs. ${\$}$70). 
We applied Vulture to 2 coronavirus disease 2019, 3 hepatocellular carcinoma (HCC), and 2 gastric cancer human patient cohorts with public sequencing reads data from scRNA-seq experiments and discovered cell type-specific enrichment of severe acute respiratory syndrome coronavirus 2, hepatitis B virus (HBV), and Helicobacter pylori-positive cells, respectively. In the HCC analysis, all cohorts showed hepatocyte-only enrichment of HBV, with cell subtype-associated HBV enrichment based on inferred copy number variations. In summary, Vulture presents a scalable and economical framework to mine unknown host-microbial interactions from large-scale public scRNA-seq data. Vulture is available via an open-source license at https://github.com/holab-hku/Vulture.}, } @article {pmid38192752, year = {2024}, author = {Tan, X and Zhao, D and Wang, M and Wang, X and Wang, X and Liu, W and Ghobaei-Arani, M}, title = {A decision-making mechanism for task offloading using learning automata and deep learning in mobile edge networks.}, journal = {Heliyon}, volume = {10}, number = {1}, pages = {e23651}, pmid = {38192752}, issn = {2405-8440}, abstract = {The development of mobile networks has led to the emergence of challenges such as high delays in storage, computing and traffic management. To deal with these challenges, fifth-generation networks emphasize the use of technologies such as mobile cloud computing and mobile edge computing. Mobile Edge Cloud Computing (MECC) is an emerging distributed computing model that provides access to cloud computing services at the edge of the network and near mobile users. With offloading tasks at the edge of the network instead of transferring them to a remote cloud, MECC can realize flexibility and real-time processing. During computation offloading, the requirements of Internet of Things (IoT) applications may change at different stages, which is ignored in existing works. 
With this motivation, we propose a task offloading method under dynamic resource requirements during the use of IoT applications, which focuses on the problem of workload fluctuations. The proposed method uses a learning automata-based offload decision-maker to offload requests to the edge layer. An auto-scaling strategy is then developed using a long short-term memory network which can estimate the expected number of future requests. Finally, an Asynchronous Advantage Actor-Critic algorithm as a deep reinforcement learning-based approach decides to scale down or scale up. The effectiveness of the proposed method has been confirmed through extensive experiments using the iFogSim simulator. The numerical results show that the proposed method has better scalability and performance in terms of delay and energy consumption than the existing state-of-the-art methods.}, } @article {pmid38192482, year = {2023}, author = {Alabadi, M and Habbal, A}, title = {Next-generation predictive maintenance: leveraging blockchain and dynamic deep learning in a domain-independent system.}, journal = {PeerJ. Computer science}, volume = {9}, number = {}, pages = {e1712}, pmid = {38192482}, issn = {2376-5992}, abstract = {The fourth industrial revolution, often referred to as Industry 4.0, has revolutionized the manufacturing sector by integrating emerging technologies such as artificial intelligence (AI), machine and deep learning, Industrial Internet of Things (IIoT), cloud computing, cyber physical systems (CPSs) and cognitive computing, throughout the production life cycle. Predictive maintenance (PdM) emerges as a critical component, utilizing data analytic to track machine health and proactively detect machinery failures. Deep learning (DL), is pivotal in this context, offering superior accuracy in prediction through neural networks' data processing capabilities. However, DL adoption in PdM faces challenges, including continuous model updates and domain dependence. 
Meanwhile, centralized DL models, prevalent in PdM, pose security risks such as central points of failure and unauthorized access. To address these issues, this study presents an innovative decentralized PdM system integrating DL, blockchain, and decentralized storage based on the InterPlanetary File System (IPFS) for accurately predicting Remaining Useful Lifetime (RUL). DL handles predictive tasks, while blockchain secures data orchestration. Decentralized storage safeguards model metadata and training data for dynamic models. The system features synchronized two DL pipelines for time series data, encompassing prediction and training mechanisms. The detailed material and methods of this research shed light on the system's development and validation processes. Rigorous validation confirms the system's accuracy, performance, and security through an experimental testbed. The results demonstrate the system's dynamic updating and domain independence. Prediction model surpass state-of-the-art models in terms of the root mean squared error (RMSE) score. Blockchain-based scalability performance was tested based on smart contract gas usage, and the analysis shows efficient performance across varying input and output data scales. A comprehensive CIA analysis highlights the system's robust security features, addressing confidentiality, integrity, and availability aspects. The proposed decentralized predictive maintenance (PdM) system, which incorporates deep learning (DL), blockchain technology, and decentralized storage, has the potential to improve predictive accuracy and overcome significant security and scalability obstacles. Consequently, this system holds promising implications for the advancement of predictive maintenance in the context of Industry 4.0.}, } @article {pmid38192461, year = {2023}, author = {Xiao, J and Chang, C and Wu, P and Ma, Y}, title = {Attribute identification based IoT fog data security control and forwarding.}, journal = {PeerJ. 
Computer science}, volume = {9}, number = {}, pages = {e1747}, pmid = {38192461}, issn = {2376-5992}, abstract = {As Internet of Things (IoT) applications continue to proliferate, traditional cloud computing is increasingly unable to meet the low-latency demands of these applications. The IoT fog architecture solves this limitation by introducing fog servers in the fog layer that are closer to the IoT devices. However, this architecture lacks authentication mechanisms for information sources, security verification for information transmission, and reasonable allocation of fog nodes. To ensure the secure transmission of end-to-end information in the IoT fog architecture, an attribute identification based security control and forwarding method for IoT fog data (AISCF) is proposed. AISCF applies attribute signatures to the IoT fog architecture and uses software defined network (SDN) to control and forward fog layer data flows. Firstly, IoT devices add attribute identifiers to the data they send based on attribute features. The ingress switch then performs fine-grained access control on the data based on these attribute identifiers. Secondly, SDN uses attribute features as flow table matching items to achieve fine-grained control and forwarding of fog layer data flows based on attribute identifiers. Lastly, the egress switch dynamically samples data flows and verifies the attribute signatures of the sampled data packets at the controller end. Experimental validation has demonstrated that AISCF can effectively detect attacks such as data tampering and forged matching items. 
Moreover, AISCF imposes minimal overhead on network throughput, CPU utilization and packet forwarding latency, and has practicality in IoT fog architecture.}, } @article {pmid38191935, year = {2024}, author = {Renton, AI and Dao, TT and Johnstone, T and Civier, O and Sullivan, RP and White, DJ and Lyons, P and Slade, BM and Abbott, DF and Amos, TJ and Bollmann, S and Botting, A and Campbell, MEJ and Chang, J and Close, TG and Dörig, M and Eckstein, K and Egan, GF and Evas, S and Flandin, G and Garner, KG and Garrido, MI and Ghosh, SS and Grignard, M and Halchenko, YO and Hannan, AJ and Heinsfeld, AS and Huber, L and Hughes, ME and Kaczmarzyk, JR and Kasper, L and Kuhlmann, L and Lou, K and Mantilla-Ramos, YJ and Mattingley, JB and Meier, ML and Morris, J and Narayanan, A and Pestilli, F and Puce, A and Ribeiro, FL and Rogasch, NC and Rorden, C and Schira, MM and Shaw, TB and Sowman, PF and Spitz, G and Stewart, AW and Ye, X and Zhu, JD and Narayanan, A and Bollmann, S}, title = {Neurodesk: an accessible, flexible and portable data analysis environment for reproducible neuroimaging.}, journal = {Nature methods}, volume = {}, number = {}, pages = {}, pmid = {38191935}, issn = {1548-7105}, abstract = {Neuroimaging research requires purpose-built analysis software, which is challenging to install and may produce different results across computing environments. The community-oriented, open-source Neurodesk platform (https://www.neurodesk.org/) harnesses a comprehensive and growing suite of neuroimaging software containers. 
Neurodesk includes a browser-accessible virtual desktop, command-line interface and computational notebook compatibility, allowing for accessible, flexible, portable and fully reproducible neuroimaging analysis on personal workstations, high-performance computers and the cloud.}, } @article {pmid38187735, year = {2023}, author = {Moctezuma, L and Rivera, LB and van Nouhuijs, F and Orcales, F and Kim, A and Campbell, R and Fuse, M and Pennings, PS}, title = {Using a decision tree to predict COVID case numbers: a tutorial for beginners.}, journal = {bioRxiv : the preprint server for biology}, volume = {}, number = {}, pages = {}, pmid = {38187735}, support = {T32 GM142515/GM/NIGMS NIH HHS/United States ; T34 GM008574/GM/NIGMS NIH HHS/United States ; }, abstract = {Machine learning (ML) makes it possible to analyze large volumes of data and is an important tool in biomedical research. The use of ML methods can lead to improvements in diagnosis, treatment, and prevention of diseases. During the COVID pandemic, ML methods were used for predictions at the patient and community levels. Given the ubiquity of ML, it is important that future doctors, researchers and teachers get acquainted with ML and its contributions to research. Our goal is to make it easier for students and their professors to learn about ML. The learning module we present here is based on a small but relevant COVID dataset, videos, annotated code and the use of cloud computing platforms. The benefit of cloud computing platforms is that students don't have to set up a coding environment on their computer. This saves time and is also an important democratization factor - allowing students to use old or borrowed computers (e.g., from a library), tablets or Chromebooks. As a result, this will benefit colleges geared toward underserved populations with limited computing infrastructure. 
We developed a beginner-friendly module focused on learning the basics of decision trees by applying them to COVID tabular data. It introduces students to basic terminology used in supervised ML and its relevance to research. The module includes two Python notebooks with pre-written code, one with practice exercises and another with its solutions. Our experience with biology students at San Francisco State University suggests that the material increases interest in ML.}, } @article {pmid38183538, year = {2024}, author = {Indraja, G and Aashi, A and Vema, VK}, title = {Spatial and temporal classification and prediction of LULC in Brahmani and Baitarni basin using integrated cellular automata models.}, journal = {Environmental monitoring and assessment}, volume = {196}, number = {2}, pages = {117}, pmid = {38183538}, issn = {1573-2959}, mesh = {*Cellular Automata ; *Ecosystem ; Environmental Monitoring ; Algorithms ; Agriculture ; }, abstract = {Monitoring the dynamics of land use and land cover (LULC) is imperative in the changing climate and evolving urbanization patterns worldwide. The shifts in land use have a significant impact on the hydrological response of watersheds across the globe. Several studies have applied machine learning (ML) algorithms using historical LULC maps along with elevation data and slope for predicting future LULC projections. However, the influence of other driving factors such as socio-economic and climatological factors has not been thoroughly explored. In the present study, a sensitivity analysis approach was adopted to understand the effect of both physical (elevation, slope, aspect, etc.) and socio-economic factors such as population density, distance to built-up, and distance to road and rail, as well as climatic factors (mean precipitation) on the accuracy of LULC prediction in the Brahmani and Baitarni (BB) basin of Eastern India. 
Additionally, in the absence of the recent LULC maps of the basin, three ML algorithms, i.e., random forest (RF), classification and regression trees (CART), and support vector machine (SVM) were utilized for LULC classification for the years 2007, 2014, and 2021 on Google earth engine (GEE) cloud computing platform. Among the three algorithms, RF performed best for classifying built-up areas along with all the other classes as compared to CART and SVM. The prediction results revealed that the proximity to built-up and population growth dominates in modeling LULC over physical factors such as elevation and slope. The analysis of historical data revealed an increase of 351% in built-up areas over the past years (2007-2021), with a corresponding decline in forest and water areas by 12% and 36% respectively. While the future predictions highlighted an increase in built-up class ranging from 11 to 38% during the years 2028-2070, the forested areas are anticipated to decline by 4 to 16%. The overall findings of the present study suggested that the BB basin, despite being primarily agricultural with a significant forest cover, is undergoing rapid expansion of built-up areas through the encroachment of agricultural and forested lands, which could have far-reaching implications for the region's ecosystem services and sustainability.}, } @article {pmid38179578, year = {2023}, author = {Pelofske, E and Hahn, G and Djidjev, H}, title = {Initial State Encoding via Reverse Quantum Annealing and H-Gain Features.}, journal = {IEEE transactions on quantum engineering}, volume = {4}, number = {}, pages = {}, pmid = {38179578}, issn = {2689-1808}, support = {R01 AI154470/AI/NIAID NIH HHS/United States ; U01 HL089897/HL/NHLBI NIH HHS/United States ; R21 HD095228/HD/NICHD NIH HHS/United States ; P30 ES002109/ES/NIEHS NIH HHS/United States ; U01 HG008685/HG/NHGRI NIH HHS/United States ; P01 HL132825/HL/NHLBI NIH HHS/United States ; U01 HL089856/HL/NHLBI NIH HHS/United States ; P01 
HL120839/HL/NHLBI NIH HHS/United States ; }, abstract = {Quantum annealing is a specialized type of quantum computation that aims to use quantum fluctuations in order to obtain global minimum solutions of combinatorial optimization problems. Programmable D-Wave quantum annealers are available as cloud computing resources, which allow users low-level access to quantum annealing control features. In this article, we are interested in improving the quality of the solutions returned by a quantum annealer by encoding an initial state into the annealing process. We explore two D-Wave features that allow one to encode such an initial state: the reverse annealing (RA) and the h-gain (HG) features. RA aims to refine a known solution following an anneal path starting with a classical state representing a good solution, going backward to a point where a transverse field is present, and then finishing the annealing process with a forward anneal. The HG feature allows one to put a time-dependent weighting scheme on linear (h) biases of the Hamiltonian, and we demonstrate that this feature likewise can be used to bias the annealing to start from an initial state. We also consider a hybrid method consisting of a backward phase resembling RA and a forward phase using the HG initial state encoding. Importantly, we investigate the idea of iteratively applying RA and HG to a problem, with the goal of monotonically improving on an initial state that is not optimal. The HG encoding technique is evaluated on a variety of input problems including the edge-weighted maximum cut problem and the vertex-weighted maximum clique problem, demonstrating that the HG technique is a viable alternative to RA for some problems. 
We also investigate how the iterative procedures perform for both RA and HG initial state encodings on random whole-chip spin glasses with the native hardware connectivity of the D-Wave Chimera and Pegasus chips.}, } @article {pmid38178510, year = {2023}, author = {Xu, X and Lu, Y and Huang, Y and Zhou, X and Ma, R and Xiong, H and Li, M and Wu, Q and Xu, J}, title = {Frequency modulation of terahertz microcavity via strong coupling with plasmonic resonators.}, journal = {Optics express}, volume = {31}, number = {26}, pages = {44375-44384}, doi = {10.1364/OE.510365}, pmid = {38178510}, issn = {1094-4087}, abstract = {Tunable terahertz (THz) microcavities are crucial for the compact on-chip THz devices, aiming to future cloud-based computing, and artificial-intelligence technologies. However, the solutions to effectively modulate THz microcavities remain elusive. Strong coupling has been widely demonstrated in many configurations at different ambient conditions to date and may serve as a promising tool to modulate THz microcavities. Here, we schematically design a microcavity-plasmon hybrid system, and propose an effective approach to modulating the resonant frequencies of THz microcavities by the microcavity-resonator strong coupling. In this case, we observed the strongly coupling states, where the resultant two-polariton branches exhibit an anti-crossing splitting in the frequency domain, experimentally exhibiting a ∼6.2% frequency modulation to the microcavity compared to the uncoupled case. 
This work provides an efficient approach to modulating chip-scale THz microcavities, thereby facilitating the development and application of compact THz integrated devices, further empowering the evolution of future information processing and intelligent computing system.}, } @article {pmid38167901, year = {2024}, author = {DeWitt, PE and Rebull, MA and Bennett, TD}, title = {Open source and reproducible and inexpensive infrastructure for data challenges and education.}, journal = {Scientific data}, volume = {11}, number = {1}, pages = {8}, pmid = {38167901}, issn = {2052-4463}, support = {K23 HD074620/HD/NICHD NIH HHS/United States ; R03 HD094912/HD/NICHD NIH HHS/United States ; }, abstract = {Data sharing is necessary to maximize the actionable knowledge generated from research data. Data challenges can encourage secondary analyses of datasets. Data challenges in biomedicine often rely on advanced cloud-based computing infrastructure and expensive industry partnerships. Examples include challenges that use Google Cloud virtual machines and the Sage Bionetworks Dream Challenges platform. Such robust infrastructures can be financially prohibitive for investigators without substantial resources. Given the potential to develop scientific and clinical knowledge and the NIH emphasis on data sharing and reuse, there is a need for inexpensive and computationally lightweight methods for data sharing and hosting data challenges. To fill that gap, we developed a workflow that allows for reproducible model training, testing, and evaluation. We leveraged public GitHub repositories, open-source computational languages, and Docker technology. In addition, we conducted a data challenge using the infrastructure we developed. In this manuscript, we report on the infrastructure, workflow, and data challenge results. 
The infrastructure and workflow are likely to be useful for data challenges and education.}, } @article {pmid38166081, year = {2024}, author = {Tian, Z and Qiu, L and Wang, L}, title = {Drivers and influencers of blockchain and cloud-based business sustainability accounting in China: Enhancing practices and promoting adoption.}, journal = {PloS one}, volume = {19}, number = {1}, pages = {e0295802}, pmid = {38166081}, issn = {1932-6203}, abstract = {The field of sustainability accounting aims to integrate environmental, social, and governance factors into financial reporting. With the growing importance of sustainability practices, emerging technologies have the potential to revolutionize reporting methods. However, there is a lack of research on the factors influencing the adoption of blockchain and cloud-based sustainability accounting in China. This study employs a mixed-methods approach to examine the key drivers and barriers to technology adoption for sustainability reporting among Chinese businesses. Through a systematic literature review, gaps in knowledge were identified. Primary data was collected through an online survey of firms, followed by in-depth case studies. The findings of the study reveal a positive relationship between company size and reporting behaviors. However, size alone is not sufficient to predict outcomes accurately. The industry type also has significant but small effects, although its impact on reporting behaviors varies. The relationship between profitability and reporting behaviors is intricate and contingent, requiring contextual examination. The adoption of blockchain technology is positively associated with capabilities, resources, skills, and regulatory factors. On the other hand, cloud computing adoption is linked to resources, management support, and risk exposures. However, the specific impacts of industry on adoption remain inconclusive. 
This study aims to offer empirical validation of relationships, shedding light on the intricate nature of interactions that necessitate nuanced conceptualizations incorporating contextual moderators. The findings underscore the importance of providing customized support and adaptable guidance to accommodate the evolving practices in sustainability accounting. Moreover, the assimilation of technology and organizational changes highlights the need for multifaceted stakeholder cooperation to drive responsible innovation and address the challenges posed by digital transformations in this field.}, } @article {pmid38166050, year = {2024}, author = {Alourani, A and Khalid, A and Tahir, M and Sardaraz, M}, title = {Energy efficient virtual machines placement in cloud datacenters using genetic algorithm and adaptive thresholds.}, journal = {PloS one}, volume = {19}, number = {1}, pages = {e0296399}, pmid = {38166050}, issn = {1932-6203}, mesh = {*Conservation of Energy Resources ; *Algorithms ; Cloud Computing ; }, abstract = {Cloud computing platform provides on-demand IT services to users and advanced the technology. The purpose of virtualization is to improve the utilization of resources and reduce power consumption. Energy consumption is a major issue faced by data centers management. Virtual machine placement is an effective technique used for this purpose. Different algorithms have been proposed for virtual machine placement in cloud environments. These algorithms have considered different parameters. It is obvious that improving one parameter affects other parameters. There is still a need to reduce energy consumption in cloud data centers. Data centers need solutions that reduce energy consumption without affecting other parameters. There is a need to device solutions to effectively utilize cloud resources and reduce energy consumption. In this article, we present an algorithm for Virtual Machines (VMs) placement in cloud computing. 
The algorithm uses adaptive thresholding to identify over utilized and underutilized hosts to reduce energy consumption and Service Level Agreement (SLA) violations. The algorithm is validated with simulations and comparative results are presented.}, } @article {pmid38161217, year = {2024}, author = {Zhang, X and Dou, Z and Kim, SH and Upadhyay, G and Havert, D and Kang, S and Kazemi, K and Huang, KY and Aydin, O and Huang, R and Rahman, S and Ellis-Mohr, A and Noblet, HA and Lim, KH and Chung, HJ and Gritton, HJ and Saif, MTA and Kong, HJ and Beggs, JM and Gazzola, M}, title = {Mind In Vitro Platforms: Versatile, Scalable, Robust, and Open Solutions to Interfacing with Living Neurons.}, journal = {Advanced science (Weinheim, Baden-Wurttemberg, Germany)}, volume = {11}, number = {11}, pages = {e2306826}, pmid = {38161217}, issn = {2198-3844}, support = {2123781//National Science Foundation/ ; 1830881//National Science Foundation/ ; }, mesh = {Electrodes ; *Brain/physiology ; *Neurons/physiology ; Electric Stimulation ; Electrophysiological Phenomena/physiology ; }, abstract = {Motivated by the unexplored potential of in vitro neural systems for computing and by the corresponding need of versatile, scalable interfaces for multimodal interaction, an accurate, modular, fully customizable, and portable recording/stimulation solution that can be easily fabricated, robustly operated, and broadly disseminated is presented. This approach entails a reconfigurable platform that works across multiple industry standards and that enables a complete signal chain, from neural substrates sampled through micro-electrode arrays (MEAs) to data acquisition, downstream analysis, and cloud storage. Built-in modularity supports the seamless integration of electrical/optical stimulation and fluidic interfaces. Custom MEA fabrication leverages maskless photolithography, favoring the rapid prototyping of a variety of configurations, spatial topologies, and constitutive materials. 
Through a dedicated analysis and management software suite, the utility and robustness of this system are demonstrated across neural cultures and applications, including embryonic stem cell-derived and primary neurons, organotypic brain slices, 3D engineered tissue mimics, concurrent calcium imaging, and long-term recording. Overall, this technology, termed "mind in vitro" to underscore the computing inspiration, provides an end-to-end solution that can be widely deployed due to its affordable (>10× cost reduction) and open-source nature, catering to the expanding needs of both conventional and unconventional electrophysiology.}, } @article {pmid38155856, year = {2023}, author = {Lai, H and Chen, B and Yin, X and Wang, G and Wang, X and Yun, T and Lan, G and Wu, Z and Yang, C and Kou, W}, title = {Dry season temperature and rainy season precipitation significantly affect the spatio-temporal pattern of rubber plantation phenology in Yunnan province.}, journal = {Frontiers in plant science}, volume = {14}, number = {}, pages = {1283315}, pmid = {38155856}, issn = {1664-462X}, abstract = {The ongoing global warming trajectory poses extensive challenges to plant ecosystems, with rubber plantations particularly vulnerable due to their influence on not only the longevity of the growth cycle and rubber yield, but also the complex interplay of carbon, water, and energy exchanges between the forest canopy and atmosphere. However, the response mechanism of phenology in rubber plantations to climate change remains unclear. This study concentrates on sub-optimal environment rubber plantations in Yunnan province, Southwest China. Utilizing the Google Earth Engine (GEE) cloud platform, multi-source remote sensing images were synthesized at 8-day intervals with a spatial resolution of 30-meters. 
The Normalized Difference Vegetation Index (NDVI) time series was reconstructed using the Savitzky-Golay (S-G) filter, coupled with the application of the seasonal amplitude method to extract three crucial phenological indicators, namely the start of the growing season (SOS), the end of the growing season (EOS), and the length of the growing season (LOS). Linear regression method, Pearson correlation coefficient, multiple stepwise regression analysis were used to extract of the phenology trend and find the relationship between SOS, EOS and climate factors. The findings demonstrated that 1) the phenology of rubber plantations has undergone dynamic changes over the past two decades. Specifically, the SOS advanced by 9.4 days per decade (R[2] = 0.42, p< 0.01), whereas the EOS was delayed by 3.8 days per decade (R[2] = 0.35, p< 0.01). Additionally, the LOS was extended by 13.2 days per decade (R[2] = 0.55, p< 0.01); 2) rubber phenology demonstrated a notable sensitivity to temperature fluctuations during the dry season and precipitation patterns during the rainy season. The SOS advanced 2.0 days (r =-0.19, p< 0.01) and the EOS advanced 2.8 days (r =-0.35, p< 0.01) for every 1°C increase in the cool-dry season. Whereas a 100 mm increase in rainy season precipitation caused the SOS to be delayed by 2.0 days (r = 0.24, p< 0.01), a 100 mm increase in hot-dry season precipitation caused the EOS to be advanced by 7.0 days (r =-0.28, p< 0.01); 3) rubber phenology displayed a legacy effect of preseason climate variations. Changes in temperature during the fourth preseason month and precipitation during the fourth and eleventh preseason months are predominantly responsible for the variation in SOS. Meanwhile, temperature changes during the second, fourth, and ninth preseason months are primarily responsible for the variation in EOS. 
The study aims to enhance our understanding of how rubber plantations respond to climate change in sub-optimal environments and provide valuable insights for sustainable rubber production management in the face of changing environmental conditions.}, } @article {pmid38151930, year = {2023}, author = {Wang, X and Li, Q and Ma, C and Zhang, S and Lin, Y and Li, J and Liu, C}, title = {[Artificial intelligence in wearable electrocardiogram monitoring].}, journal = {Sheng wu yi xue gong cheng xue za zhi = Journal of biomedical engineering = Shengwu yixue gongchengxue zazhi}, volume = {40}, number = {6}, pages = {1084-1092}, pmid = {38151930}, issn = {1001-5515}, mesh = {Humans ; Artificial Intelligence ; Reproducibility of Results ; Electrocardiography ; *Cardiovascular Diseases ; *Wearable Electronic Devices ; }, abstract = {Electrocardiogram (ECG) monitoring owns important clinical value in diagnosis, prevention and rehabilitation of cardiovascular disease (CVD). With the rapid development of Internet of Things (IoT), big data, cloud computing, artificial intelligence (AI) and other advanced technologies, wearable ECG is playing an increasingly important role. With the aging process of the population, it is more and more urgent to upgrade the diagnostic mode of CVD. Using AI technology to assist the clinical analysis of long-term ECGs, and thus to improve the ability of early detection and prediction of CVD has become an important direction. Intelligent wearable ECG monitoring needs the collaboration between edge and cloud computing. Meanwhile, the clarity of medical scene is conducive for the precise implementation of wearable ECG monitoring. This paper first summarized the progress of AI-related ECG studies and the current technical orientation. Then three cases were depicted to illustrate how the AI in wearable ECG cooperate with the clinic. 
Finally, we demonstrated the two core issues-the reliability and worth of AI-related ECG technology and prospected the future opportunities and challenges.}, } @article {pmid38146308, year = {2024}, author = {Singh, S and Hou, F and Wang, R}, title = {Real and synthetic Punjabi speech datasets for automatic speech recognition.}, journal = {Data in brief}, volume = {52}, number = {}, pages = {109865}, doi = {10.1016/j.dib.2023.109865}, pmid = {38146308}, issn = {2352-3409}, abstract = {Automatic speech recognition (ASR) has been an active area of research. Training with large annotated datasets is the key to the development of robust ASR systems. However, most available datasets are focused on high-resource languages like English, leaving a significant gap for low-resource languages. Among these languages is Punjabi, despite its large number of speakers, Punjabi lacks high-quality annotated datasets for accurate speech recognition. To address this gap, we introduce three labeled Punjabi speech datasets: Punjabi Speech (real speech dataset) and Google-synth/CMU-synth (synthesized speech datasets). The Punjabi Speech dataset consists of read speech recordings captured in various environments, including both studio and open settings. In addition, the Google-synth dataset is synthesized using Google's Punjabi text-to-speech cloud services. Furthermore, the CMU-synth dataset is created using the Clustergen model available in the Festival speech synthesis system developed by CMU. 
These datasets aim to facilitate the development of accurate Punjabi speech recognition systems, bridging the resource gap for this important language.}, } @article {pmid38140780, year = {2023}, author = {Li, B and Du, K and Qu, G and Tang, N}, title = {Big data research in nursing: A bibliometric exploration of themes and publications.}, journal = {Journal of nursing scholarship : an official publication of Sigma Theta Tau International Honor Society of Nursing}, volume = {}, number = {}, pages = {}, doi = {10.1111/jnu.12954}, pmid = {38140780}, issn = {1547-5069}, support = {22A320067//the Key Research Project in Higher Education in Henan, China/ ; SBGJ202103076//Medical science and technology public relations project jointly built by Henan Health Commission/ ; HLKY2023002//Nursing research Special Fund of the First Affiliated Hospital of Zhengzhou University/ ; }, abstract = {AIMS: To comprehend the current research hotspots and emerging trends in big data research within the global nursing domain.

DESIGN: Bibliometric analysis.

METHODS: The quality articles for analysis indexed by the science core collection were obtained from the Web of Science database as of February 10, 2023. The descriptive, visual analysis and text mining were realized by CiteSpace and VOSviewer.

RESULTS: The research on big data in the nursing field has experienced steady growth over the past decade. A total of 45 core authors and 17 core journals around the world have contributed to this field. The author's keyword analysis has revealed five distinct clusters of research focus. These encompass machine/deep learning and artificial intelligence, natural language processing, big data analytics and data science, IoT and cloud computing, and the development of prediction models through data mining. Furthermore, a comparative examination was conducted with data spanning from 1980 to 2016, and an extended analysis was performed covering the years from 1980 to 2019. This bibliometric mapping comparison allowed for the identification of prevailing research trends and the pinpointing of potential future research hotspots within the field.

CONCLUSIONS: The fusion of data mining and nursing research has steadily advanced and become more refined over time. Technologically, it has expanded from initial natural language processing to encompass machine learning, deep learning, artificial intelligence, and data mining approach that amalgamates multiple technologies. Professionally, it has progressed from addressing patient safety and pressure ulcers to encompassing chronic diseases, critical care, emergency response, community and nursing home settings, and specific diseases (Cardiovascular diseases, diabetes, stroke, etc.). The convergence of IoT, cloud computing, fog computing, and big data processing has opened new avenues for research in geriatric nursing management and community care. However, a global imbalance exists in utilizing big data in nursing research, emphasizing the need to enhance data science literacy among clinical staff worldwide to advance this field.

CLINICAL RELEVANCE: This study focused on the thematic trends and evolution of research on the big data in nursing research. Moreover, this study may contribute to the understanding of researchers, journals, and countries around the world and generate the possible collaborations of them to promote the development of big data in nursing science.}, } @article {pmid38139731, year = {2023}, author = {Yang, X and Fang, H and Gao, Y and Wang, X and Wang, K and Liu, Z}, title = {Computation Offloading and Resource Allocation Based on P-DQN in LEO Satellite Edge Networks.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {24}, pages = {}, pmid = {38139731}, issn = {1424-8220}, support = {2020YFB1808003//National Key Research and Development Program of China/ ; 61801379//National Natural Science Foundation of China/ ; 2020JQ-647//Natural Science Foundation of Shaanxi Province of China/ ; }, abstract = {Traditional low earth orbit (LEO) satellite networks are typically independent of terrestrial networks, which develop relatively slowly due to the on-board capacity limitation. By integrating emerging mobile edge computing (MEC) with LEO satellite networks to form the business-oriented "end-edge-cloud" multi-level computing architecture, some computing-sensitive tasks can be offloaded by ground terminals to satellites, thereby satisfying more tasks in the network. How to make computation offloading and resource allocation decisions in LEO satellite edge networks, nevertheless, indeed poses challenges in tracking network dynamics and handling sophisticated actions. For the discrete-continuous hybrid action space and time-varying networks, this work aims to use the parameterized deep Q-network (P-DQN) for the joint computation offloading and resource allocation. First, the characteristics of time-varying channels are modeled, and then both communication and computation models under three different offloading decisions are constructed. 
Second, the constraints on task offloading decisions, on remaining available computing resources, and on the power control of LEO satellites as well as the cloud server are formulated, followed by the maximization problem of satisfied task number over the long run. Third, using the parameterized action Markov decision process (PAMDP) and P-DQN, the joint computing offloading, resource allocation, and power control are made in real time, to accommodate dynamics in LEO satellite edge networks and dispose of the discrete-continuous hybrid action space. Simulation results show that the proposed P-DQN method could approach the optimal control, and outperforms other reinforcement learning (RL) methods for merely either discrete or continuous action space, in terms of the long-term rate of satisfied tasks.}, } @article {pmid38139716, year = {2023}, author = {Aldaej, A and Ahanger, TA and Ullah, I}, title = {Deep Learning-Inspired IoT-IDS Mechanism for Edge Computing Environments.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {24}, pages = {}, pmid = {38139716}, issn = {1424-8220}, support = {2022/01/21723//Prince Sattam Bin Abdulaziz University/ ; }, abstract = {The Internet of Things (IoT) technology has seen substantial research in Deep Learning (DL) techniques to detect cyberattacks. Critical Infrastructures (CIs) must be able to quickly detect cyberattacks close to edge devices in order to prevent service interruptions. DL approaches outperform shallow machine learning techniques in attack detection, giving them a viable alternative for use in intrusion detection. However, because of the massive amount of IoT data and the computational requirements for DL models, transmission overheads prevent the successful implementation of DL models closer to the devices. As they were not trained on pertinent IoT, current Intrusion Detection Systems (IDS) either use conventional techniques or are not intended for scattered edge-cloud deployment. 
A new edge-cloud-based IoT IDS is suggested to address these issues. It uses distributed processing to separate the dataset into subsets appropriate to different attack classes and performs attribute selection on time-series IoT data. Next, DL is used to train an attack detection Recurrent Neural Network, which consists of a Recurrent Neural Network (RNN) and Bidirectional Long Short-Term Memory (LSTM). The high-dimensional BoT-IoT dataset, which replicates massive amounts of genuine IoT attack traffic, is used to test the proposed model. Despite an 85 percent reduction in dataset size made achievable by attribute selection approaches, the attack detection capability was kept intact. The models built utilizing the smaller dataset demonstrated a higher recall rate (98.25%), F1-measure (99.12%), accuracy (99.56%), and precision (99.45%) with no loss in class discrimination performance compared to models trained on the entire attribute set. With the smaller attribute space, neither the RNN nor the Bi-LSTM models experienced underfitting or overfitting. The proposed DL-based IoT intrusion detection solution has the capability to scale efficiently in the face of large volumes of IoT data, thus making it an ideal candidate for edge-cloud deployment.}, } @article {pmid38139704, year = {2023}, author = {Peixoto, J and Sousa, J and Carvalho, R and Santos, G and Cardoso, R and Reis, A}, title = {End-to-End Solution for Analog Gauge Monitoring Using Computer Vision in an IoT Platform.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {24}, pages = {}, pmid = {38139704}, issn = {1424-8220}, support = {POCI-01-0247-FEDER-047091-GRS: Glartek Retrofit Sensors//Fundo Europeu de Desenvolvimento Regional (FEDER)/ ; }, abstract = {The emergence of Industry 4.0 and 5.0 technologies has enabled the digital transformation of various processes and the integration of sensors with the internet. 
Despite these strides, many industrial sectors still rely on visual inspection of physical processes, especially those employing analog gauges. This method of monitoring introduces the risk of human errors and inefficiencies. Automating these processes has the potential, not only to boost productivity for companies, but also potentially reduce risks for workers. Therefore, this paper proposes an end-to-end solution to digitize analog gauges and monitor them using computer vision through integrating them into an IoT architecture, to tackle these problems. Our prototype device has been designed to capture images of gauges and transmit them to a remote server, where computer vision algorithms analyze the images and obtain gauge readings. These algorithms achieved adequate robustness and accuracy for industrial environments, with an average relative error of 0.95%. In addition, the gauge data were seamlessly integrated into an IoT platform leveraging computer vision and cloud computing technologies. This integration empowers users to create custom dashboards for real-time gauge monitoring, while also enabling them to set thresholds, alarms, and warnings, as needed. The proposed solution was tested and validated in a real-world industrial scenario, demonstrating the solution's potential to be implemented in a large-scale setting to serve workers, reduce costs, and increase productivity.}, } @article {pmid38139612, year = {2023}, author = {Ju, S and Park, Y}, title = {Provably Secure Lightweight Mutual Authentication and Key Agreement Scheme for Cloud-Based IoT Environments.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {24}, pages = {}, pmid = {38139612}, issn = {1424-8220}, support = {2022//Keimyung University/ ; }, abstract = {A paradigm that combines cloud computing and the Internet of Things (IoT) allows for more impressive services to be provided to users while addressing storage and computational resource issues in the IoT environments. 
This cloud-based IoT environment has been used in various industries, including public services, for quite some time, and has been researched in academia. However, various security issues can arise during the communication between IoT devices and cloud servers, because communication between devices occurs in open channels. Moreover, issues such as theft of a user's IoT device or extraction of key parameters from the user's device in a remote location can arise. Researchers interested in these issues have proposed lightweight mutual authentication key agreement protocols that are safe and suitable for IoT environments. Recently, a lightweight authentication scheme between IoT devices and cloud servers has been presented. However, we found out their scheme had various security vulnerabilities, vulnerable to insider, impersonation, verification table leakage, and privileged insider attacks, and did not provide users with untraceability. To address these flaws, we propose a provably secure lightweight authentication scheme. The proposed scheme uses the user's biometric information and the cloud server's secret key to prevent the exposure of key parameters. Additionally, it ensures low computational costs for providing users with real-time and fast services using only exclusive OR operations and hash functions in the IoT environments. To analyze the safety of the proposed scheme, we use informal security analysis, Burrows-Abadi-Needham (BAN) logic and a Real-or-Random (RoR) model. The analysis results confirm that our scheme is secure against insider attacks, impersonation attacks, stolen verifier attacks, and so on; furthermore, it provides additional security elements. Simultaneously, it has been verified to possess enhanced communication costs, and total bit size has been shortened to 3776 bits, which is improved by almost 6% compared to Wu et al.'s scheme. 
Therefore, we demonstrate that the proposed scheme is suitable for cloud-based IoT environments.}, } @article {pmid38139476, year = {2023}, author = {Zhang, T and Fan, Y}, title = {A 3D U-Net Based on a Vision Transformer for Radar Semantic Segmentation.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {24}, pages = {}, pmid = {38139476}, issn = {1424-8220}, support = {61976033//National Natural Science Foundation of China/ ; 2022JH24/10200029//Pilot Base Construction and Pilot Verification Plan Program of Liaoning Province of China/ ; 2019JH8/10100100//Key Development Guidance Program of Liaoning Province of China/ ; 2022M710569//China Postdoctoral Science Foundation/ ; }, abstract = {Radar data can be presented in various forms, unlike visible data. In the field of radar target recognition, most current work involves point cloud data due to computing limitations, but this form of data lacks useful information. This paper proposes a semantic segmentation network to process high-dimensional data and enable automatic radar target recognition. Rather than relying on point cloud data, which is common in current radar automatic target recognition algorithms, the paper suggests using a radar heat map of high-dimensional data to increase the efficiency of radar data use. The radar heat map provides more complete information than point cloud data, leading to more accurate classification results. Additionally, this paper proposes a dimension collapse module based on a vision transformer for feature extraction between two modules with dimension differences during dimension changes in high-dimensional data. This module is easily extendable to other networks with high-dimensional data collapse requirements. 
The network's performance is verified using a real radar dataset, showing that the radar semantic segmentation network based on a vision transformer has better performance and fewer parameters compared to segmentation networks that use other dimensional collapse methods.}, } @article {pmid38136978, year = {2023}, author = {Song, Y and Zhong, S and Li, Y and Jiang, M and Wei, Q}, title = {Constructing an Interactive and Integrated Analysis and Identification Platform for Pathogenic Microorganisms to Support Surveillance Capacity.}, journal = {Genes}, volume = {14}, number = {12}, pages = {}, pmid = {38136978}, issn = {2073-4425}, support = {2022YFC2602200//Supported by National Key Research and Development Program of China/ ; }, mesh = {*Software ; *User-Computer Interface ; Genomics/methods ; Computational Biology/methods ; Genome ; }, abstract = {INTRODUCTION: Whole genome sequencing (WGS) holds significant promise for epidemiological inquiries, as it enables the identification and tracking of pathogenic origins and dissemination through comprehensive genome analysis. This method is widely preferred for investigating outbreaks and monitoring pathogen activity. However, the effective utilization of microbiome sequencing data remains a challenge for clinical and public health experts. Through the National Pathogen Resource Center, we have constructed a dynamic and interactive online analysis platform to facilitate the in-depth analysis and use of pathogen genomic data, by public health and associated professionals, to support infectious disease surveillance framework building and capacity warnings.

METHOD: The platform was implemented using the Java programming language, and the front-end pages were developed using the VUE framework, following the MVC (Model-View-Controller) pattern to enable interactive service functionalities for front-end data collection and back-end data computation. Cloud computing services were employed to integrate biological information analysis tools for conducting fundamental analysis on sequencing data.

RESULT: The platform achieved the goal of non-programming analysis, providing an interactive visual interface that allows users to visually obtain results by setting parameters in web pages. Moreover, the platform allows users to export results in various formats to further support their research.

DISCUSSION: We have established a dynamic and interactive online platform for bioinformatics analysis. By encapsulating the complex background experiments and analysis processes in a cloud-based service platform, the complex background experiments and analysis processes are presented to the end-user in a simple and interactive manner. It facilitates real-time data mining and analysis by allowing users to independently select parameters and generate analysis results at the click of a button, based on their needs, without the need for a programming foundation.}, } @article {pmid38136521, year = {2023}, author = {Xia, C and Jin, X and Xu, C and Zeng, P}, title = {Computational-Intelligence-Based Scheduling with Edge Computing in Cyber-Physical Production Systems.}, journal = {Entropy (Basel, Switzerland)}, volume = {25}, number = {12}, pages = {}, pmid = {38136521}, issn = {1099-4300}, support = {61903356//National Natural Science Foundation of China/ ; }, abstract = {Real-time performance and reliability are two critical indicators in cyber-physical production systems (CPPS). To meet strict requirements in terms of these indicators, it is necessary to solve complex job-shop scheduling problems (JSPs) and reserve considerable redundant resources for unexpected jobs before production. However, traditional job-shop methods are difficult to apply under dynamic conditions due to the uncertain time cost of transmission and computation. Edge computing offers an efficient solution to this issue. By deploying edge servers around the equipment, smart factories can achieve localized decisions based on computational intelligence (CI) methods offloaded from the cloud. Most works on edge computing have studied task offloading and dispatching scheduling based on CI. 
However, few of the existing methods can be used for behavior-level control due to the corresponding requirements for ultralow latency (10 ms) and ultrahigh reliability (99.9999% in wireless transmission), especially when unexpected computing jobs arise. Therefore, this paper proposes a dynamic resource prediction scheduling (DRPS) method based on CI to achieve real-time localized behavior-level control. The proposed DRPS method primarily focuses on the schedulability of unexpected computing jobs, and its core ideas are (1) to predict job arrival times based on a backpropagation neural network and (2) to perform real-time migration in the form of human-computer interaction based on the results of resource analysis. An experimental comparison with existing schemes shows that our DRPS method improves the acceptance ratio by 25.9% compared to the earliest deadline first scheme.}, } @article {pmid38136475, year = {2023}, author = {Kang, H and Liu, G and Wang, Q and Meng, L and Liu, J}, title = {Theory and Application of Zero Trust Security: A Brief Survey.}, journal = {Entropy (Basel, Switzerland)}, volume = {25}, number = {12}, pages = {}, pmid = {38136475}, issn = {1099-4300}, abstract = {As cross-border access becomes more frequent, traditional perimeter-based network security models can no longer cope with evolving security requirements. Zero trust is a novel paradigm for cybersecurity based on the core concept of "never trust, always verify". It attempts to protect against security risks related to internal threats by eliminating the demarcations between the internal and external network of traditional network perimeters. Nevertheless, research on the theory and application of zero trust is still in its infancy, and more extensive research is necessary to facilitate a deeper understanding of the paradigm in academia and the industry. 
In this paper, trust in cybersecurity is discussed, following which the origin, concepts, and principles related to zero trust are elaborated on. The characteristics, strengths, and weaknesses of the existing research are analysed in the context of zero trust achievements and their technical applications in Cloud and IoT environments. Finally, to support the development and application of zero trust in the future, the concept and its current challenges are analysed.}, } @article {pmid38134209, year = {2024}, author = {Wang, J and Hu, Y and Xiang, L and Morota, G and Brooks, SA and Wickens, CL and Miller-Cushon, EK and Yu, H}, title = {Technical note: ShinyAnimalCV: open-source cloud-based web application for object detection, segmentation, and three-dimensional visualization of animals using computer vision.}, journal = {Journal of animal science}, volume = {102}, number = {}, pages = {}, pmid = {38134209}, issn = {1525-3163}, mesh = {Animals ; *Cloud Computing ; *Imaging, Three-Dimensional/veterinary ; Software ; Computers ; Animal Husbandry ; Livestock ; }, abstract = {Computer vision (CV), a non-intrusive and cost-effective technology, has furthered the development of precision livestock farming by enabling optimized decision-making through timely and individualized animal care. The availability of affordable two- and three-dimensional camera sensors, combined with various machine learning and deep learning algorithms, has provided a valuable opportunity to improve livestock production systems. However, despite the availability of various CV tools in the public domain, applying these tools to animal data can be challenging, often requiring users to have programming and data analysis skills, as well as access to computing resources. Moreover, the rapid expansion of precision livestock farming is creating a growing need to educate and train animal science students in CV. 
This presents educators with the challenge of efficiently demonstrating the complex algorithms involved in CV. Thus, the objective of this study was to develop ShinyAnimalCV, an open-source cloud-based web application designed to facilitate CV teaching in animal science. This application provides a user-friendly interface for performing CV tasks, including object segmentation, detection, three-dimensional surface visualization, and extraction of two- and three-dimensional morphological features. Nine pre-trained CV models using top-view animal data are included in the application. ShinyAnimalCV has been deployed online using cloud computing platforms. The source code of ShinyAnimalCV is available on GitHub, along with detailed documentation on training CV models using custom data and deploying ShinyAnimalCV locally to allow users to fully leverage the capabilities of the application. ShinyAnimalCV can help to support the teaching of CV, thereby laying the groundwork to promote the adoption of CV in the animal science community.}, } @article {pmid38133241, year = {2023}, author = {Afonso, CL and Afonso, AM}, title = {Next-Generation Sequencing for the Detection of Microbial Agents in Avian Clinical Samples.}, journal = {Veterinary sciences}, volume = {10}, number = {12}, pages = {}, pmid = {38133241}, issn = {2306-7381}, abstract = {Direct-targeted next-generation sequencing (tNGS), with its undoubtedly superior diagnostic capacity over real-time PCR (RT-PCR), and direct-non-targeted NGS (ntNGS), with its higher capacity to identify and characterize multiple agents, are both likely to become diagnostic methods of choice in the future. tNGS is a rapid and sensitive method for precise characterization of suspected agents. ntNGS, also known as agnostic diagnosis, does not require a hypothesis and has been used to identify unsuspected infections in clinical samples. 
Implemented in the form of multiplexed total DNA metagenomics or as total RNA sequencing, the approach produces comprehensive and actionable reports that allow semi-quantitative identification of most of the agents present in respiratory, cloacal, and tissue samples. The diagnostic benefits of the use of direct tNGS and ntNGS are high specificity, compatibility with different types of clinical samples (fresh, frozen, FTA cards, and paraffin-embedded), production of nearly complete infection profiles (viruses, bacteria, fungus, and parasites), production of "semi-quantitative" information, direct agent genotyping, and infectious agent mutational information. The achievements of NGS in terms of diagnosing poultry problems are described here, along with future applications. Multiplexing, development of standard operating procedures, robotics, sequencing kits, automated bioinformatics, cloud computing, and artificial intelligence (AI) are disciplines converging toward the use of this technology for active surveillance in poultry farms. Other advances in human and veterinary NGS sequencing are likely to be adaptable to avian species in the future.}, } @article {pmid38126383, year = {2023}, author = {Fonseca, ELD and Santos, ECD and Figueiredo, AR and Simões, JC}, title = {The use of sentinel-2 imagery to generate vegetations maps for the Northern Antarctic peninsula and offshore islands.}, journal = {Anais da Academia Brasileira de Ciencias}, volume = {95}, number = {suppl 3}, pages = {e20230710}, doi = {10.1590/0001-3765202320230710}, pmid = {38126383}, issn = {1678-2690}, mesh = {Antarctic Regions ; *Plants ; *Bryophyta ; }, abstract = {We used Sentinel-2 imagery time series to generate a vegetation map for the Northern part of the Antarctica Peninsula and offshore islands, including the South Shetlands. The vegetation cover was identified in the NDVI maximum value composite image. 
The NDVI values were associated with the occurrence of algae (0.15 - 0.20), lichens (0.20 - 0.50), and mosses (0.50 - 0.80). The vegetation cover distribution map was validated using the literature information. Generating a vegetation map distribution on an annual basis was not possible due to high cloud cover in the Antarctic region, especially in coastal areas, so optical images from 2016 to 2021 were necessary to map the vegetation distribution in the entire study area. The final map analyzed in association with the weather data shows the occurrence of a microenvironment over the western islands of the Antarctic Peninsula that provided vegetation growth conditions. The Sentinel-2 images with 10m spatial resolution allow the assembly of accurate vegetation distribution maps for the Antarctica Peninsula and Islands, the Google Earth Engine cloud computing being essential to process a large amount of the satellite images necessary for processing these maps.}, } @article {pmid38124874, year = {2023}, author = {Intelligence And Neuroscience, C}, title = {Retracted: Blockchain-Based Trust Management Framework for Cloud Computing-Based Internet of Medical Things (IoMT): A Systematic Review.}, journal = {Computational intelligence and neuroscience}, volume = {2023}, number = {}, pages = {9867976}, pmid = {38124874}, issn = {1687-5273}, abstract = {[This retracts the article DOI: 10.1155/2022/9766844.].}, } @article {pmid38124577, year = {2023}, author = {Niu, S and Liu, W and Yan, S and Liu, Q}, title = {Message sharing scheme based on edge computing in IoV.}, journal = {Mathematical biosciences and engineering : MBE}, volume = {20}, number = {12}, pages = {20809-20827}, doi = {10.3934/mbe.2023921}, pmid = {38124577}, issn = {1551-0018}, abstract = {With the rapid development of 5G wireless communication and sensing technology, the Internet of Vehicles (IoV) will establish a widespread network between vehicles and roadside infrastructure. 
The collected road information is transferred to the cloud server with the assistance of roadside infrastructure, where it is stored and made available to other vehicles as a resource. However, in an open cloud environment, message confidentiality and vehicle identity privacy are severely compromised, and current attribute-based encryption algorithms still burden vehicles with large computational costs. In order to resolve these issues, we propose a message-sharing scheme in IoV based on edge computing. To start, we utilize attribute-based encryption techniques to protect the communications being delivered. We introduce edge computing, in which the vehicle outsources some operations in encryption and decryption to roadside units to reduce the vehicle's computational load. Second, to guarantee the integrity of the message and the security of the vehicle identity, we utilize anonymous identity-based signature technology. At the same time, we can batch verify the message, which further reduces the time and transmission of verifying a large number of message signatures. Based on the computational Diffie-Hellman problem, it is demonstrated that the proposed scheme is secure under the random oracle model. 
Finally, the performance analysis results show that our work is more computationally efficient compared to existing schemes and is more suitable for actual vehicle networking.}, } @article {pmid38114166, year = {2023}, author = {Ma, XR and Wang, BX and Zhao, WS and Cong, DG and Sun, W and Xiong, HS and Zhang, SN}, title = {[Application progress on data-driven technologies in intelligent manufacturing of traditional Chinese medicine extraction].}, journal = {Zhongguo Zhong yao za zhi = Zhongguo zhongyao zazhi = China journal of Chinese materia medica}, volume = {48}, number = {21}, pages = {5701-5706}, doi = {10.19540/j.cnki.cjcmm.20230824.601}, pmid = {38114166}, issn = {1001-5302}, mesh = {*Medicine, Chinese Traditional ; *Drugs, Chinese Herbal ; Quality Control ; Big Data ; Algorithms ; }, abstract = {The application of new-generation information technologies such as big data, the internet of things(IoT), and cloud computing in the traditional Chinese medicine(TCM)manufacturing industry is gradually deepening, driving the intelligent transformation and upgrading of the TCM industry. At the current stage, there are challenges in understanding the extraction process and its mechanisms in TCM. Online detection technology faces difficulties in making breakthroughs, and data throughout the entire production process is scattered, lacking valuable mining and utilization, which significantly hinders the intelligent upgrading of the TCM industry. Applying data-driven technologies in the process of TCM extraction can enhance the understanding of the extraction process, achieve precise control, and effectively improve the quality of TCM products. 
This article analyzed the technological bottlenecks in the production process of TCM extraction, summarized commonly used data-driven algorithms in the research and production control of extraction processes, and reviewed the progress in the application of data-driven technologies in the following five aspects: mechanism analysis of the extraction process, process development and optimization, online detection, process control, and production management. This article is expected to provide references for optimizing the extraction process and intelligent production of TCM.}, } @article {pmid38113434, year = {2024}, author = {Brown, C and Agarwal, A and Luque, A}, title = {pyCapsid: identifying dominant dynamics and quasi-rigid mechanical units in protein shells.}, journal = {Bioinformatics (Oxford, England)}, volume = {40}, number = {1}, pages = {}, pmid = {38113434}, issn = {1367-4811}, support = {1951678//National Science Foundation/ ; GBMF9871//Gordon and Betty Moore Foundation/ ; }, mesh = {*Software ; *Proteins ; Amino Acids ; Documentation ; }, abstract = {SUMMARY: pyCapsid is a Python package developed to facilitate the characterization of the dynamics and quasi-rigid mechanical units of protein shells and other protein complexes. The package was developed in response to the rapid increase of high-resolution structures, particularly capsids of viruses, requiring multiscale biophysical analyses. Given a protein shell, pyCapsid generates the collective vibrations of its amino-acid residues, identifies quasi-rigid mechanical regions associated with the disassembly of the structure, and maps the results back to the input proteins for interpretation. pyCapsid summarizes the main results in a report that includes publication-quality figures.

pyCapsid's source code is available under MIT License on GitHub. It is compatible with Python 3.8-3.10 and has been deployed in two leading Python package-management systems, PIP and Conda. Installation instructions and tutorials are available in the online documentation and in the pyCapsid's YouTube playlist. In addition, a cloud-based implementation of pyCapsid is available as a Google Colab notebook. pyCapsid Colab does not require installation and generates the same report and outputs as the installable version. Users can post issues regarding pyCapsid in the repository's issues section.}, } @article {pmid38113067, year = {2023}, author = {Faisal, S and Samoth, D and Aslam, Y and Patel, H and Park, S and Baby, B and Patel, T}, title = {Key Features of Smart Medication Adherence Products: Updated Scoping Review.}, journal = {JMIR aging}, volume = {6}, number = {}, pages = {e50990}, pmid = {38113067}, issn = {2561-7605}, abstract = {BACKGROUND: Older adults often face challenges in self-managing their medication owing to physical and cognitive limitations, complex medication regimens, and packaging of medications. Emerging smart medication dispensing and adherence products (SMAPs) offer the options of automated dispensing, tracking medication intake in real time, and reminders and notifications. A 2021 review identified 51 SMAPs owing to the rapid influx of digital technology; an update to this review is required.

OBJECTIVE: This review aims to identify new products and summarize and compare the key features of SMAPs.

METHODS: Gray and published literature and videos were searched using Google, YouTube, PubMed, Embase, and Scopus. The first 10 pages of Google and the first 100 results of YouTube were screened using 4 and 5 keyword searches, respectively. SMAPs were included if they were able to store and allowed for the dispensation of medications, tracked real-time medication intake data, and could automatically analyze data. Products were excluded if they were stand-alone software applications, not marketed in English, not for in-home use, or only used in clinical trials. In total, 5 researchers independently screened and extracted the data.

RESULTS: This review identified 114 SMAPs, including 80 (70.2%) marketed and 34 (29.8%) prototypes, grouped into 15 types. Among the marketed products, 68% (54/80) were available for consumer purchase. Of these products, 26% (14/54) were available worldwide and 78% (42/54) were available in North America. There was variability in the hardware, software, data collection and management features, and cost of the products. Examples of hardware features include battery life, medication storage capacity, availability of types and number of alarms, locking features, and additional technology required for use of the product, whereas software features included reminder and notification capabilities and availability of manufacturer support. Data capture methods included the availability of sensors to record the use of the product and data-syncing capabilities with cloud storage with short-range communications. Data were accessible to users via mobile apps or web-based portals. Some SMAPs provided data security assurance with secure log-ins (use of personal identification numbers or facial recognition), whereas other SMAPs provided data through registered email addresses. Although some SMAPs were available at set prices or free of cost to end users, the cost of other products varied based on availability, shipping fees, and subscription fees.

CONCLUSIONS: An expanding market for SMAPs with features specific to at-home patient use is emerging. Health care professionals can use these features to select and suggest products that meet their patients' unique requirements.}, } @article {pmid38107765, year = {2023}, author = {Alam, AKMM and Chen, K}, title = {TEE-Graph: efficient privacy and ownership protection for cloud-based graph spectral analysis.}, journal = {Frontiers in big data}, volume = {6}, number = {}, pages = {1296469}, pmid = {38107765}, issn = {2624-909X}, abstract = {INTRODUCTION: Big graphs like social network user interactions and customer rating matrices require significant computing resources to maintain. Data owners are now using public cloud resources for storage and computing elasticity. However, existing solutions do not fully address the privacy and ownership protection needs of the key involved parties: data contributors and the data owner who collects data from contributors.

METHODS: We propose a Trusted Execution Environment (TEE) based solution: TEE-Graph for graph spectral analysis of outsourced graphs in the cloud. TEEs are new CPU features that can enable much more efficient confidential computing solutions than traditional software-based cryptographic ones. Our approach has several unique contributions compared to existing confidential graph analysis approaches. (1) It utilizes the unique TEE properties to ensure contributors' new privacy needs, e.g., the right of revocation for shared data. (2) It implements efficient access-pattern protection with a differentially private data encoding method. And (3) it implements TEE-based special analysis algorithms: the Lanczos method and the Nystrom method for efficiently handling big graphs and protecting confidentiality from compromised cloud providers.

RESULTS: The TEE-Graph approach is much more efficient than software crypto approaches and also immune to access-pattern-based attacks. Compared with the best-known software crypto approach for graph spectral analysis, PrivateGraph, we have seen that TEE-Graph has 10[3]-10[5] times lower computation, storage, and communication costs. Furthermore, the proposed access-pattern protection method incurs only about 10%-25% of the overall computation cost.

DISCUSSION: Our experimentation showed that TEE-Graph performs significantly better and has lower costs than typical software approaches. It also addresses the unique ownership and access-pattern issues that other TEE-related graph analytics approaches have not sufficiently studied. The proposed approach can be extended to other graph analytics problems with strong ownership and access-pattern protection.}, } @article {pmid38093855, year = {2024}, author = {Ortega Candel, JM and Mora Gimeno, FJ and Mora Mora, H}, title = {Generation of a dataset for DoW attack detection in serverless architectures.}, journal = {Data in brief}, volume = {52}, number = {}, pages = {109921}, pmid = {38093855}, issn = {2352-3409}, abstract = {Denial of Wallet (DoW) attacks refers to a type of cyberattack that aims to exploit and exhaust the financial resources of an organization by triggering excessive costs or charges within their cloud or serverless computing environment. These attacks are particularly relevant in the context of serverless architectures due to characteristics like pay-as-you-go model, auto-scaling, limited control and cost amplification. Serverless computing, often referred to as Function-as-a-Service (FaaS), is a cloud computing model that allows developers to build and run applications without the need to manage traditional server infrastructure. Serverless architectures have gained popularity in cloud computing due to their flexibility and ability to scale automatically based on demand. These architectures are based on executing functions without the need to manage the underlying infrastructure. However, the lack of realistic and representative datasets that simulate function invocations in serverless environments has been a challenge for research and development of solutions in this field. 
The aim is to create a dataset for simulating function invocations in serverless architectures, that is a valuable practice for ensuring the reliability, efficiency, and security of serverless applications. Furthermore, we propose a methodology for the generation of the dataset, which involves the generation of synthetic data from traffic generated on cloud platforms and the identification of the main characteristics of function invocations. These characteristics include SubmitTime, Invocation Delay, Response Delay, Function Duration, Active Functions at Request, Active Functions at Response. By generating this dataset, we expect to facilitate the detection of Denial of Wallet (DoW) attacks using machine learning techniques and neural networks. In this way, this dataset available in Mendeley data repository could provide other researchers and developers with a dataset to test and evaluate machine learning algorithms or use other techniques based on the detection of attacks and anomalies in serverless environments.}, } @article {pmid38090001, year = {2023}, author = {Quan, G and Yao, Z and Chen, L and Fang, Y and Zhu, W and Si, X and Li, M}, title = {A trusted medical data sharing framework for edge computing leveraging blockchain and outsourced computation.}, journal = {Heliyon}, volume = {9}, number = {12}, pages = {e22542}, pmid = {38090001}, issn = {2405-8440}, abstract = {Traditional cloud-centric approaches to medical data sharing pose risks related to real-time performance, security, and stability. Medical and healthcare data encounter challenges like data silos, privacy breaches, and transmission latency. In response to these challenges, this paper introduces a blockchain-based framework for trustworthy medical data sharing in edge computing environments. Leveraging healthcare consortium edge blockchains, this framework enables fine-grained access control to medical data. 
Specifically, it addresses the real-time, multi-attribute authorization challenge in CP-ABE through a Distributed Attribute Authorization strategy (DAA) based on blockchain. Furthermore, it tackles the key security issues in CP-ABE through a Distributed Key Generation protocol (DKG) based on blockchain. To address computational resource constraints in CP-ABE, we enhance a Distributed Modular Exponentiation Outsourcing algorithm (DME) and elevate its verifiable probability to "1". Theoretical analysis establishes the IND-CPA security of this framework in the Random Oracle Model. Experimental results demonstrate the effectiveness of our solution for resource-constrained end-user devices in edge computing environments.}, } @article {pmid38082849, year = {2023}, author = {Calo, J and Lo, B}, title = {IoT Federated Blockchain Learning at the Edge.}, journal = {Annual International Conference of the IEEE Engineering in Medicine and Biology Society. IEEE Engineering in Medicine and Biology Society. Annual International Conference}, volume = {2023}, number = {}, pages = {1-4}, doi = {10.1109/EMBC40787.2023.10339946}, pmid = {38082849}, issn = {2694-0604}, mesh = {Humans ; *Blockchain ; Hospitals ; Intelligence ; Machine Learning ; *Medicine ; }, abstract = {IoT devices are sorely underutilized in the medical field, especially within machine learning for medicine, yet they offer unrivaled benefits. 
IoT devices are low cost, energy efficient, small and intelligent devices [1]. In this paper, we propose a distributed federated learning framework for IoT devices, more specifically for IoMT (Internet of Medical Things), using blockchain to allow for a decentralized scheme improving privacy and efficiency over a centralized system; this allows us to move from the cloud based architectures, that are prevalent, to the edge. The system is designed for three paradigms: 1) Training neural networks on IoT devices to allow for collaborative training of a shared model whilst decoupling the learning from the dataset [2] to ensure privacy [3]. Training is performed in an online manner simultaneously amongst all participants, allowing for training of actual data that may not have been present in a dataset collected in the traditional way and dynamically adapt the system whilst it is being trained. 2) Training of an IoMT system in a fully private manner such as to mitigate the issue with confidentiality of medical data and to build robust, and potentially bespoke [4], models where not much, if any, data exists. 3) Distribution of the actual network training, something federated learning itself does not do, to allow hospitals, for example, to utilize their spare computing resources to train network models.}, } @article {pmid38077560, year = {2023}, author = {Wang, Z}, title = {An English course practice evaluation system based on multi-source mobile information and IoT technology.}, journal = {PeerJ. Computer science}, volume = {9}, number = {}, pages = {e1615}, pmid = {38077560}, issn = {2376-5992}, abstract = {With the increased use of online English courses, the quality of the course directly determines its efficacy. Recently, various industries have continuously employed Internet of Things (IoT) technology, which has considerable scene adaptability. 
To better supervise the specific content of English courses, we discuss how to apply multi-source mobile Internet of Things information technology to the practical evaluation system of English courses to boost the performance of English learning evaluation. Therefore, by analyzing the problems of existing English course evaluation and the characteristics of multi-source mobile Internet of Things information technology, this article designs an English course practical evaluation system based on multi-source data collection, processing, and analysis. The system can collect real-time student voices, behavior, and other data through mobile devices. Then, analyze the data using cloud computing and data mining technology and provide real-time learning progress and feedback. We can demonstrate that the accuracy of the evaluation system can reach 80.23%, which can effectively improve the efficiency of English learning evaluation, provide a new method for English teaching evaluation, and further improve and optimize the English education teaching content to meet the needs of the actual teaching environment.}, } @article {pmid38077558, year = {2023}, author = {Gu, H and Wang, J and Yu, J and Wang, D and Li, B and He, X and Yin, X}, title = {Towards virtual machine scheduling research based on multi-decision AHP method in the cloud computing platform.}, journal = {PeerJ. Computer science}, volume = {9}, number = {}, pages = {e1675}, pmid = {38077558}, issn = {2376-5992}, abstract = {Virtual machine scheduling and resource allocation mechanism in the process of dynamic virtual machine consolidation is a promising access to alleviate the cloud data centers of prominent energy consumption and service level agreement violations with improvement in quality of service (QoS). In this article, we propose an efficient algorithm (AESVMP) based on the Analytic Hierarchy Process (AHP) for the virtual machine scheduling in accordance with the measure. 
Firstly, we take into consideration three key criteria including the host of power consumption, available resource and resource allocation balance ratio, in which the ratio can be calculated by the balance value between overall three-dimensional resource (CPU, RAM, BW) flat surface and resource allocation flat surface (when new migrated virtual machine (VM) consumed the targeted host's resource). Then, virtual machine placement decision is determined by the application of multi-criteria decision making techniques AHP embedded with the above-mentioned three criteria. Extensive experimental results based on the CloudSim emulator using 10 PlanetLab workloads demonstrate that the proposed approach can reduce the cloud data center of number of migration, service level agreement violation (SLAV), aggregate indicators of energy consumption (ESV) by an average of 51.76%, 67.4%, 67.6% compared with the cutting-edge method LBVMP, which validates the effectiveness.}, } @article {pmid38077531, year = {2023}, author = {Eljack, S and Jemmali, M and Denden, M and Turki, S and Khedr, WM and Algashami, AM and ALsadig, M}, title = {A secure solution based on load-balancing algorithms between regions in the cloud environment.}, journal = {PeerJ. Computer science}, volume = {9}, number = {}, pages = {e1513}, pmid = {38077531}, issn = {2376-5992}, abstract = {The problem treated in this article is the storage of sensitive data in the cloud environment and how to choose regions and zones to minimize the number of transfer file events. Handling sensitive data in the global internet network many times can increase risks and minimize security levels. Our work consists of scheduling several files on the different regions based on the security and load balancing parameters in the cloud. Each file is characterized by its size. If data is misplaced from the start it will require a transfer from one region to another and sometimes from one area to another. 
The objective is to find a schedule that assigns these files to the appropriate region ensuring the load balancing executed in each region to guarantee the minimum number of migrations. This problem is NP-hard. A novel model regarding the regional security and load balancing of files in the cloud environment is proposed in this article. This model is based on the component called "Scheduler" which utilizes the proposed algorithms to solve the problem. This model is a secure solution to guarantee an efficient dispersion of the stored files to avoid the most storage in one region. Consequently, damage to this region does not cause a loss of big data. In addition, a novel method called the "Grouping method" is proposed. Several variants of the application of this method are utilized to propose novel algorithms for solving the studied problem. Initially, seven algorithms are proposed in this article. The experimental results show that there is no dominance between these algorithms. Therefore, three combinations of these seven algorithms generate three other algorithms with better results. Based on the dominance rule, only six algorithms are selected to discuss the performance of the proposed algorithms. Four classes of instances are generated to measure and test the performance of algorithms. In total, 1,360 instances are tested. Three metrics are used to assess the algorithms and make a comparison between them. 
The experimental results show that the best algorithm is the "Best-value of four algorithms" in 86.5% of cases with an average gap of 0.021 and an average running time of 0.0018 s.}, } @article {pmid38074363, year = {2023}, author = {Intelligence And Neuroscience, C}, title = {Retracted: The Rise of Cloud Computing: Data Protection, Privacy, and Open Research Challenges-A Systematic Literature Review (SLR).}, journal = {Computational intelligence and neuroscience}, volume = {2023}, number = {}, pages = {9838129}, pmid = {38074363}, issn = {1687-5273}, abstract = {[This retracts the article DOI: 10.1155/2022/8303504.].}, } @article {pmid38074307, year = {2023}, author = {Mangana, CM and Barraquer, A and Ferragut-Alegre, Á and Santolaria, G and Olivera, M and Barraquer, R}, title = {Detection of graft failure in post-keratoplasty patients by automated deep learning.}, journal = {Saudi journal of ophthalmology : official journal of the Saudi Ophthalmological Society}, volume = {37}, number = {3}, pages = {207-210}, pmid = {38074307}, issn = {1319-4534}, abstract = {PURPOSE: Detection of graft failure of post-penetrating keratoplasty (PKP) patients from the proprietary dataset using algorithms trained in Automated Deep Learning (AutoML).

METHODS: This was an observational cross-sectional study, for which AutoML algorithms were trained following the success/failure labeling strategy based on clinical notes, on a cohort corresponding to 220 images of post-keratoplasty anterior pole eyes. Once the image quality criteria were analyzed and the dataset was pseudo-anonymized, it was transferred to the Google Cloud Platform, where using the Vertex AI-AutoML API, cloud- and edge-based algorithms were trained, following expert recommendations on dataset splitting (80% training, 10% test, and 10% validation).

RESULTS: The metrics obtained in the cloud-based and edge-based models have been similar, but we chose to analyze the edge model as it is an exportable model, lighter and cheaper to train. The initial results of the model presented an accuracy of 95.83%, with a specificity of 91.67% and a sensitivity of 100%, obtaining an F1SCORE of 95.996% and a precision of 92.30%. Other metrics, such as the area under the curve, confusion matrix, and activation map development, were contemplated.

CONCLUSION: Initial results indicate the possibility of training algorithms in an automated fashion for the detection of graft failure in patients who underwent PKP. These algorithms are very lightweight tools easily integrated into mobile or desktop applications, potentially allowing every corneal transplant patient to have access to the best knowledge to enable the correct and timely diagnosis and treatment of graft failure. Although the results were good, because of the relatively small dataset, it is possible the data have some tendency to overfitting. AutoML opens the possibility of working in the field of artificial intelligence by computer vision to professionals with little experience and knowledge of programming.}, } @article {pmid38072221, year = {2024}, author = {Doo, FX and Kulkarni, P and Siegel, EL and Toland, M and Yi, PH and Carlos, RC and Parekh, VS}, title = {Economic and Environmental Costs of Cloud Technologies for Medical Imaging and Radiology Artificial Intelligence.}, journal = {Journal of the American College of Radiology : JACR}, volume = {21}, number = {2}, pages = {248-256}, doi = {10.1016/j.jacr.2023.11.011}, pmid = {38072221}, issn = {1558-349X}, mesh = {*Artificial Intelligence ; Cloud Computing ; *Radiology ; Costs and Cost Analysis ; Diagnostic Imaging ; }, abstract = {Radiology is on the verge of a technological revolution driven by artificial intelligence (including large language models), which requires robust computing and storage capabilities, often beyond the capacity of current non-cloud-based informatics systems. The cloud presents a potential solution for radiology, and we should weigh its economic and environmental implications. Recently, cloud technologies have become a cost-effective strategy by providing necessary infrastructure while reducing expenditures associated with hardware ownership, maintenance, and upgrades. 
Simultaneously, given the optimized energy consumption in modern cloud data centers, this transition is expected to reduce the environmental footprint of radiologic operations. The path to cloud integration comes with its own challenges, and radiology informatics leaders must consider elements such as cloud architectural choices, pricing, data security, uptime service agreements, user training and support, and broader interoperability. With the increasing importance of data-driven tools in radiology, understanding and navigating the cloud landscape will be essential for the future of radiology and its various stakeholders.}, } @article {pmid38069903, year = {2024}, author = {Mirchandani, CD and Shultz, AJ and Thomas, GWC and Smith, SJ and Baylis, M and Arnold, B and Corbett-Detig, R and Enbody, E and Sackton, TB}, title = {A Fast, Reproducible, High-throughput Variant Calling Workflow for Population Genomics.}, journal = {Molecular biology and evolution}, volume = {41}, number = {1}, pages = {}, pmid = {38069903}, issn = {1537-1719}, mesh = {Animals ; *Software ; *Metagenomics ; Workflow ; Genomics ; Sequence Analysis, DNA ; High-Throughput Nucleotide Sequencing ; }, abstract = {The increasing availability of genomic resequencing data sets and high-quality reference genomes across the tree of life present exciting opportunities for comparative population genomic studies. However, substantial challenges prevent the simple reuse of data across different studies and species, arising from variability in variant calling pipelines, data quality, and the need for computationally intensive reanalysis. Here, we present snpArcher, a flexible and highly efficient workflow designed for the analysis of genomic resequencing data in nonmodel organisms. snpArcher provides a standardized variant calling pipeline and includes modules for variant quality control, data visualization, variant filtering, and other downstream analyses. 
Implemented in Snakemake, snpArcher is user-friendly, reproducible, and designed to be compatible with high-performance computing clusters and cloud environments. To demonstrate the flexibility of this pipeline, we applied snpArcher to 26 public resequencing data sets from nonmammalian vertebrates. These variant data sets are hosted publicly to enable future comparative population genomic analyses. With its extensibility and the availability of public data sets, snpArcher will contribute to a broader understanding of genetic variation across species by facilitating the rapid use and reuse of large genomic data sets.}, } @article {pmid38067890, year = {2023}, author = {Kiarashi, Y and Saghafi, S and Das, B and Hegde, C and Madala, VSK and Nakum, A and Singh, R and Tweedy, R and Doiron, M and Rodriguez, AD and Levey, AI and Clifford, GD and Kwon, H}, title = {Graph Trilateration for Indoor Localization in Sparsely Distributed Edge Computing Devices in Complex Environments Using Bluetooth Technology.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {23}, pages = {}, pmid = {38067890}, issn = {1424-8220}, support = {cox-emory2019//James M. Cox Foundation and Cox Enterprises, Inc/ ; }, mesh = {Humans ; *Cloud Computing ; Wireless Technology ; Health Status ; Movement ; *Spatial Navigation/physiology ; }, abstract = {Spatial navigation patterns in indoor space usage can reveal important cues about the cognitive health of participants. In this work, we present a low-cost, scalable, open-source edge computing system using Bluetooth low energy (BLE) beacons for tracking indoor movements in a large, 1700 m2 facility used to carry out therapeutic activities for participants with mild cognitive impairment (MCI). The facility is instrumented with 39 edge computing systems, along with an on-premise fog server. The participants carry a BLE beacon, in which BLE signals are received and analyzed by the edge computing systems. 
Edge computing systems are sparsely distributed in the wide, complex indoor space, challenging the standard trilateration technique for localizing subjects, which assumes a dense installation of BLE beacons. We propose a graph trilateration approach that considers the temporal density of hits from the BLE beacon to surrounding edge devices to handle the inconsistent coverage of edge devices. This proposed method helps us tackle the varying signal strength, which leads to intermittent detection of beacons. The proposed method can pinpoint the positions of multiple participants with an average error of 4.4 m and over 85% accuracy in region-level localization across the entire study area. Our experimental results, evaluated in a clinical environment, suggest that an ordinary medical facility can be transformed into a smart space that enables automatic assessment of individuals' movements, which may reflect health status or response to treatment.}, } @article {pmid38067868, year = {2023}, author = {Garcia-Perez, A and Miñón, R and Torre-Bastida, AI and Zulueta-Guerrero, E}, title = {Analysing Edge Computing Devices for the Deployment of Embedded AI.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {23}, pages = {}, pmid = {38067868}, issn = {1424-8220}, support = {SONETO project, ref. KK-2023/00038//Basque Government Elkartek program/ ; }, abstract = {In recent years, more and more devices are connected to the network, generating an overwhelming amount of data. This term that is booming today is known as the Internet of Things. In order to deal with these data close to the source, the term Edge Computing arises. The main objective is to address the limitations of cloud processing and satisfy the growing demand for applications and services that require low latency, greater efficiency and real-time response capabilities. 
Furthermore, it is essential to underscore the intrinsic connection between artificial intelligence and edge computing within the context of our study. This integral relationship not only addresses the challenges posed by data proliferation but also propels a transformative wave of innovation, shaping a new era of data processing capabilities at the network's edge. Edge devices can perform real-time data analysis and make autonomous decisions without relying on constant connectivity to the cloud. This article aims at analysing and comparing Edge Computing devices when artificial intelligence algorithms are deployed on them. To this end, a detailed experiment involving various edge devices, models and metrics is conducted. In addition, we will observe how artificial intelligence accelerators such as Tensor Processing Unit behave. This analysis seeks to respond to the choice of a device that best suits the necessary AI requirements. As a summary, in general terms, the Jetson Nano provides the best performance when only CPU is used. Nevertheless the utilisation of a TPU drastically enhances the results.}, } @article {pmid38067859, year = {2023}, author = {Balatsouras, CP and Karras, A and Karras, C and Karydis, I and Sioutas, S}, title = {WiCHORD+: A Scalable, Sustainable, and P2P Chord-Based Ecosystem for Smart Agriculture Applications.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {23}, pages = {}, pmid = {38067859}, issn = {1424-8220}, support = {Save-Water//European Union and national funds of Greece and Albania under the Interreg IPA II Cross-border Cooperation Programme "Greece - Albania 2014-2020"/ ; }, abstract = {In the evolving landscape of Industry 4.0, the convergence of peer-to-peer (P2P) systems, LoRa-enabled wireless sensor networks (WSNs), and distributed hash tables (DHTs) represents a major advancement that enhances sustainability in the modern agriculture framework and its applications. 
In this study, we propose a P2P Chord-based ecosystem for sustainable and smart agriculture applications, inspired by the inner workings of the Chord protocol. The node-centric approach of WiCHORD+ is a standout feature, streamlining operations in WSNs and leading to more energy-efficient and straightforward system interactions. Instead of traditional key-centric methods, WiCHORD+ is a node-centric protocol that is compatible with the inherent characteristics of WSNs. This unique design integrates seamlessly with distributed hash tables (DHTs), providing an efficient mechanism to locate nodes and ensure robust data retrieval while reducing energy consumption. Additionally, by utilizing the MAC address of each node in data routing, WiCHORD+ offers a more direct and efficient data lookup mechanism, essential for the timely and energy-efficient operation of WSNs. While the increasing dependence of smart agriculture on cloud computing environments for data storage and machine learning techniques for real-time prediction and analytics continues, frameworks like the proposed WiCHORD+ appear promising for future IoT applications due to their compatibility with modern devices and peripherals. 
Ultimately, the proposed approach aims to effectively incorporate LoRa, WSNs, DHTs, cloud computing, and machine learning, by providing practical solutions to the ongoing challenges in the current smart agriculture landscape and IoT applications.}, } @article {pmid38067809, year = {2023}, author = {Park, J and Jeong, J}, title = {An Autoscaling System Based on Predicting the Demand for Resources and Responding to Failure in Forecasting.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {23}, pages = {}, pmid = {38067809}, issn = {1424-8220}, support = {2018R1A5A7023490//National Research Foundation of Korea/ ; 2021R1F1A1061514//National Research Foundation of Korea/ ; S-2022-G0001-00070//Dongguk University/ ; }, abstract = {In recent years, the convergence of edge computing and sensor technologies has become a pivotal frontier revolutionizing real-time data processing. In particular, the practice of data acquisition-which encompasses the collection of sensory information in the form of images and videos, followed by their transmission to a remote cloud infrastructure for subsequent analysis-has witnessed a notable surge in adoption. However, to ensure seamless real-time processing irrespective of the data volume being conveyed or the frequency of incoming requests, it is vital to proactively locate resources within the cloud infrastructure specifically tailored to data-processing tasks. Many studies have focused on the proactive prediction of resource demands through the use of deep learning algorithms, generating considerable interest in real-time data processing. Nonetheless, an inherent risk arises when relying solely on predictive resource allocation, as it can heighten the susceptibility to system failure. In this study, a framework that includes algorithms that periodically monitor resource requirements and dynamically adjust resource provisioning to match the actual demand is proposed. 
Under experimental conditions with the Bitbrains dataset, setting the network throughput to 300 kB/s and with a threshold of 80%, the proposed system provides a 99% performance improvement in terms of the autoscaling algorithm and requires only 0.43 ms of additional computational overhead compared to relying on a simple prediction model alone.}, } @article {pmid38067758, year = {2023}, author = {Khan, A and Khattak, KS and Khan, ZH and Gulliver, TA and Abdullah, }, title = {Edge Computing for Effective and Efficient Traffic Characterization.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {23}, pages = {}, pmid = {38067758}, issn = {1424-8220}, support = {National Center for Big Data and Cloud Computing//Higher Education Commission/ ; }, abstract = {Traffic flow analysis is essential to develop smart urban mobility solutions. Although numerous tools have been proposed, they employ only a small number of parameters. To overcome this limitation, an edge computing solution is proposed based on nine traffic parameters, namely, vehicle count, direction, speed, and type, flow, peak hour factor, density, time headway, and distance headway. The proposed low-cost solution is easy to deploy and maintain. The sensor node is comprised of a Raspberry Pi 4, Pi camera, Intel Movidius Neural Compute Stick 2, Xiaomi MI Power Bank, and Zong 4G Bolt+. Pre-trained models from the OpenVINO Toolkit are employed for vehicle detection and classification, and a centroid tracking algorithm is used to estimate vehicle speed. The measured traffic parameters are transmitted to the ThingSpeak cloud platform via 4G. The proposed solution was field-tested for one week (7 h/day), with approximately 10,000 vehicles per day. The count, classification, and speed accuracies obtained were 79.8%, 93.2%, and 82.9%, respectively. The sensor node can operate for approximately 8 h with a 10,000 mAh power bank and the required data bandwidth is 1.5 MB/h. 
The proposed edge computing solution overcomes the limitations of existing traffic monitoring systems and can work in hostile environments.}, } @article {pmid38067756, year = {2023}, author = {Aljebreen, M and Alohali, MA and Mahgoub, H and Aljameel, SS and Alsumayt, A and Sayed, A}, title = {Multi-Objective Seagull Optimization Algorithm with Deep Learning-Enabled Vulnerability Detection for Secure Cloud Environments.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {23}, pages = {}, pmid = {38067756}, issn = {1424-8220}, support = {PNURSP2023R330//Princess Nourah bint Abdulrahman University/ ; RSP2023R459//King Saud University/ ; }, abstract = {Cloud computing (CC) is an internet-enabled environment that provides computing services such as networking, databases, and servers to clients and organizations in a cost-effective manner. Despite the benefits rendered by CC, its security remains a prominent concern to overcome. An intrusion detection system (IDS) is generally used to detect both normal and anomalous behavior in networks. The design of IDS using a machine learning (ML) technique comprises a series of methods that can learn patterns from data and forecast the outcomes consequently. In this background, the current study designs a novel multi-objective seagull optimization algorithm with a deep learning-enabled vulnerability detection (MOSOA-DLVD) technique to secure the cloud platform. The MOSOA-DLVD technique uses the feature selection (FS) method and hyperparameter tuning strategy to identify the presence of vulnerabilities or attacks in the cloud infrastructure. Primarily, the FS method is implemented using the MOSOA technique. Furthermore, the MOSOA-DLVD technique uses a deep belief network (DBN) method for intrusion detection and its classification. In order to improve the detection outcomes of the DBN algorithm, the sooty tern optimization algorithm (STOA) is applied for the hyperparameter tuning process. 
The performance of the proposed MOSOA-DLVD system was validated with extensive simulations upon a benchmark IDS dataset. The improved intrusion detection results of the MOSOA-DLVD approach with a maximum accuracy of 99.34% establish the proficiency of the model compared with recent methods.}, } @article {pmid38067703, year = {2023}, author = {Cicero, S and Guarascio, M and Guerrieri, A and Mungari, S}, title = {A Deep Anomaly Detection System for IoT-Based Smart Buildings.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {23}, pages = {}, pmid = {38067703}, issn = {1424-8220}, abstract = {In recent years, technological advancements in sensor, communication, and data storage technologies have led to the increasingly widespread use of smart devices in different types of buildings, such as residential homes, offices, and industrial installations. The main benefit of using these devices is the possibility of enhancing different crucial aspects of life within these buildings, including energy efficiency, safety, health, and occupant comfort. In particular, the fast progress in the field of the Internet of Things has yielded exponential growth in the number of connected smart devices and, consequently, increased the volume of data generated and exchanged. However, traditional Cloud-Computing platforms have exhibited limitations in their capacity to handle and process the continuous data exchange, leading to the rise of new computing paradigms, such as Edge Computing and Fog Computing. In this new complex scenario, advanced Artificial Intelligence and Machine Learning can play a key role in analyzing the generated data and predicting unexpected or anomalous events, allowing for quickly setting up effective responses against these unexpected events. To the best of our knowledge, current literature lacks Deep-Learning-based approaches specifically devised for guaranteeing safety in IoT-Based Smart Buildings. 
For this reason, we adopt an unsupervised neural architecture for detecting anomalies, such as faults, fires, theft attempts, and more, in such contexts. In more detail, in our proposal, data from a sensor network are processed by a Sparse U-Net neural model. The proposed approach is lightweight, making it suitable for deployment on the edge nodes of the network, and it does not require a pre-labeled training dataset. Experimental results conducted on a real-world case study demonstrate the effectiveness of the developed solution.}, } @article {pmid38067697, year = {2023}, author = {Mehmood, KT and Atiq, S and Hussain, MM}, title = {Enhancing QoS of Telecom Networks through Server Load Management in Software-Defined Networking (SDN).}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {23}, pages = {}, pmid = {38067697}, issn = {1424-8220}, abstract = {In the modern era, with the emergence of the Internet of Things (IoT), big data applications, cloud computing, and the ever-increasing demand for high-speed internet with the aid of upgraded telecom network resources, users now require virtualization of the network for smart handling of modern-day challenges to obtain better services (in terms of security, reliability, scalability, etc.). These requirements can be fulfilled by using software-defined networking (SDN). This research article emphasizes one of the major aspects of the practical implementation of SDN to enhance the QoS of a virtual network through the load management of network servers. In an SDN-based network, several servers are available to fulfill users' hypertext transfer protocol (HTTP) requests to ensure dynamic routing under the influence of the SDN controller. 
However, if the number of requests is directed to a specific server, the controller is bound to follow the user-programmed instructions, and the load on that server is increased, which results in (a) an increase in end-to-end user delay, (b) a decrease in the data transfer rate, and (c) a decrease in the available bandwidth of the targeted server. All of the above-mentioned factors will result in the degradation of network QoS. With the implementation of the proposed algorithm, dynamic active sensing server load management (DASLM), on the SDN controller, the load on the server is shared based on QoS control parameters (throughput, response time, round trip time, etc.). The overall delay is reduced, and the bandwidth utilization along with throughput is also increased.}, } @article {pmid38062043, year = {2023}, author = {Stanimirova, R and Tarrio, K and Turlej, K and McAvoy, K and Stonebrook, S and Hu, KT and Arévalo, P and Bullock, EL and Zhang, Y and Woodcock, CE and Olofsson, P and Zhu, Z and Barber, CP and Souza, CM and Chen, S and Wang, JA and Mensah, F and Calderón-Loor, M and Hadjikakou, M and Bryan, BA and Graesser, J and Beyene, DL and Mutasha, B and Siame, S and Siampale, A and Friedl, MA}, title = {A global land cover training dataset from 1984 to 2020.}, journal = {Scientific data}, volume = {10}, number = {1}, pages = {879}, pmid = {38062043}, issn = {2052-4463}, support = {80NSSC18K0994//National Aeronautics and Space Administration (NASA)/ ; }, abstract = {State-of-the-art cloud computing platforms such as Google Earth Engine (GEE) enable regional-to-global land cover and land cover change mapping with machine learning algorithms. However, collection of high-quality training data, which is necessary for accurate land cover mapping, remains costly and labor-intensive. To address this need, we created a global database of nearly 2 million training units spanning the period from 1984 to 2020 for seven primary and nine secondary land cover classes. 
Our training data collection approach leveraged GEE and machine learning algorithms to ensure data quality and biogeographic representation. We sampled the spectral-temporal feature space from Landsat imagery to efficiently allocate training data across global ecoregions and incorporated publicly available and collaborator-provided datasets into our database. To reflect the underlying regional class distribution and post-disturbance landscapes, we strategically augmented the database. We used a machine learning-based cross-validation procedure to remove potentially mis-labeled training units. Our training database is relevant for a wide array of studies such as land cover change, agriculture, forestry, hydrology, urban development, among many others.}, } @article {pmid38061141, year = {2024}, author = {Long, K and Chen, Z and Zhang, H and Zhang, M}, title = {Spatiotemporal disturbances and attribution analysis of mangrove in southern China from 1986 to 2020 based on time-series Landsat imagery.}, journal = {The Science of the total environment}, volume = {912}, number = {}, pages = {169157}, doi = {10.1016/j.scitotenv.2023.169157}, pmid = {38061141}, issn = {1879-1026}, abstract = {As one of the most productive ecosystems in the world, mangrove has a critical role to play in both the natural ecosystem and the human economic and social society. However, two thirds of the world's mangrove have been irreversibly damaged over the past 100 years, as a result of ongoing human activities and climate change. In this paper, adopting Landsat for the past 36 years as the data source, the detection of spatiotemporal changes of mangrove in southern China was carried out based on the Google Earth Engine (GEE) cloud platform using the LandTrendr algorithm. In addition, the attribution of mangrove disturbances was analyzed by a random forest algorithm. 
The results indicated the area of mangrove recovery (5174.64 hm[2]) was much larger than the area of mangrove disturbances (1625.40 hm[2]) over the 35-year period in the study area. The disturbances of mangrove in southern China were dominated by low and low-to-medium-level disturbances, with an area of 1009.89 hm[2], accounting for 57.50 % of the total disturbances. The mangrove recovery was also dominated by low and low-to-medium-level recovery, with an area of 3239.19 hm[2], accounting for 62.61 % of the total recovery area. Both human and natural factors interacted and influenced each other, together causing spatiotemporal disturbances of mangrove in southern China during 1986-2020. The mangrove disturbances in the Phase I (1986-2000) and Phase III (2011-2020) were characterized by human-induced (50.74 % and 58.86 %), such as construction of roads and aquaculture ponds. The mangrove disturbances in the Phase II (2001-2010) were dominated by natural factors (55.73 %), such as tides, flooding, and species invasions. It was also observed that the area of mangrove recovery in southern China increased dramatically from 1986 to 2020 due to the promulgation and implementation of the Chinese government's policy on mangrove protection, as well as increased human awareness of mangrove wetland protection.}, } @article {pmid38053971, year = {2023}, author = {Bernardi, M and Cardarelli, F}, title = {Phasor identifier: A cloud-based analysis of phasor-FLIM data on Python notebooks.}, journal = {Biophysical reports}, volume = {3}, number = {4}, pages = {100135}, pmid = {38053971}, issn = {2667-0747}, abstract = {This paper introduces an innovative approach utilizing Google Colaboratory for the versatile analysis of phasor fluorescence lifetime imaging microscopy (FLIM) data collected from various samples (e.g., cuvette, cells, tissues) and in various input file formats. 
In fact, phasor-FLIM widespread adoption has been hampered by complex instrumentation and data analysis requirements. We mean to make advanced FLIM analysis more accessible to researchers through a cloud-based solution that 1) harnesses robust computational resources, 2) eliminates hardware limitations, and 3) supports both CPU and GPU processing. We envision a paradigm shift in FLIM data accessibility and potential, aligning with the evolving field of artificial intelligence-driven FLIM analysis. This approach simplifies FLIM data handling and opens doors for diverse applications, from studying cellular metabolism to investigating drug encapsulation, benefiting researchers across multiple domains. The comparative analysis of freely distributed FLIM tools highlights the unique advantages of this approach in terms of adaptability, scalability, and open-source nature.}, } @article {pmid38053860, year = {2023}, author = {Moparthi, NR and Balakrishna, G and Chithaluru, P and Kolla, M and Kumar, M}, title = {An improved energy-efficient cloud-optimized load-balancing for IoT frameworks.}, journal = {Heliyon}, volume = {9}, number = {11}, pages = {e21947}, pmid = {38053860}, issn = {2405-8440}, abstract = {As wireless communication grows, so does the need for smart, simple, affordable solutions. The need prompted academics to develop appropriate network solutions ranging from wireless sensor networks (WSNs) to the Internet of Things (IoT). With the innovations of researchers, the necessity for enhancements in existing researchers has increased. Initially, network protocols were the focus of study and development. Regardless, IoT devices are already being employed in different industries and collecting massive amounts of data through complicated applications. This necessitates IoT load-balancing research. Several studies tried to address the communication overheads produced by significant IoT network traffic. 
These studies intended to control network loads by evenly spreading them across IoT nodes. Eventually, the practitioners decided to migrate the IoT node data and the apps processing it to the cloud. So, the difficulty is to design a cloud-based load balancer algorithm that meets the criteria of IoT network protocols. Defined as a unique method for controlling loads on cloud-integrated IoT networks. The suggested method analyses actual and virtual host machine needs in cloud computing environments. The purpose of the proposed model is to design a load balancer that improves network response time while reducing energy consumption. The proposed load balancer algorithm may be easily integrated with peer-existing IoT frameworks. Handling the load for cloud-based IoT architectures with the above-described methods. Significantly boosts response time for the IoT network by 60 %. The proposed scheme has less energy consumption (31 %), less execution time (24\%), decreased node shutdown time (45 %), and less infrastructure cost (48\%) in comparison to existing frameworks. Based on the simulation results, it is concluded that the proposed framework offers an improved solution for IoT-based cloud load-balancing issues.}, } @article {pmid38053722, year = {2023}, author = {Tang, R and Aridas, NK and Talip, MSA}, title = {Design of a data processing method for the farmland environmental monitoring based on improved Spark components.}, journal = {Frontiers in big data}, volume = {6}, number = {}, pages = {1282352}, pmid = {38053722}, issn = {2624-909X}, abstract = {With the popularization of big data technology, agricultural data processing systems have become more intelligent. In this study, a data processing method for farmland environmental monitoring based on improved Spark components is designed. 
It introduces the FAST-Join (Join critical filtering sampling partition optimization) algorithm in the Spark component for equivalence association query optimization to improve the operating efficiency of the Spark component and cluster. The experimental results show that the amount of data written and read in Shuffle by Spark optimized by the FAST-join algorithm only accounts for 0.958 and 1.384% of the original data volume on average, and the calculation speed is 202.11% faster than the original. The average data processing time and occupied memory size of the Spark cluster are reduced by 128.22 and 76.75% compared with the originals. It also compared the cluster performance of the FAST-join and Equi-join algorithms. The Spark cluster optimized by the FAST-join algorithm reduced the processing time and occupied memory size by an average of 68.74 and 37.80% compared with the Equi-join algorithm, which shows that the FAST-join algorithm can effectively improve the efficiency of inter-data table querying and cluster computing.}, } @article {pmid38052579, year = {2023}, author = {Yu, L and Zhang, Z and Lai, Y and Zhao, Y and Mo, F}, title = {Edge computing-based intelligent monitoring system for manhole cover.}, journal = {Mathematical biosciences and engineering : MBE}, volume = {20}, number = {10}, pages = {18792-18819}, doi = {10.3934/mbe.2023833}, pmid = {38052579}, issn = {1551-0018}, abstract = {Unusual states of manhole covers (MCs), such as being tilted, lost or flooded, can present substantial safety hazards and risks to pedestrians and vehicles on the roadway. Most MCs are still being managed through manual regular inspections and have limited information technology integration. This leads to time-consuming and labor-intensive identification with a lower level of accuracy. In this paper, we propose an edge computing-based intelligent monitoring system for manhole covers (EC-MCIMS). 
Sensors detect the MC and send status and positioning information via LoRa to the edge gateway located on the nearby wisdom pole. The edge gateway utilizes a lightweight machine learning model, trained on the edge impulse (EI) platform, which can predict the state of the MC. If an abnormality is detected, the display and voice device on the wisdom pole will respectively show and broadcast messages to alert pedestrians and vehicles. Simultaneously, the information is uploaded to the cloud platform, enabling remote maintenance personnel to promptly repair and restore it. Tests were performed on the EI platform and in Dongguan townships, demonstrating that the average response time for identifying MCs is 4.81 s. Higher responsiveness and lower power consumption were obtained compared to cloud computing models. Moreover, the system utilizes a lightweight model that better reduces read-only memory (ROM) and random-access memory (RAM), while maintaining an average identification accuracy of 94%.}, } @article {pmid38049547, year = {2023}, author = {Parashar, D and Kumar, A and Palni, S and Pandey, A and Singh, A and Singh, AP}, title = {Use of machine learning-based classification algorithms in the monitoring of Land Use and Land Cover practices in a hilly terrain.}, journal = {Environmental monitoring and assessment}, volume = {196}, number = {1}, pages = {8}, pmid = {38049547}, issn = {1573-2959}, mesh = {Cities ; *Environmental Monitoring/methods ; *Hot Temperature ; Algorithms ; Support Vector Machine ; }, abstract = {The current high rate of urbanization in developing countries and its consequences, like traffic congestion, slum development, scarcity of resources, and urban heat islands, raise a need for better Land Use Land Cover (LULC) classification mapping for improved planning. 
This study mainly deals with two objectives: 1) to explore the applicability of machine learning-based techniques, especially the Random forest (RF) algorithm and Support Vector Machine (SVM) algorithm as the potential classifiers for LULC mapping under different scenarios, and 2) to prepare a better LULC classification model for mountain terrain by using different indices with combination of spectral bands. Due to differences in topography, shadows, spectral confusion from overlapping spectral signatures of different land cover types, and a lack of access for ground verification, classification in mountainous terrain is difficult task compared to plain terrain classification. An enhanced LULC classification model has been designed using two popular machine learning (ML) classifier algorithms, SVM and RF, explicitly for mountainous terrains by taking into consideration of a study area of Gopeshwer town in the Chamoli district of Uttarakhand state, India. Online-based cloud platform Google Earth Engine (GEE) was used for overall processing. Four classification models were built using Sentinel 2B satellite imagery with 20m and 10m resolutions. Two of these models (Model 'i' based on RF algorithm and Model 'ii' based on SVM algorithm) were designed using spectral bands of visible and infrared wavelengths, and the other two (Model 'iii' based on RF algorithm and Model 'iv' based on SVM algorithm) with the addition of indices with spectral bands. The accuracy assessment was done using the confusion matrix based on the output results. Obtained result highlights that the overall accuracy for model 'i' and model 'ii' were 82% and 86% respectively, whereas these were 87.17% and 87.2% for model 'iii' and model 'iv' respectively. Finally, the study compared the performance of each model based on different accuracy metrics for better LULC mapping. 
It proposes an improved LULC classification model for mountainous terrains, which can contribute to better land management and planning in the study area.}, } @article {pmid38046398, year = {2023}, author = {Babar, M and Ahmad Jan, M and He, X and Usman Tariq, M and Mastorakis, S and Alturki, R}, title = {An Optimized IoT-enabled Big Data Analytics Architecture for Edge-Cloud Computing.}, journal = {IEEE internet of things journal}, volume = {10}, number = {5}, pages = {3995-4005}, pmid = {38046398}, issn = {2327-4662}, support = {P20 GM109090/GM/NIGMS NIH HHS/United States ; }, abstract = {The awareness of edge computing is attaining eminence and is largely acknowledged with the rise of Internet of Things (IoT). Edge-enabled solutions offer efficient computing and control at the network edge to resolve the scalability and latency-related concerns. Though, it comes to be challenging for edge computing to tackle diverse applications of IoT as they produce massive heterogeneous data. The IoT-enabled frameworks for Big Data analytics face numerous challenges in their existing structural design, for instance, the high volume of data storage and processing, data heterogeneity, and processing time among others. Moreover, the existing proposals lack effective parallel data loading and robust mechanisms for handling communication overhead. To address these challenges, we propose an optimized IoT-enabled big data analytics architecture for edge-cloud computing using machine learning. In the proposed scheme, an edge intelligence module is introduced to process and store the big data efficiently at the edges of the network with the integration of cloud technology. The proposed scheme is composed of two layers: IoT-edge and Cloud-processing. The data injection and storage is carried out with an optimized MapReduce parallel algorithm. Optimized Yet Another Resource Negotiator (YARN) is used for efficiently managing the cluster. 
The proposed data design is experimentally simulated with an authentic dataset using Apache Spark. The comparative analysis is decorated with existing proposals and traditional mechanisms. The results justify the efficiency of our proposed work.}, } @article {pmid38043630, year = {2024}, author = {Doo, FX and Parekh, VS and Kanhere, A and Savani, D and Tejani, AS and Sapkota, A and Yi, PH}, title = {Evaluation of Climate-Aware Metrics Tools for Radiology Informatics and Artificial Intelligence: Toward a Potential Radiology Ecolabel.}, journal = {Journal of the American College of Radiology : JACR}, volume = {21}, number = {2}, pages = {239-247}, doi = {10.1016/j.jacr.2023.11.019}, pmid = {38043630}, issn = {1558-349X}, mesh = {Humans ; Artificial Intelligence ; Radiography ; *Radiology ; *Medical Informatics ; Diagnostic Imaging ; }, abstract = {Radiology is a major contributor to health care's impact on climate change, in part due to its reliance on energy-intensive equipment as well as its growing technological reliance. Delivering modern patient care requires a robust informatics team to move images from the imaging equipment to the workstations and the health care system. Radiology informatics is the field that manages medical imaging IT. This involves the acquisition, storage, retrieval, and use of imaging information in health care to improve access and quality, which includes PACS, cloud services, and artificial intelligence. However, the electricity consumption of computing and the life cycle of various computer components expands the carbon footprint of health care. The authors provide a general framework to understand the environmental impact of clinical radiology informatics, which includes using the international Greenhouse Gas Protocol to draft a definition of scopes of emissions pertinent to radiology informatics, as well as exploring existing tools to measure and account for these emissions. 
A novel standard ecolabel for radiology informatics tools, such as the Energy Star label for consumer devices or Leadership in Energy and Environmental Design certification for buildings, should be developed to promote awareness and guide radiologists and radiology informatics leaders in making environmentally conscious decisions for their clinical practice. At this critical climate juncture, the radiology community has a unique and pressing obligation to consider our shared environmental responsibility in innovating clinical technology for patient care.}, } @article {pmid38042609, year = {2023}, author = {Shaikh, TA and Rasool, T and Verma, P}, title = {Machine intelligence and medical cyber-physical system architectures for smart healthcare: Taxonomy, challenges, opportunities, and possible solutions.}, journal = {Artificial intelligence in medicine}, volume = {146}, number = {}, pages = {102692}, doi = {10.1016/j.artmed.2023.102692}, pmid = {38042609}, issn = {1873-2860}, mesh = {Humans ; *Artificial Intelligence ; *Computer Security ; Delivery of Health Care ; Cloud Computing ; }, abstract = {Hospitals use medical cyber-physical systems (MCPS) more often to give patients quality continuous care. MCPS isa life-critical, context-aware, networked system of medical equipment. It has been challenging to achieve high assurance in system software, interoperability, context-aware intelligence, autonomy, security and privacy, and device certifiability due to the necessity to create complicated MCPS that are safe and efficient. The MCPS system is shown in the paper as a newly developed application case study of artificial intelligence in healthcare. Applications for various CPS-based healthcare systems are discussed, such as telehealthcare systems for managing chronic diseases (cardiovascular diseases, epilepsy, hearing loss, and respiratory diseases), supporting medication intake management, and tele-homecare systems. 
The goal of this study is to provide a thorough overview of the essential components of the MCPS from several angles, including design, methodology, and important enabling technologies, including sensor networks, the Internet of Things (IoT), cloud computing, and multi-agent systems. Additionally, some significant applications are investigated, such as smart cities, which are regarded as one of the key applications that will offer new services for industrial systems, transportation networks, energy distribution, monitoring of environmental changes, business and commerce applications, emergency response, and other social and recreational activities.The four levels of an MCPS's general architecture-data collecting, data aggregation, cloud processing, and action-are shown in this study. Different encryption techniques must be employed to ensure data privacy inside each layer due to the variations in hardware and communication capabilities of each layer. We compare established and new encryption techniques based on how well they support safe data exchange, secure computing, and secure storage. Our thorough experimental study of each method reveals that, although enabling innovative new features like secure sharing and safe computing, developing encryption approaches significantly increases computational and storage overhead. 
To increase the usability of newly developed encryption schemes in an MCPS and to provide a comprehensive list of tools and databases to assist other researchers, we provide a list of opportunities and challenges for incorporating machine intelligence-based MCPS in healthcare applications in our paper's conclusion.}, } @article {pmid38039654, year = {2024}, author = {Chen, X and Li, J and Chen, D and Zhou, Y and Tu, Z and Lin, M and Kang, T and Lin, J and Gong, T and Zhu, L and Zhou, J and Lin, OY and Guo, J and Dong, J and Guo, D and Qu, X}, title = {CloudBrain-MRS: An intelligent cloud computing platform for in vivo magnetic resonance spectroscopy preprocessing, quantification, and analysis.}, journal = {Journal of magnetic resonance (San Diego, Calif. : 1997)}, volume = {358}, number = {}, pages = {107601}, doi = {10.1016/j.jmr.2023.107601}, pmid = {38039654}, issn = {1096-0856}, mesh = {Humans ; *Cloud Computing ; *Artificial Intelligence ; Magnetic Resonance Spectroscopy/methods ; Magnetic Resonance Imaging/methods ; Software ; }, abstract = {Magnetic resonance spectroscopy (MRS) is an important clinical imaging method for diagnosis of diseases. MRS spectrum is used to observe the signal intensity of metabolites or further infer their concentrations. Although the magnetic resonance vendors commonly provide basic functions of spectrum plots and metabolite quantification, the spread of clinical research of MRS is still limited due to the lack of easy-to-use processing software or platform. To address this issue, we have developed CloudBrain-MRS, a cloud-based online platform that provides powerful hardware and advanced algorithms. The platform can be accessed simply through a web browser, without the need of any program installation on the user side. CloudBrain-MRS also integrates the classic LCModel and advanced artificial intelligence algorithms and supports batch preprocessing, quantification, and analysis of MRS data from different vendors. 
Additionally, the platform offers useful functions: (1) Automatically statistical analysis to find biomarkers for diseases; (2) Consistency verification between the classic and artificial intelligence quantification algorithms; (3) Colorful three-dimensional visualization for easy observation of individual metabolite spectrum. Last, data of both healthy subjects and patients with mild cognitive impairment are used to demonstrate the functions of the platform. To the best of our knowledge, this is the first cloud computing platform for in vivo MRS with artificial intelligence processing. We have shared our cloud platform at MRSHub, providing at least two years of free access and service. If you are interested, please visit https://mrshub.org/software_all/#CloudBrain-MRS or https://csrc.xmu.edu.cn/CloudBrain.html.}, } @article {pmid38035280, year = {2023}, author = {Zhao, K and Farrell, K and Mashiku, M and Abay, D and Tang, K and Oberste, MS and Burns, CC}, title = {A search-based geographic metadata curation pipeline to refine sequencing institution information and support public health.}, journal = {Frontiers in public health}, volume = {11}, number = {}, pages = {1254976}, pmid = {38035280}, issn = {2296-2565}, mesh = {*Metadata ; *Public Health ; High-Throughput Nucleotide Sequencing ; China ; United Kingdom ; }, abstract = {BACKGROUND: The National Center for Biotechnology Information (NCBI) Sequence Read Archive (SRA) has amassed a vast reservoir of genetic data since its inception in 2007. These public data hold immense potential for supporting pathogen surveillance and control. However, the lack of standardized metadata and inconsistent submission practices in SRA may impede the data's utility in public health.

METHODS: To address this issue, we introduce the Search-based Geographic Metadata Curation (SGMC) pipeline. SGMC utilized Python and web scraping to extract geographic data of sequencing institutions from NCBI SRA in the Cloud and its website. It then harnessed ChatGPT to refine the sequencing institution and location assignments. To illustrate the pipeline's utility, we examined the geographic distribution of the sequencing institutions and their countries relevant to polio eradication and categorized them.

RESULTS: SGMC successfully identified 7,649 sequencing institutions and their global locations from a random selection of 2,321,044 SRA accessions. These institutions were distributed across 97 countries, with strong representation in the United States, the United Kingdom, and China. However, there was a lack of data from African, Central Asian, and Central American countries, indicating potential disparities in sequencing capabilities. Comparison with manually curated data for U.S. institutions revealed SGMC's accuracy rates of 94.8% for institutions, 93.1% for countries, and 74.5% for geographic coordinates.

CONCLUSION: SGMC may represent a novel approach using a generative AI model to enhance geographic data (country and institution assignments) for large numbers of samples within SRA datasets. This information can be utilized to bolster public health endeavors.}, } @article {pmid38035195, year = {2023}, author = {Olson, RH and Cohen Kalafut, N and Wang, D}, title = {MANGEM: A web app for multimodal analysis of neuronal gene expression, electrophysiology, and morphology.}, journal = {Patterns (New York, N.Y.)}, volume = {4}, number = {11}, pages = {100847}, pmid = {38035195}, issn = {2666-3899}, support = {P50 HD105353/HD/NICHD NIH HHS/United States ; R01 AG067025/AG/NIA NIH HHS/United States ; RF1 MH128695/MH/NIMH NIH HHS/United States ; }, abstract = {Single-cell techniques like Patch-seq have enabled the acquisition of multimodal data from individual neuronal cells, offering systematic insights into neuronal functions. However, these data can be heterogeneous and noisy. To address this, machine learning methods have been used to align cells from different modalities onto a low-dimensional latent space, revealing multimodal cell clusters. The use of those methods can be challenging without computational expertise or suitable computing infrastructure for computationally expensive methods. To address this, we developed a cloud-based web application, MANGEM (multimodal analysis of neuronal gene expression, electrophysiology, and morphology). MANGEM provides a step-by-step accessible and user-friendly interface to machine learning alignment methods of neuronal multimodal data. It can run asynchronously for large-scale data alignment, provide users with various downstream analyses of aligned cells, and visualize the analytic results. 
We demonstrated the usage of MANGEM by aligning multimodal data of neuronal cells in the mouse visual cortex.}, } @article {pmid38027905, year = {2023}, author = {Ait Abdelmoula, I and Idrissi Kaitouni, S and Lamrini, N and Jbene, M and Ghennioui, A and Mehdary, A and El Aroussi, M}, title = {Towards a sustainable edge computing framework for condition monitoring in decentralized photovoltaic systems.}, journal = {Heliyon}, volume = {9}, number = {11}, pages = {e21475}, pmid = {38027905}, issn = {2405-8440}, abstract = {In recent times, the rapid advancements in technology have led to a digital revolution in urban areas, and new computing frameworks are emerging to address the current issues in monitoring and fault detection, particularly in the context of the growing renewable decentralized energy systems. This research proposes a novel framework for monitoring the condition of decentralized photovoltaic systems within a smart city infrastructure. The approach uses edge computing to overcome the challenges associated with costly processing through remote cloud servers. By processing data at the edge of the network, this concept allows for significant gains in speed and bandwidth consumption, making it suitable for a sustainable city environment. In the proposed edge-learning scheme, several machine learning models are compared to find the best suitable model achieving both high accuracy and low latency in detecting photovoltaic faults. Four light and rapid machine learning models, namely, CBLOF, LOF, KNN, ANN, are selected as best performers and trained locally in decentralized edge nodes. The overall approach is deployed in a smart solar campus with multiple distributed PV units located in the R&D platform Green & Smart Building Park. Several experiments were conducted on different anomaly scenarios, and the models were evaluated based on their supervision method, f1-score, inference time, RAM usage, and model size. 
The paper also investigates the impact of the type of supervision and the class of the model on the anomaly detection performance. The findings indicated that the supervised artificial neural network (ANN) had superior performance compared to other models, obtaining an f1-score of 80 % even in the most unfavorable conditions. The findings also showed that KNN was the most suitable unsupervised model for the investigated experiments achieving good f1-scores (100 %, 95 % and 92 %) in 3 out of 4 scenarios making it a good candidate for similar anomaly detection tasks.}, } @article {pmid38027596, year = {2023}, author = {Mohammed, MA and Lakhan, A and Abdulkareem, KH and Khanapi Abd Ghani, M and Abdulameer Marhoon, H and Nedoma, J and Martinek, R}, title = {Multi-objectives reinforcement federated learning blockchain enabled Internet of things and Fog-Cloud infrastructure for transport data.}, journal = {Heliyon}, volume = {9}, number = {11}, pages = {e21639}, doi = {10.1016/j.heliyon.2023.e21639}, pmid = {38027596}, issn = {2405-8440}, abstract = {For the past decade, there has been a significant increase in customer usage of public transport applications in smart cities. These applications rely on various services, such as communication and computation, provided by additional nodes within the smart city environment. However, these services are delivered by a diverse range of cloud computing-based servers that are widely spread and heterogeneous, leading to cybersecurity becoming a crucial challenge among these servers. Numerous machine-learning approaches have been proposed in the literature to address the cybersecurity challenges in heterogeneous transport applications within smart cities. However, the centralized security and scheduling strategies suggested so far have yet to produce optimal results for transport applications. This work aims to present a secure decentralized infrastructure for transporting data in fog cloud networks. 
This paper introduces Multi-Objectives Reinforcement Federated Learning Blockchain (MORFLB) for Transport Infrastructure. MORFLB aims to minimize processing and transfer delays while maximizing long-term rewards by identifying known and unknown attacks on remote sensing data in-vehicle applications. MORFLB incorporates multi-agent policies, proof-of-work hashing validation, and decentralized deep neural network training to achieve minimal processing and transfer delays. It comprises vehicle applications, decentralized fog, and cloud nodes based on blockchain reinforcement federated learning, which improves rewards through trial and error. The study formulates a combinatorial problem that minimizes and maximizes various factors for vehicle applications. The experimental results demonstrate that MORFLB effectively reduces processing and transfer delays while maximizing rewards compared to existing studies. It provides a promising solution to address the cybersecurity challenges in intelligent transport applications within smart cities. In conclusion, this paper presents MORFLB, a combination of different schemes that ensure the execution of transport data under their constraints and achieve optimal results with the suggested decentralized infrastructure based on blockchain technology.}, } @article {pmid38027579, year = {2023}, author = {Guo, LL and Calligan, M and Vettese, E and Cook, S and Gagnidze, G and Han, O and Inoue, J and Lemmon, J and Li, J and Roshdi, M and Sadovy, B and Wallace, S and Sung, L}, title = {Development and validation of the SickKids Enterprise-wide Data in Azure Repository (SEDAR).}, journal = {Heliyon}, volume = {9}, number = {11}, pages = {e21586}, pmid = {38027579}, issn = {2405-8440}, abstract = {OBJECTIVES: To describe the processes developed by The Hospital for Sick Children (SickKids) to enable utilization of electronic health record (EHR) data by creating sequentially transformed schemas for use across multiple user types.

METHODS: We used Microsoft Azure as the cloud service provider and named this effort the SickKids Enterprise-wide Data in Azure Repository (SEDAR). Epic Clarity data from on-premises was copied to a virtual network in Microsoft Azure. Three sequential schemas were developed. The Filtered Schema added a filter to retain only SickKids and valid patients. The Curated Schema created a data structure that was easier to navigate and query. Each table contained a logical unit such as patients, hospital encounters or laboratory tests. Data validation of randomly sampled observations in the Curated Schema was performed. The SK-OMOP Schema was designed to facilitate research and machine learning. Two individuals mapped medical elements to standard Observational Medical Outcomes Partnership (OMOP) concepts.

RESULTS: A copy of Clarity data was transferred to Microsoft Azure and updated each night using log shipping. The Filtered Schema and Curated Schema were implemented as stored procedures and executed each night with incremental updates or full loads. Data validation required up to 16 iterations for each Curated Schema table. OMOP concept mapping achieved at least 80 % coverage for each SK-OMOP table.

CONCLUSIONS: We described our experience in creating three sequential schemas to address different EHR data access requirements. Future work should consider replicating this approach at other institutions to determine whether approaches are generalizable.}, } @article {pmid38022923, year = {2023}, author = {Han, J and Sun, R and Zeeshan, M and Rehman, A and Ullah, I}, title = {The impact of digital transformation on green total factor productivity of heavily polluting enterprises.}, journal = {Frontiers in psychology}, volume = {14}, number = {}, pages = {1265391}, pmid = {38022923}, issn = {1664-1078}, abstract = {INTRODUCTION: Digital transformation has become an important engine for economic high-quality development and environment high-level protection. However, green total factor productivity (GTFP), as an indicator that comprehensively reflects economic and environmental benefits, there is a lack of studies that analyze the effect of digital transformation on heavily polluting enterprises' GTFP from a micro perspective, and its impact mechanism is still unclear. Therefore, we aim to study the impact of digital transformation on heavily polluting enterprises' GTFP and its mechanism, and explore the heterogeneity of its impact.

METHODS: We use data on Chinese A-share listed enterprises in the heavily polluting industry from 2007 to 2019, measure the enterprise digital transformation indicator using text analysis, and measure the enterprise GTFP indicator using the GML index based on the SBM directional distance function, to investigate the impact of digital transformation on heavily polluting enterprises' GTFP.

RESULTS: Digital transformation can significantly enhance heavily polluting enterprises' GTFP, and this finding still holds after considering the endogeneity problem and conducting robustness tests. Digital transformation can enhance heavily polluting enterprises' GTFP by promoting green innovation, improving management efficiency, and reducing external transaction costs. The improvement role of digital transformation on heavily polluting enterprises' GTFP is more obvious in the samples of non-state-owned enterprises, non-high-tech industries, and the eastern region. Compared with blockchain technology, artificial intelligence technology, cloud computing technology, big data technology, and digital technology applications can significantly improve heavily polluting enterprises' GTFP.

DISCUSSION: Our paper breaks through the limitations of existing research, which not only theoretically enriches the literature related to digital transformation and GTFP, but also practically provides policy implications for continuously promoting heavily polluting enterprises' digital transformation and facilitating their high-quality development.}, } @article {pmid38020047, year = {2023}, author = {Ko, HYK and Tripathi, NK and Mozumder, C and Muengtaweepongsa, S and Pal, I}, title = {Real-Time Remote Patient Monitoring and Alarming System for Noncommunicable Lifestyle Diseases.}, journal = {International journal of telemedicine and applications}, volume = {2023}, number = {}, pages = {9965226}, pmid = {38020047}, issn = {1687-6415}, abstract = {Telemedicine and remote patient monitoring (RPM) systems have been gaining interest and received adaptation in healthcare sectors since the COVID-19 pandemic due to their efficiency and capability to deliver timely healthcare services while containing COVID-19 transmission. These systems were developed using the latest technology in wireless sensors, medical devices, cloud computing, mobile computing, telecommunications, and machine learning technologies. In this article, a real-time remote patient monitoring system is proposed with an accessible, compact, accurate, and low-cost design. The implemented system is designed to an end-to-end communication interface between medical practitioners and patients. The objective of this study is to provide remote healthcare services to patients who need ongoing care or those who have been discharged from the hospital without affecting their daily routines. The developed monitoring system was then evaluated on 1177 records from MIMIC-III clinical dataset (aged between 19 and 99 years). The performance analysis of the proposed system achieved 88.7% accuracy in generating alerts with logistic regression classification algorithm. 
This result reflects positively on the quality and robustness of the proposed study. Since the processing time of the proposed system is less than 2 minutes, it can be stated that the system has a high computational speed and is convenient to use in real-time monitoring. Furthermore, the proposed system will help to compensate for the low doctor-to-patient ratio by monitoring patients from remote locations and aged people who reside in their residences.}, } @article {pmid38006682, year = {2024}, author = {Geroski, T and Gkaintes, O and Vulović, A and Ukaj, N and Barrasa-Fano, J and Perez-Boerema, F and Milićević, B and Atanasijević, A and Živković, J and Živić, A and Roumpi, M and Exarchos, T and Hellmich, C and Scheiner, S and Van Oosterwyck, H and Jakovljević, D and Ivanović, M and Filipović, N}, title = {SGABU computational platform for multiscale modeling: Bridging the gap between education and research.}, journal = {Computer methods and programs in biomedicine}, volume = {243}, number = {}, pages = {107935}, doi = {10.1016/j.cmpb.2023.107935}, pmid = {38006682}, issn = {1872-7565}, mesh = {*Software ; *User-Computer Interface ; Computer Simulation ; Language ; Workflow ; Computational Biology/methods ; }, abstract = {BACKGROUND AND OBJECTIVE: In accordance with the latest aspirations in the field of bioengineering, there is a need to create a web accessible, but powerful cloud computational platform that combines datasets and multiscale models related to bone modeling, cancer, cardiovascular diseases and tissue engineering. The SGABU platform may become a powerful information system for research and education that can integrate data, extract information, and facilitate knowledge exchange with the goal of creating and developing appropriate computing pipelines to provide accurate and comprehensive biological information from the molecular to organ level.

METHODS: The datasets integrated into the platform are obtained from experimental and/or clinical studies and are mainly in tabular or image file format, including metadata. The implementation of multiscale models is an ambitious effort of the platform to capture phenomena at different length scales, described using partial and ordinary differential equations, which are solved numerically on complex geometries with the use of the finite element method. The majority of the SGABU platform's simulation pipelines are provided as Common Workflow Language (CWL) workflows. Each of them requires creating a CWL implementation on the backend and a user-friendly interface using standard web technologies. The platform is available at https://sgabu-test.unic.kg.ac.rs/login.

RESULTS: The main dashboard of the SGABU platform is divided into sections for each field of research, each one of which includes a subsection of datasets and multiscale models. The datasets can be presented in a simple form as tabular data, or using technologies such as Plotly.js for 2D plot interactivity, Kitware Paraview Glance for 3D view. Regarding the models, Docker containerization is used for packing the individual tools, and CWL orchestration for describing inputs with validation forms and outputs with tabular views for output visualization, interactive diagrams, 3D views and animations.

CONCLUSIONS: In practice, the structure of SGABU platform means that any of the integrated workflows can work equally well on any other bioengineering platform. The key advantage of the SGABU platform over similar efforts is its versatility offered with the use of modern, modular, and extensible technology for various levels of architecture.}, } @article {pmid38005614, year = {2023}, author = {Zhang, T and Jin, X and Bai, S and Peng, Y and Li, Y and Zhang, J}, title = {Smart Public Transportation Sensing: Enhancing Perception and Data Management for Efficient and Safety Operations.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {22}, pages = {}, pmid = {38005614}, issn = {1424-8220}, support = {No. KCXST20221021111201002//Science and Technology Innovation Committee of Shenzhen/ ; }, abstract = {The use of cloud computing, big data, IoT, and mobile applications in the public transportation industry has resulted in the generation of vast and complex data, of which the large data volume and data variety have posed several obstacles to effective data sensing and processing with high efficiency in a real-time data-driven public transportation management system. To overcome the above-mentioned challenges and to guarantee optimal data availability for data sensing and processing in public transportation perception, a public transportation sensing platform is proposed to collect, integrate, and organize diverse data from different data sources. The proposed data perception platform connects multiple data systems and some edge intelligent perception devices to enable the collection of various types of data, including traveling information of passengers and transaction data of smart cards. To enable the efficient extraction of precise and detailed traveling behavior, an efficient field-level data lineage exploration method is proposed during logical plan generation and is integrated into the FlinkSQL system seamlessly. 
Furthermore, a row-level fine-grained permission control mechanism is adopted to support flexible data management. With these two techniques, the proposed data management system can support efficient data processing on large amounts of data and conducts comprehensive analysis and application of business data from numerous different sources to realize the value of the data with high data safety. Through operational testing in real environments, the proposed platform has proven highly efficient and effective in managing organizational operations, data assets, data life cycle, offline development, and backend administration over a large amount of various types of public transportation traffic data.}, } @article {pmid38005586, year = {2023}, author = {Nugroho, AK and Shioda, S and Kim, T}, title = {Optimal Resource Provisioning and Task Offloading for Network-Aware and Federated Edge Computing.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {22}, pages = {}, pmid = {38005586}, issn = {1424-8220}, support = {2021R1F1A1059109//National Research Foundation of Korea/ ; Research Grant, 2022//Pusan National University/ ; }, abstract = {Compared to cloud computing, mobile edge computing (MEC) is a promising solution for delay-sensitive applications due to its proximity to end users. Because of its ability to offload resource-intensive tasks to nearby edge servers, MEC allows a diverse range of compute- and storage-intensive applications to operate on resource-constrained devices. The optimal utilization of MEC can lead to enhanced responsiveness and quality of service, but it requires careful design from the perspective of user-base station association, virtualized resource provisioning, and task distribution. Also, considering the limited exploration of the federation concept in the existing literature, its impacts on the allocation and management of resources still remain not widely recognized. 
In this paper, we study the network and MEC resource scheduling problem, where some edge servers are federated, limiting resource expansion within the same federations. The integration of network and MEC is crucial, emphasizing the necessity of a joint approach. In this work, we present NAFEOS, a proposed solution formulated as a two-stage algorithm that can effectively integrate association optimization with vertical and horizontal scaling. The Stage-1 problem optimizes the user-base station association and federation assignment so that the edge servers can be utilized in a balanced manner. The following Stage-2 dynamically schedules both vertical and horizontal scaling so that the fluctuating task-offloading demands from users are fulfilled. The extensive evaluations and comparison results show that the proposed approach can effectively achieve optimal resource utilization.}, } @article {pmid38005558, year = {2023}, author = {Oliveira, M and Chauhan, S and Pereira, F and Felgueiras, C and Carvalho, D}, title = {Blockchain Protocols and Edge Computing Targeting Industry 5.0 Needs.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {22}, pages = {}, pmid = {38005558}, issn = {1424-8220}, abstract = {"Industry 5.0" is the latest industrial revolution. A variety of cutting-edge technologies, including artificial intelligence, the Internet of Things (IoT), and others, come together to form it. Billions of devices are connected for high-speed data transfer, especially in a 5G-enabled industrial environment for information collection and processing. Most of the issues, such as access control mechanism, time to fetch the data from different devices, and protocols used, may not be applicable in the future as these protocols are based upon a centralized mechanism. This centralized mechanism may have a single point of failure along with the computational overhead. 
Thus, there is a need for an efficient decentralized access control mechanism for device-to-device (D2D) communication in various industrial sectors, for example, sensors in different regions may collect and process the data for making intelligent decisions. In such an environment, reliability, security, and privacy are major concerns as most of the solutions are based upon a centralized control mechanism. To mitigate the aforementioned issues, this paper provides the opportunities for and highlights some of the most impressive initiatives that help to curve the future. This new era will bring about significant changes in the way businesses operate, allowing them to become more cost-effective, more efficient, and produce higher-quality goods and services. As sensors are getting more accurate, cheaper, and have lower time responses, 5G networks are being integrated, and more industrial equipment and machinery are becoming available; hence, various sectors, including the manufacturing sector, are going through a significant period of transition right now. Additionally, the emergence of the cloud enables modern production models that use the cloud (both internal and external services), networks, and systems to leverage the cloud's low cost, scalability, increased computational power, real-time communication, and data transfer capabilities to create much smarter and more autonomous systems. We discuss the ways in which decentralized networks that make use of protocols help to achieve decentralization and how network meshes can grow to make things more secure, reliable, and cohere with these technologies, which are not going away anytime soon. We emphasize the significance of new design in regard to cybersecurity, data integrity, and storage by using straightforward examples that have the potential to lead to the excellence of distributed systems. 
This groundbreaking paper delves deep into the world of industrial automation and explores the possibilities to adopt blockchain for developing solutions for smart cities, smart homes, healthcare, smart agriculture, autonomous vehicles, and supply chain management within Industry 5.0. With an in-depth examination of various consensus mechanisms, readers gain a comprehensive understanding of the latest developments in this field. The paper also explores the current issues and challenges associated with blockchain adaptation for industrial automation and provides a thorough comparison of the available consensus, enabling end customers to select the most suitable one based on its unique advantages. Case studies highlight how to enable the adoption of blockchain in Industry 5.0 solutions effectively and efficiently, offering valuable insights into the potential challenges that lie ahead, particularly for smart industrial applications.}, } @article {pmid38004827, year = {2023}, author = {Kim, J and Koh, H}, title = {MiTree: A Unified Web Cloud Analytic Platform for User-Friendly and Interpretable Microbiome Data Mining Using Tree-Based Methods.}, journal = {Microorganisms}, volume = {11}, number = {11}, pages = {}, pmid = {38004827}, issn = {2076-2607}, support = {2021R1C1C1013861//National Research Foundation of Korea/ ; }, abstract = {The advent of next-generation sequencing has greatly accelerated the field of human microbiome studies. Currently, investigators are seeking, struggling and competing to find new ways to diagnose, treat and prevent human diseases through the human microbiome. Machine learning is a promising approach to help such an effort, especially due to the high complexity of microbiome data. However, many of the current machine learning algorithms are in a "black box", i.e., they are difficult to understand and interpret. 
In addition, clinicians, public health practitioners and biologists are not usually skilled at computer programming, and they do not always have high-end computing devices. Thus, in this study, we introduce a unified web cloud analytic platform, named MiTree, for user-friendly and interpretable microbiome data mining. MiTree employs tree-based learning methods, including decision tree, random forest and gradient boosting, that are well understood and suited to human microbiome studies. We also stress that MiTree can address both classification and regression problems through covariate-adjusted or unadjusted analysis. MiTree should serve as an easy-to-use and interpretable data mining tool for microbiome-based disease prediction modeling, and should provide new insights into microbiome-based diagnostics, treatment and prevention. MiTree is an open-source software that is available on our web server.}, } @article {pmid37987882, year = {2023}, author = {Bahadur, FT and Shah, SR and Nidamanuri, RR}, title = {Applications of remote sensing vis-à-vis machine learning in air quality monitoring and modelling: a review.}, journal = {Environmental monitoring and assessment}, volume = {195}, number = {12}, pages = {1502}, pmid = {37987882}, issn = {1573-2959}, mesh = {*Artificial Intelligence ; Remote Sensing Technology ; Environmental Monitoring ; *Air Pollution ; Machine Learning ; }, abstract = {Environmental contamination especially air pollution is an exponentially growing menace requiring immediate attention, as it lingers on with the associated risks of health, economic and ecological crisis. The special focus of this study is on the advances in Air Quality (AQ) monitoring using modern sensors, integrated monitoring systems, remote sensing and the usage of Machine Learning (ML), Deep Learning (DL) algorithms, artificial neural networks, recent computational techniques, hybridizing techniques and different platforms available for AQ modelling. 
The modern world is data-driven, where critical decisions are taken based on the available and accessible data. Today's data analytics is a consequence of the information explosion we have reached. The current research also tends to re-evaluate its scope with data analytics. The emergence of artificial intelligence and machine learning in the research scenario has radically changed the methodologies and approaches of modern research. The aim of this review is to assess the impact of data analytics such as ML/DL frameworks, data integration techniques, advanced statistical modelling, cloud computing platforms and constantly improving optimization algorithms on AQ research. The usage of remote sensing in AQ monitoring along with providing enormous datasets is constantly filling the spatial gaps of ground stations, as the long-term air pollutant dynamics is best captured by the panoramic view of satellites. Remote sensing coupled with the techniques of ML/DL has the most impact in shaping the modern trends in AQ research. Current standing of research in this field, emerging trends and future scope are also discussed.}, } @article {pmid37979853, year = {2024}, author = {Wilkinson, R and Mleczko, MM and Brewin, RJW and Gaston, KJ and Mueller, M and Shutler, JD and Yan, X and Anderson, K}, title = {Environmental impacts of earth observation data in the constellation and cloud computing era.}, journal = {The Science of the total environment}, volume = {909}, number = {}, pages = {168584}, doi = {10.1016/j.scitotenv.2023.168584}, pmid = {37979853}, issn = {1879-1026}, abstract = {Numbers of Earth Observation (EO) satellites have increased exponentially over the past decade reaching the current population of 1193 (January 2023). Consequently, EO data volumes have mushroomed and data storage and processing have migrated to the cloud. 
Whilst attention has been given to the launch and in-orbit environmental impacts of satellites, EO data environmental footprints have been overlooked. These issues require urgent attention given data centre water and energy consumption, high carbon emissions for computer component manufacture, and difficulty of recycling computer components. Doing so is essential if the environmental good of EO is to withstand scrutiny. We provide the first assessment of the EO data life-cycle and estimate that the current size of the global EO data collection is ~807 PB, increasing by ~100 PB/year. Storage of this data volume generates annual CO2 equivalent emissions of 4101 t. Major state-funded EO providers use 57 of their own data centres globally, and a further 178 private cloud services, with considerable duplication of datasets across repositories. We explore scenarios for the environmental cost of performing EO functions on the cloud compared to desktop machines. A simple band arithmetic function applied to a Landsat 9 scene using Google Earth Engine (GEE) generated CO2 equivalent (e) emissions of 0.042-0.69 g CO2e (locally) and 0.13-0.45 g CO2e (European data centre; values multiply by nine for Australian data centre). Computation-based emissions scale rapidly for more intense processes and when testing code. When using cloud services such as GEE, users have no choice about the data centre used and we push for EO providers to be more transparent about the location-specific impacts of EO work, and to provide tools for measuring the environmental cost of cloud computation. 
The EO community as a whole needs to critically consider the broad suite of EO data life-cycle impacts.}, } @article {pmid37979340, year = {2023}, author = {Tomassini, S and Falcionelli, N and Bruschi, G and Sbrollini, A and Marini, N and Sernani, P and Morettini, M and Müller, H and Dragoni, AF and Burattini, L}, title = {On-cloud decision-support system for non-small cell lung cancer histology characterization from thorax computed tomography scans.}, journal = {Computerized medical imaging and graphics : the official journal of the Computerized Medical Imaging Society}, volume = {110}, number = {}, pages = {102310}, doi = {10.1016/j.compmedimag.2023.102310}, pmid = {37979340}, issn = {1879-0771}, mesh = {Humans ; *Carcinoma, Non-Small-Cell Lung/diagnostic imaging/pathology ; *Lung Neoplasms/diagnostic imaging/pathology ; *Carcinoma, Squamous Cell/pathology ; Tomography, X-Ray Computed/methods ; ROC Curve ; }, abstract = {Non-Small Cell Lung Cancer (NSCLC) accounts for about 85% of all lung cancers. Developing non-invasive techniques for NSCLC histology characterization may not only help clinicians to make targeted therapeutic treatments but also prevent subjects from undergoing lung biopsy, which is challenging and could lead to clinical implications. The motivation behind the study presented here is to develop an advanced on-cloud decision-support system, named LUCY, for non-small cell LUng Cancer histologY characterization directly from thorax Computed Tomography (CT) scans. 
This aim was pursued by selecting thorax CT scans of 182 LUng ADenocarcinoma (LUAD) and 186 LUng Squamous Cell carcinoma (LUSC) subjects from four openly accessible data collections (NSCLC-Radiomics, NSCLC-Radiogenomics, NSCLC-Radiomics-Genomics and TCGA-LUAD), in addition to the implementation and comparison of two end-to-end neural networks (the core layer of which is a convolutional long short-term memory layer), the performance evaluation on the test dataset (NSCLC-Radiomics-Genomics) from a subject-level perspective in relation to NSCLC histological subtype location and grade, and the dynamic visual interpretation of the achieved results by producing and analyzing one heatmap video for each scan. LUCY reached test Area Under the receiver operating characteristic Curve (AUC) values above 77% in all NSCLC histological subtype location and grade groups, and a best AUC value of 97% on the entire dataset reserved for testing, proving high generalizability to heterogeneous data and robustness. Thus, LUCY is a clinically-useful decision-support system able to timely, non-invasively and reliably provide visually-understandable predictions on LUAD and LUSC subjects in relation to clinically-relevant information.}, } @article {pmid37961077, year = {2023}, author = {Kwabla, W and Dinc, F and Oumimoun, K and Kockara, S and Halic, T and Demirel, D and Arikatla, S and Ahmadi, S}, title = {Evaluation of WebRTC in the Cloud for Surgical Simulations: A case study on Virtual Rotator Cuff Arthroscopic Skill Trainer (ViRCAST).}, journal = {Learning and collaboration technologies : 10th International Conference, LCT 2023, held as part of the 25th HCI International Conference, HCII 2023, Copenhagen, Denmark, July 23-28, 2023, proceedings. Part II. 
LCT (Conference) (10th : 2...}, volume = {14041}, number = {}, pages = {127-143}, pmid = {37961077}, support = {R01 EB005807/EB/NIBIB NIH HHS/United States ; R01 EB025241/EB/NIBIB NIH HHS/United States ; P20 GM103429/GM/NIGMS NIH HHS/United States ; R44 AR075481/AR/NIAMS NIH HHS/United States ; R01 EB033674/EB/NIBIB NIH HHS/United States ; }, abstract = {Web Real-Time Communication (WebRTC) is an open-source technology which enables remote peer-to-peer video and audio connection. It has quickly become the new standard for real-time communications over the web and is commonly used as a video conferencing platform. In this study, we present a different application domain which may greatly benefit from WebRTC technology, that is virtual reality (VR) based surgical simulations. Virtual Rotator Cuff Arthroscopic Skill Trainer (ViRCAST) is our testing platform that we completed preliminary feasibility studies for WebRTC. Since the elasticity of cloud computing provides the ability to meet possible future hardware/software requirements and demand growth, ViRCAST is deployed in a cloud environment. Additionally, in order to have plausible simulations and interactions, any VR-based surgery simulator must have haptic feedback. Therefore, we implemented an interface to WebRTC for integrating haptic devices. We tested ViRCAST on Google cloud through haptic-integrated WebRTC at various client configurations. Our experiments showed that WebRTC with cloud and haptic integrations is a feasible solution for VR-based surgery simulators. 
From our experiments, the WebRTC integrated simulation produced an average frame rate of 33 fps, and the hardware integration produced an average lag of 0.7 milliseconds in real-time.}, } @article {pmid37960657, year = {2023}, author = {Farooq, MS and Abdullah, M and Riaz, S and Alvi, A and Rustam, F and Flores, MAL and Galán, JC and Samad, MA and Ashraf, I}, title = {A Survey on the Role of Industrial IoT in Manufacturing for Implementation of Smart Industry.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {21}, pages = {}, pmid = {37960657}, issn = {1424-8220}, support = {N/A//the European University of the Atlantic/ ; }, abstract = {The Internet of Things (IoT) is an innovative technology that presents effective and attractive solutions to revolutionize various domains. Numerous solutions based on the IoT have been designed to automate industries, manufacturing units, and production houses to mitigate human involvement in hazardous operations. Owing to the large number of publications in the IoT paradigm, in particular those focusing on industrial IoT (IIoT), a comprehensive survey is significantly important to provide insights into recent developments. This survey presents the workings of the IoT-based smart industry and its major components and proposes the state-of-the-art network infrastructure, including structured layers of IIoT architecture, IIoT network topologies, protocols, and devices. Furthermore, the relationship between IoT-based industries and key technologies is analyzed, including big data storage, cloud computing, and data analytics. A detailed discussion of IIoT-based application domains, smartphone application solutions, and sensor- and device-based IIoT applications developed for the management of the smart industry is also presented. Consequently, IIoT-based security attacks and their relevant countermeasures are highlighted. 
By analyzing the essential components, their security risks, and available solutions, future research directions regarding the implementation of IIoT are outlined. Finally, a comprehensive discussion of open research challenges and issues related to the smart industry is also presented.}, } @article {pmid37960612, year = {2023}, author = {Leng, J and Chen, X and Zhao, J and Wang, C and Zhu, J and Yan, Y and Zhao, J and Shi, W and Zhu, Z and Jiang, X and Lou, Y and Feng, C and Yang, Q and Xu, F}, title = {A Light Vehicle License-Plate-Recognition System Based on Hybrid Edge-Cloud Computing.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {21}, pages = {}, pmid = {37960612}, issn = {1424-8220}, support = {No.ZR2022MF289//Shandong Provincial Natural Science Foundation/ ; ZR2019MA037//Shandong Provincial Natural Science Foundation/ ; No.62271293//National Natural Science Foundation of China/ ; No.2021GXRC071//2021 Jinan City "20 New Universities" Support Project/ ; No.2021yb08//Qilu University of Technology 2021 Campus General Teaching Reform Project/ ; No. P202204//Qilu University of Technology 2022 Talent Training and Teaching Reform Project/ ; }, abstract = {With the world moving towards low-carbon and environmentally friendly development, the rapid growth of new-energy vehicles is evident. The utilization of deep-learning-based license-plate-recognition (LPR) algorithms has become widespread. However, existing LPR systems have difficulty achieving timely, effective, and energy-saving recognition due to their inherent limitations such as high latency and energy consumption. An innovative Edge-LPR system that leverages edge computing and lightweight network models is proposed in this paper. With the help of this technology, the excessive reliance on the computational capacity and the uneven implementation of resources of cloud computing can be successfully mitigated. The system is specifically a simple LPR. 
Channel pruning was used to reconstruct the backbone layer, reduce the network model parameters, and effectively reduce the GPU resource consumption. By utilizing the computing resources of the Intel second-generation computing stick, the network models were deployed on edge gateways to detect license plates directly. The reliability and effectiveness of the Edge-LPR system were validated through the experimental analysis of the CCPD standard dataset and real-time monitoring dataset from charging stations. The experimental results from the CCPD common dataset demonstrated that the network's total number of parameters was only 0.606 MB, with an impressive accuracy rate of 97%.}, } @article {pmid37960584, year = {2023}, author = {Younas, MI and Iqbal, MJ and Aziz, A and Sodhro, AH}, title = {Toward QoS Monitoring in IoT Edge Devices Driven Healthcare-A Systematic Literature Review.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {21}, pages = {}, pmid = {37960584}, issn = {1424-8220}, support = {2020VBC0002//PIFI 2020 (2020VBC0002), China/ ; }, mesh = {Humans ; *Artificial Intelligence ; Cloud Computing ; *Disasters ; Industry ; Delivery of Health Care ; }, abstract = {Smart healthcare is altering the delivery of healthcare by combining the benefits of IoT, mobile, and cloud computing. Cloud computing has tremendously helped the health industry connect healthcare facilities, caregivers, and patients for information sharing. The main drivers for implementing effective healthcare systems are low latency and faster response times. Thus, quick responses among healthcare organizations are important in general, but in an emergency, significant latency at different stakeholders might result in disastrous situations. Thus, cutting-edge approaches like edge computing and artificial intelligence (AI) can deal with such problems. A packet cannot be sent from one location to another unless the "quality of service" (QoS) specifications are met. 
The term QoS refers to how well a service works for users. QoS parameters like throughput, bandwidth, transmission delay, availability, jitter, latency, and packet loss are crucial in this regard. Our focus is on the individual devices present at different levels of the smart healthcare infrastructure and the QoS requirements of the healthcare system as a whole. The contribution of this paper is five-fold: first, a novel pre-SLR method for comprehensive keyword research on subject-related themes for mining pertinent research papers for quality SLR; second, SLR on QoS improvement in smart healthcare apps; third a review of several QoS techniques used in current smart healthcare apps; fourth, the examination of the most important QoS measures in contemporary smart healthcare apps; fifth, offering solutions to the problems encountered in delivering QoS in smart healthcare IoT applications to improve healthcare services.}, } @article {pmid37960453, year = {2023}, author = {Abbas, Q and Ahmad, G and Alyas, T and Alghamdi, T and Alsaawy, Y and Alzahrani, A}, title = {Revolutionizing Urban Mobility: IoT-Enhanced Autonomous Parking Solutions with Transfer Learning for Smart Cities.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {21}, pages = {}, pmid = {37960453}, issn = {1424-8220}, abstract = {Smart cities have emerged as a specialized domain encompassing various technologies, transitioning from civil engineering to technology-driven solutions. The accelerated development of technologies, such as the Internet of Things (IoT), software-defined networks (SDN), 5G, artificial intelligence, cognitive science, and analytics, has played a crucial role in providing solutions for smart cities. Smart cities heavily rely on devices, ad hoc networks, and cloud computing to integrate and streamline various activities towards common goals. 
However, the complexity arising from multiple cloud service providers offering myriad services necessitates a stable and coherent platform for sustainable operations. The Smart City Operational Platform Ecology (SCOPE) model has been developed to address the growing demands, and incorporates machine learning, cognitive correlates, ecosystem management, and security. SCOPE provides an ecosystem that establishes a balance for achieving sustainability and progress. In the context of smart cities, Internet of Things (IoT) devices play a significant role in enabling automation and data capture. This research paper focuses on a specific module of SCOPE, which deals with data processing and learning mechanisms for object identification in smart cities. Specifically, it presents a car parking system that utilizes smart identification techniques to identify vacant slots. The learning controller in SCOPE employs a two-tier approach, and utilizes two different models, namely AlexNet and YOLO, to ensure procedural stability and improvement.}, } @article {pmid37954389, year = {2023}, author = {Biswas, J and Jobaer, MA and Haque, SF and Islam Shozib, MS and Limon, ZA}, title = {Mapping and monitoring land use land cover dynamics employing Google Earth Engine and machine learning algorithms on Chattogram, Bangladesh.}, journal = {Heliyon}, volume = {9}, number = {11}, pages = {e21245}, pmid = {37954389}, issn = {2405-8440}, abstract = {Land use land cover change (LULC) significantly impacts urban sustainability, urban planning, climate change, natural resource management, and biodiversity. The Chattogram Metropolitan Area (CMA) has been going through rapid urbanization, which has impacted the LULC transformation and accelerated the growth of urban sprawl and unplanned development. 
To map those urban sprawls and natural resource depletion, this study aims to monitor the LULC change using Landsat satellite imagery from 2003 to 2023 in the cloud-based remote sensing platform Google Earth Engine (GEE). LULC has been classified into five distinct classes: waterbody, built-up, bare land, dense vegetation, and cropland, employing four machine learning algorithms (random forest, gradient tree boost, classification & regression tree, and support vector machine) in the GEE platform. The overall accuracy (kappa statistics) and the receiver operating characteristic (ROC) curve have demonstrated satisfactory results. The results indicate that the CART model outperforms other LULC models when considering efficiency and accuracy in the designated study region. The analysis of LULC conversions revealed notable trends, patterns, and magnitudes across all periods: 2003-2013, 2013-2023, and 2003-2023. The expansion of unregulated built-up areas and the decline of croplands emerged as primary concerns. 
However, there was a positive indication of a significant increase in dense vegetation within the study area over the 20 years.}, } @article {pmid37946898, year = {2023}, author = {Healthcare Engineering, JO}, title = {Retracted: Sports Training Teaching Device Based on Big Data and Cloud Computing.}, journal = {Journal of healthcare engineering}, volume = {2023}, number = {}, pages = {9795604}, pmid = {37946898}, issn = {2040-2309}, abstract = {[This retracts the article DOI: 10.1155/2021/7339486.].}, } @article {pmid37946860, year = {2023}, author = {Intelligence And Neuroscience, C}, title = {Retracted: Real-Time Detection of Body Nutrition in Sports Training Based on Cloud Computing and Somatosensory Network.}, journal = {Computational intelligence and neuroscience}, volume = {2023}, number = {}, pages = {9784817}, pmid = {37946860}, issn = {1687-5273}, abstract = {[This retracts the article DOI: 10.1155/2022/9911905.].}, } @article {pmid37942151, year = {2023}, author = {Faruqui, N and Yousuf, MA and Kateb, FA and Abdul Hamid, M and Monowar, MM}, title = {Healthcare As a Service (HAAS): CNN-based cloud computing model for ubiquitous access to lung cancer diagnosis.}, journal = {Heliyon}, volume = {9}, number = {11}, pages = {e21520}, pmid = {37942151}, issn = {2405-8440}, abstract = {The field of automated lung cancer diagnosis using Computed Tomography (CT) scans has been significantly advanced by the precise predictions offered by Convolutional Neural Network (CNN)-based classifiers. Critical areas of study include improving image quality, optimizing learning algorithms, and enhancing diagnostic accuracy. To facilitate a seamless transition from research laboratories to real-world applications, it is crucial to improve the technology's usability-a factor often neglected in current state-of-the-art research. Yet, current state-of-the-art research in this field frequently overlooks the need for expediting this process. 
This paper introduces Healthcare-As-A-Service (HAAS), an innovative concept inspired by Software-As-A-Service (SAAS) within the cloud computing paradigm. As a comprehensive lung cancer diagnosis service system, HAAS has the potential to reduce lung cancer mortality rates by providing early diagnosis opportunities to everyone. We present HAASNet, a cloud-compatible CNN that boasts an accuracy rate of 96.07%. By integrating HAASNet predictions with physio-symptomatic data from the Internet of Medical Things (IoMT), the proposed HAAS model generates accurate and reliable lung cancer diagnosis reports. Leveraging IoMT and cloud technology, the proposed service is globally accessible via the Internet, transcending geographic boundaries. This groundbreaking lung cancer diagnosis service achieves average precision, recall, and F1-scores of 96.47%, 95.39%, and 94.81%, respectively.}, } @article {pmid37941779, year = {2023}, author = {Wang, C and Dai, W}, title = {Lung nodule segmentation via semi-residual multi-resolution neural networks.}, journal = {Open life sciences}, volume = {18}, number = {1}, pages = {20220727}, pmid = {37941779}, issn = {2391-5412}, abstract = {The integration of deep neural networks and cloud computing has become increasingly prevalent within the domain of medical image processing, facilitated by the recent strides in neural network theory and the advent of the internet of things (IoTs). This juncture has led to the emergence of numerous image segmentation networks and innovative solutions that facilitate medical practitioners in diagnosing lung cancer. Within the contours of this study, we present an end-to-end neural network model, christened as the "semi-residual Multi-resolution Convolutional Neural Network" (semi-residual MCNN), devised to engender precise lung nodule segmentation maps within the milieu of cloud computing. 
Central to the architecture are three pivotal features, each coalescing to effectuate a notable enhancement in predictive accuracy: the incorporation of semi-residual building blocks, the deployment of group normalization techniques, and the orchestration of multi-resolution output heads. This innovative model is systematically subjected to rigorous training and testing regimes, using the LIDC-IDRI dataset - a widely embraced and accessible repository - comprising a diverse ensemble of 1,018 distinct lung CT images tailored to the realm of lung nodule segmentation.}, } @article {pmid37937074, year = {2023}, author = {Wadford, DA and Baumrind, N and Baylis, EF and Bell, JM and Bouchard, EL and Crumpler, M and Foote, EM and Gilliam, S and Glaser, CA and Hacker, JK and Ledin, K and Messenger, SL and Morales, C and Smith, EA and Sevinsky, JR and Corbett-Detig, RB and DeRisi, J and Jacobson, K}, title = {Implementation of California COVIDNet - a multi-sector collaboration for statewide SARS-CoV-2 genomic surveillance.}, journal = {Frontiers in public health}, volume = {11}, number = {}, pages = {1249614}, pmid = {37937074}, issn = {2296-2565}, support = {U01 CK000539/CK/NCEZID CDC HHS/United States ; U01CK000539/ACL/ACL HHS/United States ; }, mesh = {Humans ; *SARS-CoV-2/genetics ; *COVID-19/epidemiology ; Genomics ; California/epidemiology ; Data Management ; }, abstract = {INTRODUCTION: The SARS-CoV-2 pandemic represented a formidable scientific and technological challenge to public health due to its rapid spread and evolution. To meet these challenges and to characterize the virus over time, the State of California established the California SARS-CoV-2 Whole Genome Sequencing (WGS) Initiative, or "California COVIDNet". 
This initiative constituted an unprecedented multi-sector collaborative effort to achieve large-scale genomic surveillance of SARS-CoV-2 across California to monitor the spread of variants within the state, to detect new and emerging variants, and to characterize outbreaks in congregate, workplace, and other settings.

METHODS: California COVIDNet consists of 50 laboratory partners that include public health laboratories, private clinical diagnostic laboratories, and academic sequencing facilities as well as expert advisors, scientists, consultants, and contractors. Data management, sample sourcing and processing, and computational infrastructure were major challenges that had to be resolved in the midst of the pandemic chaos in order to conduct SARS-CoV-2 genomic surveillance. Data management, storage, and analytics needs were addressed with both conventional database applications and newer cloud-based data solutions, which also fulfilled computational requirements.

RESULTS: Representative and randomly selected samples were sourced from state-sponsored community testing sites. Since March of 2021, California COVIDNet partners have contributed more than 450,000 SARS-CoV-2 genomes sequenced from remnant samples from both molecular and antigen tests. Combined with genomes from CDC-contracted WGS labs, there are currently nearly 800,000 genomes from all 61 local health jurisdictions (LHJs) in California in the COVIDNet sequence database. More than 5% of all reported positive tests in the state have been sequenced, with similar rates of sequencing across 5 major geographic regions in the state.

DISCUSSION: Implementation of California COVIDNet revealed challenges and limitations in the public health system. These were overcome by engaging in novel partnerships that established a successful genomic surveillance program which provided valuable data to inform the COVID-19 public health response in California. Significantly, California COVIDNet has provided a foundational data framework and computational infrastructure needed to respond to future public health crises.}, } @article {pmid37933859, year = {2024}, author = {Varadi, M and Bertoni, D and Magana, P and Paramval, U and Pidruchna, I and Radhakrishnan, M and Tsenkov, M and Nair, S and Mirdita, M and Yeo, J and Kovalevskiy, O and Tunyasuvunakool, K and Laydon, A and Žídek, A and Tomlinson, H and Hariharan, D and Abrahamson, J and Green, T and Jumper, J and Birney, E and Steinegger, M and Hassabis, D and Velankar, S}, title = {AlphaFold Protein Structure Database in 2024: providing structure coverage for over 214 million protein sequences.}, journal = {Nucleic acids research}, volume = {52}, number = {D1}, pages = {D368-D375}, pmid = {37933859}, issn = {1362-4962}, support = {//Google DeepMind/ ; 2019R1A6A1A10073437//National Research Foundation of Korea/ ; //Samsung DS Research Fund/ ; //Seoul National University/ ; RS-2023-00250470//National Research Foundation of Korea/ ; }, mesh = {Amino Acid Sequence ; *Artificial Intelligence ; Databases, Protein ; *Proteome ; Search Engine ; Proteins/chemistry ; *Protein Structure, Secondary ; }, abstract = {The AlphaFold Protein Structure Database (AlphaFold DB, https://alphafold.ebi.ac.uk) has significantly impacted structural biology by amassing over 214 million predicted protein structures, expanding from the initial 300k structures released in 2021. 
Enabled by the groundbreaking AlphaFold2 artificial intelligence (AI) system, the predictions archived in AlphaFold DB have been integrated into primary data resources such as PDB, UniProt, Ensembl, InterPro and MobiDB. Our manuscript details subsequent enhancements in data archiving, covering successive releases encompassing model organisms, global health proteomes, Swiss-Prot integration, and a host of curated protein datasets. We detail the data access mechanisms of AlphaFold DB, from direct file access via FTP to advanced queries using Google Cloud Public Datasets and the programmatic access endpoints of the database. We also discuss the improvements and services added since its initial release, including enhancements to the Predicted Aligned Error viewer, customisation options for the 3D viewer, and improvements in the search engine of AlphaFold DB.}, } @article {pmid37932347, year = {2023}, author = {Bao, J and Wu, C and Lin, Y and Zhong, L and Chen, X and Yin, R}, title = {A scalable approach to optimize traffic signal control with federated reinforcement learning.}, journal = {Scientific reports}, volume = {13}, number = {1}, pages = {19184}, pmid = {37932347}, issn = {2045-2322}, abstract = {Intelligent Transportation has seen significant advancements with Deep Learning and the Internet of Things, making Traffic Signal Control (TSC) research crucial for reducing congestion, travel time, emissions, and energy consumption. Reinforcement Learning (RL) has emerged as the primary method for TSC, but centralized learning poses communication and computing challenges, while distributed learning struggles to adapt across intersections. This paper presents a novel approach using Federated Learning (FL)-based RL for TSC. FL integrates knowledge from local agents into a global model, overcoming intersection variations with a unified agent state structure. 
To endow the model with the capacity to globally represent the TSC task while preserving the distinctive feature information inherent to each intersection, a segment of the RL neural network is aggregated to the cloud, and the remaining layers undergo fine-tuning upon convergence of the model training process. Extensive experiments demonstrate reduced queuing and waiting times globally, and the successful scalability of the proposed model is validated on a real-world traffic network in Monaco, showing its potential for new intersections.}, } @article {pmid37932308, year = {2023}, author = {Mangalampalli, S and Karri, GR and Mohanty, SN and Ali, S and Khan, MI and Abduvalieva, D and Awwad, FA and Ismail, EAA}, title = {Fault tolerant trust based task scheduler using Harris Hawks optimization and deep reinforcement learning in multi cloud environment.}, journal = {Scientific reports}, volume = {13}, number = {1}, pages = {19179}, pmid = {37932308}, issn = {2045-2322}, abstract = {Cloud Computing model provides on demand delivery of seamless services to customers around the world yet single point of failures occurs in cloud model due to improper assignment of tasks to precise virtual machines which leads to increase in rate of failures which effects SLA based trust parameters (Availability, success rate, turnaround efficiency) upon which impacts trust on cloud provider. In this paper, we proposed a task scheduling algorithm which captures priorities of all tasks, virtual resources from task manager which comes onto cloud application console are fed to task scheduler which takes scheduling decisions based on hybridization of both Harris hawk optimization and ML based reinforcement algorithms to enhance the scheduling process. Task scheduling in this research performed in two phases i.e. Task selection and task mapping phases. In task selection phase, all incoming priorities of tasks, VMs are captured and generates schedules using Harris hawks optimization. 
In task mapping phase, generated schedules are optimized using a DQN model which is based on deep reinforcement learning. In this research, we used multi cloud environment to tackle availability of VMs if there is an increase in upcoming tasks dynamically and migrate tasks from one cloud to another to mitigate migration time. Extensive simulations are conducted in Cloudsim and workload generated by fabricated datasets and realtime synthetic workloads from NASA, HPC2N are used to check efficacy of our proposed scheduler (FTTHDRL). It is compared against existing task schedulers i.e. MOABCQ, RATS-HM, AINN-BPSO approaches and our proposed FTTHDRL outperforms existing mechanisms by minimizing rate of failures, resource cost, improved SLA based trust parameters.}, } @article {pmid37928198, year = {2023}, author = {Mee, L and Barribeau, SM}, title = {Influence of social lifestyles on host-microbe symbioses in the bees.}, journal = {Ecology and evolution}, volume = {13}, number = {11}, pages = {e10679}, pmid = {37928198}, issn = {2045-7758}, abstract = {Microbiomes are increasingly recognised as critical for the health of an organism. In eusocial insect societies, frequent social interactions allow for high-fidelity transmission of microbes across generations, leading to closer host-microbe coevolution. The microbial communities of bees with other social lifestyles are less studied, and few comparisons have been made between taxa that vary in social structure. To address this gap, we leveraged a cloud-computing resource and publicly available transcriptomic data to conduct a survey of microbial diversity in bee samples from a variety of social lifestyles and taxa. We consistently recover the core microbes of well-studied corbiculate bees, supporting this method's ability to accurately characterise microbial communities. 
We find that the bacterial communities of bees are influenced by host location, phylogeny and social lifestyle, although no clear effect was found for fungal or viral microbial communities. Bee genera with more complex societies tend to harbour more diverse microbes, with Wolbachia detected more commonly in solitary tribes. We present a description of the microbiota of Euglossine bees and find that they do not share the "corbiculate core" microbiome. Notably, we find that bacteria with known anti-pathogenic properties are present across social bee genera, suggesting that symbioses that enhance host immunity are important with higher sociality. Our approach provides an inexpensive means of exploring microbiomes of a given taxa and identifying avenues for further research. These findings contribute to our understanding of the relationships between bees and their associated microbial communities, highlighting the importance of considering microbiome dynamics in investigations of bee health.}, } @article {pmid37917778, year = {2023}, author = {Qian, J and She, Q}, title = {The impact of corporate digital transformation on the export product quality: Evidence from Chinese enterprises.}, journal = {PloS one}, volume = {18}, number = {11}, pages = {e0293461}, pmid = {37917778}, issn = {1932-6203}, abstract = {The digital economy has become a driving force in the rapid development of the global economy and the promotion of export trade. Pivotal in its advent, the digital transformation of enterprises utilizes cloud computing, big data, artificial intelligence, and other digital technologies to provide an impetus for evolution and transformation in various industries and fields. in enhancing quality and efficiency. This has been critical for enhancing both quality and efficiency in enterprises based in the People's Republic of China. 
Through the available data on its listed enterprises, this paper measures their digital transformation through a textual analysis and examines how this transformation influences their export product quality. We then explore the possible mechanisms at work in this influence from the perspective of enterprise heterogeneity. The results find that: (1) Digital transformation significantly enhances the export product quality in an enterprise, and the empirical findings still hold after a series of robustness tests; (2) Further mechanism analysis reveals that the digital transformation can positively affect export product quality through the two mechanisms of process productivity (φ), the ability to produce output using fewer variable inputs, and product productivity (ξ), the ability to produce quality with fewer fixed outlays; (3) In terms of enterprise heterogeneity, the impact of digital transformation on export product quality is significant for enterprises engaged in general trade or high-tech industries and those with strong corporate governance. In terms of heterogeneity in digital transformation of enterprise and the regional digital infrastructure level, the higher the level of digital transformation and regional digital infrastructure, the greater the impact of digital transformation on export product quality. 
This paper has practical implications for public policies that offer vital aid to enterprises as they seek digital transformation to remain in sync with the digital economy, upgrade their product quality, and drive the sustainable, high-quality, and healthy development of their nation's economy.}, } @article {pmid37905003, year = {2023}, author = {Copeland, CJ and Roddy, JW and Schmidt, AK and Secor, PR and Wheeler, TJ}, title = {VIBES: A Workflow for Annotating and Visualizing Viral Sequences Integrated into Bacterial Genomes.}, journal = {bioRxiv : the preprint server for biology}, volume = {}, number = {}, pages = {}, pmid = {37905003}, support = {R01 AI138981/AI/NIAID NIH HHS/United States ; R01 GM132600/GM/NIGMS NIH HHS/United States ; }, abstract = {Bacteriophages are viruses that infect bacteria. Many bacteriophages integrate their genomes into the bacterial chromosome and become prophages. Prophages may substantially burden or benefit host bacteria fitness, acting in some cases as parasites and in others as mutualists, and have been demonstrated to increase host virulence. The increasing ease of bacterial genome sequencing provides an opportunity to deeply explore prophage prevalence and insertion sites. Here we present VIBES, a workflow intended to automate prophage annotation in complete bacterial genome sequences. VIBES provides additional context to prophage annotations by annotating bacterial genes and viral proteins in user-provided bacterial and viral genomes. The VIBES pipeline is implemented as a Nextflow-driven workflow, providing a simple, unified interface for execution on local, cluster, and cloud computing environments. For each step of the pipeline, a container including all necessary software dependencies is provided. VIBES produces results in simple tab separated format and generates intuitive and interactive visualizations for data exploration. 
Despite VIBES' primary emphasis on prophage annotation, its generic alignment-based design allows it to be deployed as a general-purpose sequence similarity search manager. We demonstrate the utility of the VIBES prophage annotation workflow by searching for 178 Pf phage genomes across 1,072 Pseudomonas spp. genomes. VIBES software is available at https://github.com/TravisWheelerLab/VIBES.}, } @article {pmid37899771, year = {2023}, author = {Cai, T and Herner, K and Yang, T and Wang, M and Acosta Flechas, M and Harris, P and Holzman, B and Pedro, K and Tran, N}, title = {Accelerating Machine Learning Inference with GPUs in ProtoDUNE Data Processing.}, journal = {Computing and software for big science}, volume = {7}, number = {1}, pages = {11}, pmid = {37899771}, issn = {2510-2044}, abstract = {We study the performance of a cloud-based GPU-accelerated inference server to speed up event reconstruction in neutrino data batch jobs. Using detector data from the ProtoDUNE experiment and employing the standard DUNE grid job submission tools, we attempt to reprocess the data by running several thousand concurrent grid jobs, a rate we expect to be typical of current and future neutrino physics experiments. We process most of the dataset with the GPU version of our processing algorithm and the remainder with the CPU version for timing comparisons. We find that a 100-GPU cloud-based server is able to easily meet the processing demand, and that using the GPU version of the event processing algorithm is two times faster than processing these data with the CPU version when comparing to the newest CPUs in our sample. The amount of data transferred to the inference server during the GPU runs can overwhelm even the highest-bandwidth network switches, however, unless care is taken to observe network facility limits or otherwise distribute the jobs to multiple sites. 
We discuss the lessons learned from this processing campaign and several avenues for future improvements.}, } @article {pmid37898096, year = {2023}, author = {Horsley, JJ and Thomas, RH and Chowdhury, FA and Diehl, B and McEvoy, AW and Miserocchi, A and de Tisi, J and Vos, SB and Walker, MC and Winston, GP and Duncan, JS and Wang, Y and Taylor, PN}, title = {Complementary structural and functional abnormalities to localise epileptogenic tissue.}, journal = {EBioMedicine}, volume = {97}, number = {}, pages = {104848}, pmid = {37898096}, issn = {2352-3964}, support = {/WT_/Wellcome Trust/United Kingdom ; MR/T04294X/1/MRC_/Medical Research Council/United Kingdom ; U01 NS090407/NS/NINDS NIH HHS/United States ; }, mesh = {Humans ; Retrospective Studies ; *Epilepsy/diagnostic imaging/surgery ; Electroencephalography/methods ; Electrocorticography ; *Drug Resistant Epilepsy/surgery ; Seizures ; }, abstract = {BACKGROUND: When investigating suitability for epilepsy surgery, people with drug-refractory focal epilepsy may have intracranial EEG (iEEG) electrodes implanted to localise seizure onset. Diffusion-weighted magnetic resonance imaging (dMRI) may be acquired to identify key white matter tracts for surgical avoidance. Here, we investigate whether structural connectivity abnormalities, inferred from dMRI, may be used in conjunction with functional iEEG abnormalities to aid localisation of the epileptogenic zone (EZ), improving surgical outcomes in epilepsy.

METHODS: We retrospectively investigated data from 43 patients (42% female) with epilepsy who had surgery following iEEG. Twenty-five patients (58%) were free from disabling seizures (ILAE 1 or 2) at one year. Interictal iEEG functional, and dMRI structural connectivity abnormalities were quantified by comparison to a normative map and healthy controls. We explored whether the resection of maximal abnormalities related to improved surgical outcomes, in both modalities individually and concurrently. Additionally, we suggest how connectivity abnormalities may inform the placement of iEEG electrodes pre-surgically using a patient case study.

FINDINGS: Seizure freedom was 15 times more likely in patients with resection of maximal connectivity and iEEG abnormalities (p = 0.008). Both modalities separately distinguished patient surgical outcome groups and when used simultaneously, a decision tree correctly separated 36 of 43 (84%) patients.

INTERPRETATION: Our results suggest that both connectivity and iEEG abnormalities may localise epileptogenic tissue, and that these two modalities may provide complementary information in pre-surgical evaluations.

FUNDING: This research was funded by UKRI, CDT in Cloud Computing for Big Data, NIH, MRC, Wellcome Trust and Epilepsy Research UK.}, } @article {pmid37896735, year = {2023}, author = {Ramzan, M and Shoaib, M and Altaf, A and Arshad, S and Iqbal, F and Castilla, ÁK and Ashraf, I}, title = {Distributed Denial of Service Attack Detection in Network Traffic Using Deep Learning Algorithm.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {20}, pages = {}, pmid = {37896735}, issn = {1424-8220}, support = {N/A//the European University of Atlantic/ ; }, abstract = {Internet security is a major concern these days due to the increasing demand for information technology (IT)-based platforms and cloud computing. With its expansion, the Internet has been facing various types of attacks. Viruses, denial of service (DoS) attacks, distributed DoS (DDoS) attacks, code injection attacks, and spoofing are the most common types of attacks in the modern era. Due to the expansion of IT, the volume and severity of network attacks have been increasing lately. DoS and DDoS are the most frequently reported network traffic attacks. Traditional solutions such as intrusion detection systems and firewalls cannot detect complex DDoS and DoS attacks. With the integration of artificial intelligence-based machine learning and deep learning methods, several novel approaches have been presented for DoS and DDoS detection. In particular, deep learning models have played a crucial role in detecting DDoS attacks due to their exceptional performance. This study adopts deep learning models including recurrent neural network (RNN), long short-term memory (LSTM), and gradient recurrent unit (GRU) to detect DDoS attacks on the most recent dataset, CICDDoS2019, and a comparative analysis is conducted with the CICIDS2017 dataset. The comparative analysis contributes to the development of a competent and accurate method for detecting DDoS attacks with reduced execution time and complexity. 
The experimental results demonstrate that models perform equally well on the CICDDoS2019 dataset with an accuracy score of 0.99, but there is a difference in execution time, with GRU showing less execution time than those of RNN and LSTM.}, } @article {pmid37896596, year = {2023}, author = {Sheu, RK and Lin, YC and Pardeshi, MS and Huang, CY and Pai, KC and Chen, LC and Huang, CC}, title = {Adaptive Autonomous Protocol for Secured Remote Healthcare Using Fully Homomorphic Encryption (AutoPro-RHC).}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {20}, pages = {}, pmid = {37896596}, issn = {1424-8220}, support = {Grant MOST 111-2321-B-075A-001//Ministry of Science and Technology/ ; }, mesh = {Humans ; *Blood Glucose Self-Monitoring ; *Computer Security ; Blood Glucose ; Confidentiality ; Privacy ; Delivery of Health Care ; }, abstract = {The outreach of healthcare services is a challenge to remote areas with affected populations. Fortunately, remote health monitoring (RHM) has improved the hospital service quality and has proved its sustainable growth. However, the absence of security may breach the health insurance portability and accountability act (HIPAA), which has an exclusive set of rules for the privacy of medical data. Therefore, the goal of this work is to design and implement the adaptive Autonomous Protocol (AutoPro) on the patient's remote healthcare (RHC) monitoring data for the hospital using fully homomorphic encryption (FHE). The aim is to perform adaptive autonomous FHE computations on recent RHM data for providing health status reporting and maintaining the confidentiality of every patient. The autonomous protocol works independently within the group of prime hospital servers without the dependency on the third-party system. The adaptiveness of the protocol modes is based on the patient's affected level of slight, medium, and severe cases. 
Related applications are given as glucose monitoring for diabetes, digital blood pressure for stroke, pulse oximeter for COVID-19, electrocardiogram (ECG) for cardiac arrest, etc. The design for this work consists of an autonomous protocol, hospital servers combining multiple prime/local hospitals, and an algorithm based on fast fully homomorphic encryption over the torus (TFHE) library with a ring-variant by the Gentry, Sahai, and Waters (GSW) scheme. The concrete-ML model used within this work is trained using an open heart disease dataset from the UCI machine learning repository. Preprocessing is performed to recover the lost and incomplete data in the dataset. The concrete-ML model is evaluated both on the workstation and cloud server. Also, the FHE protocol is implemented on the AWS cloud network with performance details. The advantages entail providing confidentiality to the patient's data/report while saving the travel and waiting time for the hospital services. The patient's data will be completely confidential and can receive emergency services immediately. The FHE results show that the highest accuracy is achieved by support vector classification (SVC) of 88% and linear regression (LR) of 86% with the area under curve (AUC) of 91% and 90%, respectively. 
Ultimately, the FHE-based protocol presents a novel system that is successfully demonstrated on the cloud network.}, } @article {pmid37896541, year = {2023}, author = {Ramachandran, D and Naqi, SM and Perumal, G and Abbas, Q}, title = {DLTN-LOSP: A Novel Deep-Linear-Transition-Network-Based Resource Allocation Model with the Logic Overhead Security Protocol for Cloud Systems.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {20}, pages = {}, pmid = {37896541}, issn = {1424-8220}, support = {IMSIU-RP23067//Deanship of Scientific Research at Imam Mohammad Ibn Saud Islamic University (IMSIU)/ ; }, abstract = {Cloud organizations now face a challenge in managing the enormous volume of data and various resources in the cloud due to the rapid growth of the virtualized environment with many service users, ranging from small business owners to large corporations. The performance of cloud computing may suffer from ineffective resource management. As a result, resources must be distributed fairly among various stakeholders without sacrificing the organization's profitability or the satisfaction of its customers. A customer's request cannot be put on hold indefinitely just because the necessary resources are not available on the board. Therefore, a novel cloud resource allocation model incorporating security management is developed in this paper. Here, the Deep Linear Transition Network (DLTN) mechanism is developed for effectively allocating resources to cloud systems. Then, an Adaptive Mongoose Optimization Algorithm (AMOA) is deployed to compute the beamforming solution for reward prediction, which supports the process of resource allocation. Moreover, the Logic Overhead Security Protocol (LOSP) is implemented to ensure secured resource management in the cloud system, where Burrows-Abadi-Needham (BAN) logic is used to predict the agreement logic. 
During the results analysis, the performance of the proposed DLTN-LOSP model is validated and compared using different metrics such as makespan, processing time, and utilization rate. For system validation and testing, 100 to 500 resources are used in this study, and the results achieved a makespan of 2.3% and a utilization rate of 13%. Moreover, the obtained results confirm the superiority of the proposed framework, with better performance outcomes.}, } @article {pmid37896525, year = {2023}, author = {Pierleoni, P and Concetti, R and Belli, A and Palma, L and Marzorati, S and Esposito, M}, title = {A Cloud-IoT Architecture for Latency-Aware Localization in Earthquake Early Warning.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {20}, pages = {}, pmid = {37896525}, issn = {1424-8220}, abstract = {An effective earthquake early warning system requires rapid and reliable earthquake source detection. Despite the numerous proposed epicenter localization solutions in recent years, their utilization within the Internet of Things (IoT) framework and integration with IoT-oriented cloud platforms remain underexplored. This paper proposes a complete IoT architecture for earthquake detection, localization, and event notification. The architecture, which has been designed, deployed, and tested on a standard cloud platform, introduces an innovative approach by implementing P-wave "picking" directly on IoT devices, deviating from traditional regional earthquake early warning (EEW) approaches. Pick association, source localization, event declaration, and user notification functionalities are also deployed on the cloud. The cloud integration simplifies the integration of other services in the architecture, such as data storage and device management. Moreover, a localization algorithm based on the hyperbola method is proposed, but here, the time difference of arrival multilateration is applied that is often used in wireless sensor network applications. 
The results show that the proposed end-to-end architecture is able to provide a quick estimate of the earthquake epicenter location with acceptable errors for an EEW system scenario. Rigorous testing against the standard of reference in Italy for regional EEW showed an overall 3.39 s gain in the system localization speed, thus offering a tangible metric of the efficiency and potential of the proposed system as an EEW solution.}, } @article {pmid37895480, year = {2023}, author = {Lorenzo-Villegas, DL and Gohil, NV and Lamo, P and Gurajala, S and Bagiu, IC and Vulcanescu, DD and Horhat, FG and Sorop, VB and Diaconu, M and Sorop, MI and Oprisoni, A and Horhat, RM and Susan, M and MohanaSundaram, A}, title = {Innovative Biosensing Approaches for Swift Identification of Candida Species, Intrusive Pathogenic Organisms.}, journal = {Life (Basel, Switzerland)}, volume = {13}, number = {10}, pages = {}, pmid = {37895480}, issn = {2075-1729}, abstract = {Candida is the largest genus of medically significant fungi. Although most of its members are commensals, residing harmlessly in human bodies, some are opportunistic and dangerously invasive. These have the ability to cause severe nosocomial candidiasis and candidemia that affect the viscera and bloodstream. A prompt diagnosis will lead to a successful treatment modality. The smart solution of biosensing technologies for rapid and precise detection of Candida species has made remarkable progress. The development of point-of-care (POC) biosensor devices involves sensor precision down to pico-/femtogram level, cost-effectiveness, portability, rapidity, and user-friendliness. However, futuristic diagnostics will depend on exploiting technologies such as multiplexing for high-throughput screening, CRISPR, artificial intelligence (AI), neural networks, the Internet of Things (IoT), and cloud computing of medical databases. 
This review gives an insight into different biosensor technologies designed for the detection of medically significant Candida species, especially Candida albicans and C. auris, and their applications in the medical setting.}, } @article {pmid37893978, year = {2023}, author = {Dineva, K and Atanasova, T}, title = {Health Status Classification for Cows Using Machine Learning and Data Management on AWS Cloud.}, journal = {Animals : an open access journal from MDPI}, volume = {13}, number = {20}, pages = {}, pmid = {37893978}, issn = {2076-2615}, support = {Д01-62/18.03.2021///Ministry of Education and Science of the Republic Bulgaria/ ; }, abstract = {The health and welfare of livestock are significant for ensuring the sustainability and profitability of the agricultural industry. Addressing efficient ways to monitor and report the health status of individual cows is critical to prevent outbreaks and maintain herd productivity. The purpose of the study is to develop a machine learning (ML) model to classify the health status of milk cows into three categories. In this research, data are collected from existing non-invasive IoT devices and tools in a dairy farm, monitoring the micro- and macroenvironment of the cow in combination with particular information on age, days in milk, lactation, and more. A workflow of various data-processing methods is systematized and presented to create a complete, efficient, and reusable roadmap for data processing, modeling, and real-world integration. Following the proposed workflow, the data were treated, and five different ML algorithms were trained and tested to select the most descriptive one to monitor the health status of individual cows. The highest result for health status assessment is obtained by random forest classifier (RFC) with an accuracy of 0.959, recall of 0.954, and precision of 0.97. 
To increase the security, speed, and reliability of the work process, a cloud architecture of services is presented to integrate the trained model as an additional functionality in the Amazon Web Services (AWS) environment. The classification results of the ML model are visualized in a newly created interface in the client application.}, } @article {pmid37886380, year = {2023}, author = {Intelligence And Neuroscience, C}, title = {Retracted: Cloud Computing Load Balancing Mechanism Taking into Account Load Balancing Ant Colony Optimization Algorithm.}, journal = {Computational intelligence and neuroscience}, volume = {2023}, number = {}, pages = {9831926}, pmid = {37886380}, issn = {1687-5273}, abstract = {[This retracts the article DOI: 10.1155/2022/3120883.].}, } @article {pmid37885760, year = {2023}, author = {Hachisuca, AMM and de Souza, EG and Oliveira, WKM and Bazzi, CL and Donato, DG and Mendes, IS and Abdala, MC and Mercante, E}, title = {AgDataBox-IoT - application development for agrometeorological stations in smart.}, journal = {MethodsX}, volume = {11}, number = {}, pages = {102419}, pmid = {37885760}, issn = {2215-0161}, abstract = {Currently, Brazil is one of the world's largest grain producers and exporters. Agriculture has already entered its 4.0 version (2017), also known as digital agriculture, when the industry has entered the 4.0 era (2011). This new paradigm uses Internet of Things (IoT) techniques, sensors installed in the field, network of interconnected sensors in the plot, drones for crop monitoring, multispectral cameras, storage and processing of data in Cloud Computing, and Big Data techniques to process the large volumes of generated data. One of the practical options for implementing precision agriculture is the segmentation of the plot into management zones, aiming at maximizing profits according to the productive potential of each zone, being economically viable even for small producers. 
Considering that climate factors directly influence yield, this study describes the development of a sensor network for climate monitoring of management zones (microclimates), allowing the identification of climate factors that influence yield at each of its stages.•Application of the internet of things to assist in decision making in the agricultural production system.•AgDataBox (ADB-IoT) web platform has an Application Programming Interface (API).•An agrometeorological station capable of monitoring all meteorological parameters was developed (Kate 3.0).}, } @article {pmid37879464, year = {2024}, author = {Dube, T and Dube, T and Dalu, T and Gxokwe, S and Marambanyika, T}, title = {Assessment of land use and land cover, water nutrient and metal concentration related to illegal mining activities in an Austral semi-arid river system: A remote sensing and multivariate analysis approach.}, journal = {The Science of the total environment}, volume = {907}, number = {}, pages = {167919}, doi = {10.1016/j.scitotenv.2023.167919}, pmid = {37879464}, issn = {1879-1026}, abstract = {The mining sector in various countries, particularly in the sub-Saharan African region, faces significant impact from the emergence of small-scale unlicensed artisanal mines. This trend is influenced by the rising demand and prices for minerals, along with prevalent poverty levels. Thus, the detrimental impacts of these artisanal mines on the natural environment (i.e., rivers) have remained poorly understood particularly in the Zimbabwean context. To understand the consequences of this situation, a study was conducted in the Umzingwane Catchment, located in southern Zimbabwe, focusing on the variations in water nutrient and metal concentrations in rivers affected by illegal mining activities along their riparian zones. 
Using multi-year Sentinel-2 composite data and the random forest machine learning algorithm on the Google Earth Engine cloud-computing platform, we mapped the spatial distribution of illegal mines in the affected regions and seven distinct land use classes, including artisanal mines, bare surfaces, settlements, official mines, croplands, and natural vegetation, with acceptable overall and class accuracies of ±70%. Artisanal mines were found to be located along rivers and this was attributed to their large water requirements needed during the mining process. The water quality analysis revealed elevated nutrient concentrations, such as ammonium and nitrate (range 0.10-20.0 mg L[-1]), which could be attributed to mine drainage from the use of ammonium nitrate explosives during mining activities. Additionally, the prevalence of croplands in the area may have potentially contributed to increased nutrient concentrations. The principal component analysis and hierarchical cluster analysis revealed three clusters, with one of these clusters showing parameters like Ca, Mg, K, Hg and Na, which are usually associated with mineral gypsum found in the drainage of artisanal mines in the selected rivers. Cluster 2 consisted of B, Cu, Fe, Pb, and Mn, which are likely from the natural environment and finally, cluster 3 contained As, Cd, Cr, and Zn, which were likely associated with both legal and illegal mining operations. These findings provide essential insights into the health of the studied river system and the impacts of human activities in the region. They further serve as a foundation for developing and implementing regulatory measures aimed at protecting riverine systems, in line with sustainable development goal 15.1 which focuses on preserving and conserving terrestrial and inland freshwater ecosystems, including rivers. 
By acting on this information, authorities can work towards safeguarding these vital natural resources and promoting sustainable development in the area.}, } @article {pmid37869808, year = {2023}, author = {Gal-Nadasan, N and Stoicu-Tivadar, V and Gal-Nadasan, E and Dinu, AR}, title = {Robotic Process Automation Based Data Extraction from Handwritten Medical Forms.}, journal = {Studies in health technology and informatics}, volume = {309}, number = {}, pages = {68-72}, doi = {10.3233/SHTI230741}, pmid = {37869808}, issn = {1879-8365}, mesh = {*Robotics ; *Robotic Surgical Procedures ; Software ; Automation ; Machine Learning ; }, abstract = {This paper proposes to create an RPA(robotic process automation) based software robot that can digitalize and extract data from handwritten medical forms. The RPA robot uses a taxonomy that is specific for the medical form and associates the extracted data with the taxonomy. This is accomplished using UiPath studio to create the robot, Google Cloud Vision OCR(optical character recognition) to create the DOM (digital object model) file and UiPath machine learning (ML) API to extract the data from the medical form. Due to the fact that the medical form is in a non-standard format a data extraction template had to be applied. After the extraction process the data can be saved into databases or into a spreadsheets.}, } @article {pmid37867911, year = {2023}, author = {Eneh, AH and Udanor, CN and Ossai, NI and Aneke, SO and Ugwoke, PO and Obayi, AA and Ugwuishiwu, CH and Okereke, GE}, title = {Towards an improved internet of things sensors data quality for a smart aquaponics system yield prediction.}, journal = {MethodsX}, volume = {11}, number = {}, pages = {102436}, pmid = {37867911}, issn = {2215-0161}, abstract = {The mobile aquaponics system is a sustainable integrated aquaculture-crop production system in which wastewater from fish ponds are utilized in crop production, filtered, and returned for aquaculture uses. 
This process ensures the optimization of water and nutrients as well as the simultaneous production of fish and crops in portable homestead models. The lack of datasets and documentations on monitoring growth parameters in Sub-Saharan Africa hamper the effective management and prediction of yields. Water quality impacts the fish growth rate, feed consumption, and general well-being irrespective of the system. This research presents an improvement on the IoT water quality sensor system earlier developed in a previous study carried out in conjunction with two local catfish farmers. The improved system produced datasets that when trained using several machine learning algorithms achieved a test RMSE score of 0.6140 against 1.0128 from the old system for fish length prediction using Decision Tree Regressor. Further testing with the XGBoost Regressor achieved a test RMSE score of 7.0192 for fish weight prediction from the initial IoT dataset and 0.7793 from the improved IoT dataset. Both systems achieved a prediction accuracy of 99%. 
These evaluations clearly show that the improved system outperformed the initial one.•The discovery and use of improved IoT pond water quality sensors.•Development of machine learning models to evaluate the methods.•Testing of the datasets from the two methods using the machine learning models.}, } @article {pmid37864543, year = {2023}, author = {Patel, M and Dayan, I and Fishman, EK and Flores, M and Gilbert, FJ and Guindy, M and Koay, EJ and Rosenthal, M and Roth, HR and Linguraru, MG}, title = {Accelerating artificial intelligence: How federated learning can protect privacy, facilitate collaboration, and improve outcomes.}, journal = {Health informatics journal}, volume = {29}, number = {4}, pages = {14604582231207744}, doi = {10.1177/14604582231207744}, pmid = {37864543}, issn = {1741-2811}, mesh = {Humans ; *Artificial Intelligence ; Privacy ; Learning ; *Pancreatic Neoplasms ; }, abstract = {Cross-institution collaborations are constrained by data-sharing challenges. These challenges hamper innovation, particularly in artificial intelligence, where models require diverse data to ensure strong performance. Federated learning (FL) solves data-sharing challenges. In typical collaborations, data is sent to a central repository where models are trained. With FL, models are sent to participating sites, trained locally, and model weights aggregated to create a master model with improved performance. At the 2021 Radiology Society of North America's (RSNA) conference, a panel was conducted titled "Accelerating AI: How Federated Learning Can Protect Privacy, Facilitate Collaboration and Improve Outcomes." Two groups shared insights: researchers from the EXAM study (EMC CXR AI Model) and members of the National Cancer Institute's Early Detection Research Network's (EDRN) pancreatic cancer working group. EXAM brought together 20 institutions to create a model to predict oxygen requirements of patients seen in the emergency department with COVID-19 symptoms. 
The EDRN collaboration is focused on improving outcomes for pancreatic cancer patients through earlier detection. This paper describes major insights from the panel, including direct quotes. The panelists described the impetus for FL, the long-term potential vision of FL, challenges faced in FL, and the immediate path forward for FL.}, } @article {pmid37863925, year = {2023}, author = {Naboureh, A and Li, A and Bian, J and Lei, G and Nan, X}, title = {Land cover dataset of the China Central-Asia West-Asia Economic Corridor from 1993 to 2018.}, journal = {Scientific data}, volume = {10}, number = {1}, pages = {728}, pmid = {37863925}, issn = {2052-4463}, support = {42090015 and 41801370//National Natural Science Foundation of China (National Science Foundation of China)/ ; 42090015 and 41801370//National Natural Science Foundation of China (National Science Foundation of China)/ ; 2019365//Youth Innovation Promotion Association of the Chinese Academy of Sciences (Youth Innovation Promotion Association CAS)/ ; }, abstract = {Land Cover (LC) maps offer vital knowledge for various studies, ranging from sustainable development to climate change. The China Central-Asia West-Asia Economic Corridor region, as a core component of the Belt and Road initiative program, has been experiencing some of the most severe LC change tragedies, such as the Aral Sea crisis and Lake Urmia shrinkage, in recent decades. Therefore, there is a high demand for producing a fine-resolution, spatially-explicit, and long-term LC dataset for this region. However, except China, such dataset for the rest of the region (Kyrgyzstan, Turkmenistan, Kazakhstan, Uzbekistan, Tajikistan, Turkey, and Iran) is currently lacking. Here, we constructed a historical set of six 30-m resolution LC maps between 1993 and 2018 at 5-year time intervals for the seven countries where nearly 200,000 Landsat scenes were classified into nine LC types within Google Earth Engine cloud computing platform. 
The generated LC maps displayed high accuracies. This publicly available dataset has the potential to be broadly applied in environmental policy and management.}, } @article {pmid37860633, year = {2023}, author = {Muratore, L and Tsagarakis, N}, title = {XBot2D: towards a robotics hybrid cloud architecture for field robotics.}, journal = {Frontiers in robotics and AI}, volume = {10}, number = {}, pages = {1168694}, pmid = {37860633}, issn = {2296-9144}, abstract = {Nowadays, robotics applications requiring the execution of complex tasks in real-world scenarios are still facing many challenges related to highly unstructured and dynamic environments in domains such as emergency response and search and rescue where robots have to operate for prolonged periods trading off computational performance with increased power autonomy and vice versa. In particular, there is a crucial need for robots capable of adapting to such settings while at the same time providing robustness and extended power autonomy. A possible approach to overcome the conflicting demand of a computational performing system with the need for long power autonomy is represented by cloud robotics, which can boost the computational capabilities of the robot while reducing the energy consumption by exploiting the offload of resources to the cloud. Nevertheless, the communication constraint due to limited bandwidth, latency, and connectivity, typical of field robotics, makes cloud-enabled robotics solutions challenging to deploy in real-world applications. In this context, we designed and realized the XBot2D software architecture, which provides a hybrid cloud manager capable of dynamically and seamlessly allocating robotics skills to perform a distributed computation based on the current network condition and the required latency, and computational/energy resources of the robot in use. 
The proposed framework leverages the two dimensions, i.e., 2D (local and cloud), in a transparent way for the user, providing support for Real-Time (RT) skills execution on the local robot, as well as machine learning and A.I. resources on the cloud with the possibility to automatically relocate the above based on the required performances and communication quality. XBot2D implementation and its functionalities are presented and validated in realistic tasks involving the CENTAURO robot and the Amazon Web Service Elastic Computing Cloud (AWS EC2) infrastructure with different network conditions.}, } @article {pmid37860604, year = {2023}, author = {Post, AR and Ho, N and Rasmussen, E and Post, I and Cho, A and Hofer, J and Maness, AT and Parnell, T and Nix, DA}, title = {Hypermedia-based software architecture enables Test-Driven Development.}, journal = {JAMIA open}, volume = {6}, number = {4}, pages = {ooad089}, pmid = {37860604}, issn = {2574-2531}, support = {P30 CA042014/CA/NCI NIH HHS/United States ; }, abstract = {OBJECTIVES: Using agile software development practices, develop and evaluate an architecture and implementation for reliable and user-friendly self-service management of bioinformatic data stored in the cloud.

MATERIALS AND METHODS: Comprehensive Oncology Research Environment (CORE) Browser is a new open-source web application for cancer researchers to manage sequencing data organized in a flexible format in Amazon Simple Storage Service (S3) buckets. It has a microservices- and hypermedia-based architecture, which we integrated with Test-Driven Development (TDD), the iterative writing of computable specifications for how software should work prior to development. Relying on repeating patterns found in hypermedia-based architectures, we hypothesized that hypermedia would permit developing test "templates" that can be parameterized and executed for each microservice, maximizing code coverage while minimizing effort.

RESULTS: After one-and-a-half years of development, the CORE Browser backend had 121 test templates and 875 custom tests that were parameterized and executed 3031 times, providing 78% code coverage.

DISCUSSION: Architecting to permit test reuse through a hypermedia approach was a key success factor for our testing efforts. CORE Browser's application of hypermedia and TDD illustrates one way to integrate software engineering methods into data-intensive networked applications. Separating bioinformatic data management from analysis distinguishes this platform from others in bioinformatics and may provide stable data management while permitting analysis methods to advance more rapidly.

CONCLUSION: Software engineering practices are underutilized in informatics. Similar informatics projects will more likely succeed through application of good architecture and automated testing. Our approach is broadly applicable to data management tools involving cloud data storage.}, } @article {pmid37860463, year = {2023}, author = {Healthcare Engineering, JO}, title = {Retracted: Application of Cloud Computing in the Prediction of Exercise Improvement of Cardiovascular and Digestive Systems in Obese Patients.}, journal = {Journal of healthcare engineering}, volume = {2023}, number = {}, pages = {9872648}, pmid = {37860463}, issn = {2040-2309}, abstract = {[This retracts the article DOI: 10.1155/2021/4695722.].}, } @article {pmid37860366, year = {2023}, author = {Healthcare Engineering, JO}, title = {Retracted: Medical Cloud Computing Data Processing to Optimize the Effect of Drugs.}, journal = {Journal of healthcare engineering}, volume = {2023}, number = {}, pages = {9869843}, pmid = {37860366}, issn = {2040-2309}, abstract = {[This retracts the article DOI: 10.1155/2021/5560691.].}, } @article {pmid37860340, year = {2023}, author = {Healthcare Engineering, JO}, title = {Retracted: Cloud Computing into Respiratory Rehabilitation Training-Assisted Treatment of Patients with Pneumonia.}, journal = {Journal of healthcare engineering}, volume = {2023}, number = {}, pages = {9795658}, pmid = {37860340}, issn = {2040-2309}, abstract = {[This retracts the article DOI: 10.1155/2021/5884174.].}, } @article {pmid37859937, year = {2023}, author = {Hornik, J and Rachamim, M and Graguer, S}, title = {Fog computing: a platform for big-data marketing analytics.}, journal = {Frontiers in artificial intelligence}, volume = {6}, number = {}, pages = {1242574}, pmid = {37859937}, issn = {2624-8212}, abstract = {Marketing science embraces a wider variety of data types and measurement tools necessary for strategy, research, and applied decision making. 
Managing the marketing data generated by internet of things (IoT) sensors and actuators is one of the biggest challenges faced by marketing managers when deploying an IoT system. This short note shows how traditional cloud-based IoT systems are challenged by the large scale, heterogeneity, and high latency witnessed in some cloud ecosystems. It introduces researchers to one recent breakthrough, fog computing, an emerging concept that decentralizes applications, strategies, and data analytics into the network itself using a distributed and federated computing model. It transforms centralized cloud to distributed fog by bringing storage and computation closer to the user end. Fog computing is considered a novel marketplace phenomenon which can support AI and management strategies, especially for the design of "smart marketing".}, } @article {pmid37856442, year = {2023}, author = {Uhlrich, SD and Falisse, A and Kidziński, Ł and Muccini, J and Ko, M and Chaudhari, AS and Hicks, JL and Delp, SL}, title = {OpenCap: Human movement dynamics from smartphone videos.}, journal = {PLoS computational biology}, volume = {19}, number = {10}, pages = {e1011462}, pmid = {37856442}, issn = {1553-7358}, support = {P41 EB027060/EB/NIBIB NIH HHS/United States ; R01 AR077604/AR/NIAMS NIH HHS/United States ; }, mesh = {Humans ; *Smartphone ; *Models, Biological ; Muscles/physiology ; Software ; Biomechanical Phenomena ; Movement/physiology ; }, abstract = {Measures of human movement dynamics can predict outcomes like injury risk or musculoskeletal disease progression. However, these measures are rarely quantified in large-scale research studies or clinical practice due to the prohibitive cost, time, and expertise required. Here we present and validate OpenCap, an open-source platform for computing both the kinematics (i.e., motion) and dynamics (i.e., forces) of human movement using videos captured from two or more smartphones. 
OpenCap leverages pose estimation algorithms to identify body landmarks from videos; deep learning and biomechanical models to estimate three-dimensional kinematics; and physics-based simulations to estimate muscle activations and musculoskeletal dynamics. OpenCap's web application enables users to collect synchronous videos and visualize movement data that is automatically processed in the cloud, thereby eliminating the need for specialized hardware, software, and expertise. We show that OpenCap accurately predicts dynamic measures, like muscle activations, joint loads, and joint moments, which can be used to screen for disease risk, evaluate intervention efficacy, assess between-group movement differences, and inform rehabilitation decisions. Additionally, we demonstrate OpenCap's practical utility through a 100-subject field study, where a clinician using OpenCap estimated musculoskeletal dynamics 25 times faster than a laboratory-based approach at less than 1% of the cost. By democratizing access to human movement analysis, OpenCap can accelerate the incorporation of biomechanical metrics into large-scale research studies, clinical trials, and clinical practice.}, } @article {pmid37854642, year = {2023}, author = {Zhang, M}, title = {Optimization Strategy of College Students' Education Management Based on Smart Cloud Platform Teaching.}, journal = {Computational intelligence and neuroscience}, volume = {2023}, number = {}, pages = {5642142}, pmid = {37854642}, issn = {1687-5273}, mesh = {Humans ; *Artificial Intelligence ; *Cloud Computing ; Students ; Big Data ; Commerce ; }, abstract = {With the passage of time and social changes, the form of education is also changing step by step. In just a few decades, information technology has developed by leaps and bounds, and digital education has not yet been widely promoted. Intelligent education cloud platforms based on big data, Internet of things, cloud computing, and artificial intelligence have begun to emerge. 
The research on the "smart campus" cloud platform is conducive to improving the utilization rate of existing hardware equipment in colleges and universities and is conducive to improving the level of teaching software deployment. At the same time, this research also provides a new idea for the research in the field of cloud security. While cloud computing brings convenience to teaching work, it also brings new problems to system security. At present, virtualization technology is still in the ascendant stage in the construction of "smart campus" in colleges and universities and is gradually applied to cloud computing service products. At present, there are many cases about the construction of teaching resource platform, but most of them are modified from the early resource management system, which has strong coupling of single system, insufficient functions of collecting, processing, searching, sharing, and reusing resources, and weak application support ability for related business systems. Under this social background, this paper studies the teaching process management system for intelligent classroom.}, } @article {pmid37853124, year = {2023}, author = {Wang, Y and Hollingsworth, PM and Zhai, D and West, CD and Green, JMH and Chen, H and Hurni, K and Su, Y and Warren-Thomas, E and Xu, J and Ahrends, A}, title = {High-resolution maps show that rubber causes substantial deforestation.}, journal = {Nature}, volume = {623}, number = {7986}, pages = {340-346}, pmid = {37853124}, issn = {1476-4687}, mesh = {Asia, Southeastern ; Biodiversity ; Cloud Computing ; *Conservation of Natural Resources/statistics & numerical data/trends ; *Forests ; *Geographic Mapping ; *Rubber ; *Satellite Imagery ; }, abstract = {Understanding the effects of cash crop expansion on natural forest is of fundamental importance. However, for most crops there are no remotely sensed global maps[1], and global deforestation impacts are estimated using models and extrapolations. 
Natural rubber is an example of a principal commodity for which deforestation impacts have been highly uncertain, with estimates differing more than fivefold[1-4]. Here we harnessed Earth observation satellite data and cloud computing[5] to produce high-resolution maps of rubber (10 m pixel size) and associated deforestation (30 m pixel size) for Southeast Asia. Our maps indicate that rubber-related forest loss has been substantially underestimated in policy, by the public and in recent reports[6-8]. Our direct remotely sensed observations show that deforestation for rubber is at least twofold to threefold higher than suggested by figures now widely used for setting policy[4]. With more than 4 million hectares of forest loss for rubber since 1993 (at least 2 million hectares since 2000) and more than 1 million hectares of rubber plantations established in Key Biodiversity Areas, the effects of rubber on biodiversity and ecosystem services in Southeast Asia could be extensive. Thus, rubber deserves more attention in domestic policy, within trade agreements and in incoming due-diligence legislation.}, } @article {pmid37850120, year = {2023}, author = {Teng, Z and Chen, J and Wang, J and Wu, S and Chen, R and Lin, Y and Shen, L and Jackson, R and Zhou, J and Yang, C}, title = {Panicle-Cloud: An Open and AI-Powered Cloud Computing Platform for Quantifying Rice Panicles from Drone-Collected Imagery to Enable the Classification of Yield Production in Rice.}, journal = {Plant phenomics (Washington, D.C.)}, volume = {5}, number = {}, pages = {0105}, pmid = {37850120}, issn = {2643-6515}, abstract = {Rice (Oryza sativa) is an essential stable food for many rice consumption nations in the world and, thus, the importance to improve its yield production under global climate changes. 
To evaluate different rice varieties' yield performance, key yield-related traits such as panicle number per unit area (PNpM[2]) are key indicators, which have attracted much attention by many plant research groups. Nevertheless, it is still challenging to conduct large-scale screening of rice panicles to quantify the PNpM[2] trait due to complex field conditions, a large variation of rice cultivars, and their panicle morphological features. Here, we present Panicle-Cloud, an open and artificial intelligence (AI)-powered cloud computing platform that is capable of quantifying rice panicles from drone-collected imagery. To facilitate the development of AI-powered detection models, we first established an open diverse rice panicle detection dataset that was annotated by a group of rice specialists; then, we integrated several state-of-the-art deep learning models (including a preferred model called Panicle-AI) into the Panicle-Cloud platform, so that nonexpert users could select a pretrained model to detect rice panicles from their own aerial images. We trialed the AI models with images collected at different altitudes and growth stages, through which the right timing and preferred image resolutions for phenotyping rice panicles in the field were identified. Then, we applied the platform in a 2-season rice breeding trial to validate its biological relevance and classified yield production using the platform-derived PNpM[2] trait from hundreds of rice varieties. Through correlation analysis between computational analysis and manual scoring, we found that the platform could quantify the PNpM[2] trait reliably, based on which yield production was classified with high accuracy. 
Hence, we trust that our work demonstrates a valuable advance in phenotyping the PNpM[2] trait in rice, which provides a useful toolkit to enable rice breeders to screen and select desired rice varieties under field conditions.}, } @article {pmid37848896, year = {2023}, author = {Kline, JA and Reed, B and Frost, A and Alanis, N and Barshay, M and Melzer, A and Galbraith, JW and Budd, A and Winn, A and Pun, E and Camargo, CA}, title = {Database derived from an electronic medical record-based surveillance network of US emergency department patients with acute respiratory illness.}, journal = {BMC medical informatics and decision making}, volume = {23}, number = {1}, pages = {224}, pmid = {37848896}, issn = {1472-6947}, mesh = {Humans ; *Electronic Health Records ; Emergency Service, Hospital ; *Respiratory Tract Infections/diagnosis/epidemiology ; Laboratories ; Public Health ; }, abstract = {BACKGROUND: For surveillance of episodic illness, the emergency department (ED) represents one of the largest interfaces for generalizable data about segments of the US public experiencing a need for unscheduled care. This protocol manuscript describes the development and operation of a national network linking symptom, clinical, laboratory and disposition data that provides a public database dedicated to the surveillance of acute respiratory infections (ARIs) in EDs.

METHODS: The Respiratory Virus Laboratory Emergency Department Network Surveillance (RESP-LENS) network includes 26 academic investigators, from 24 sites, with 91 hospitals, and the Centers for Disease Control and Prevention (CDC) to survey viral infections. All data originate from electronic medical records (EMRs) accessed by structured query language (SQL) coding. Each Tuesday, data are imported into the standard data form for ARI visits that occurred the prior week (termed the index file); outcomes at 30 days and ED volume are also recorded. Up to 325 data fields can be populated for each case. Data are transferred from sites into an encrypted Google Cloud Platform, then programmatically checked for compliance, parsed, and aggregated into a central database housed on a second cloud platform prior to transfer to CDC.

RESULTS: As of August, 2023, the network has reported data on over 870,000 ARI cases selected from approximately 5.2 million ED encounters. Post-contracting challenges to network execution have included local shifts in testing policies and platforms, delays in ICD-10 coding to detect ARI cases, and site-level personnel turnover. The network is addressing these challenges and is poised to begin streaming weekly data for dissemination.

CONCLUSIONS: The RESP-LENS network provides a weekly updated database that is a public health resource to survey the epidemiology, viral causes, and outcomes of ED patients with acute respiratory infections.}, } @article {pmid37848573, year = {2023}, author = {Atchyuth, BAS and Swain, R and Das, P}, title = {Near real-time flood inundation and hazard mapping of Baitarani River Basin using Google Earth Engine and SAR imagery.}, journal = {Environmental monitoring and assessment}, volume = {195}, number = {11}, pages = {1331}, pmid = {37848573}, issn = {1573-2959}, mesh = {*Floods ; *Rivers ; Search Engine ; Environmental Monitoring/methods ; Water ; }, abstract = {Flood inundation mapping and satellite imagery monitoring are critical and effective responses during flood events. Mapping of a flood using optical data is limited due to the unavailability of cloud-free images. Because of its capacity to penetrate clouds and operate in all kinds of weather, synthetic aperture radar is preferred for water inundation mapping. Flood mapping in Eastern India's Baitarani River Basin for 2018, 2019, 2020, 2021, and 2022 was performed in this study using Sentinel-1 imagery and Google Earth Engine with Otsu's algorithm. Different machine-learning algorithms were used to map the LULC of the study region. Dual polarizations VH and VV and their combinations VV×VH, VV+VH, VH-VV, VV-VH, VV/VH, and VH/VV were examined to identify non-water and water bodies. The normalized difference water index (NDWI) map derived from Sentinel-2 data validated the surface water inundation with 80% accuracy. The total inundated areas were identified as 440.3 km[2] in 2018, 268.58 km[2] in 2019, 178.40 km[2] in 2020, 203.79 km[2] in 2021, and 321.33 km[2] in 2022, respectively. The overlap of flood maps on the LULC map indicated that flooding highly affected agriculture and urban areas in these years. 
The approach using the near-real-time Sentinel-1 SAR imagery and GEE platform can be operationalized for periodic flood mapping, helps develop flood control measures, and helps enhance flood management. The generated annual flood inundation maps are also useful for policy development, agriculture yield estimation, crop insurance framing, etc.}, } @article {pmid37841693, year = {2023}, author = {Familiar, AM and Mahtabfar, A and Fathi Kazerooni, A and Kiani, M and Vossough, A and Viaene, A and Storm, PB and Resnick, AC and Nabavizadeh, A}, title = {Radio-pathomic approaches in pediatric neuro-oncology: Opportunities and challenges.}, journal = {Neuro-oncology advances}, volume = {5}, number = {1}, pages = {vdad119}, pmid = {37841693}, issn = {2632-2498}, support = {75N91019D00024/CA/NCI NIH HHS/United States ; }, abstract = {With medical software platforms moving to cloud environments with scalable storage and computing, the translation of predictive artificial intelligence (AI) models to aid in clinical decision-making and facilitate personalized medicine for cancer patients is becoming a reality. Medical imaging, namely radiologic and histologic images, has immense analytical potential in neuro-oncology, and models utilizing integrated radiomic and pathomic data may yield a synergistic effect and provide a new modality for precision medicine. At the same time, the ability to harness multi-modal data is met with challenges in aggregating data across medical departments and institutions, as well as significant complexity in modeling the phenotypic and genotypic heterogeneity of pediatric brain tumors. In this paper, we review recent pathomic and integrated pathomic, radiomic, and genomic studies with clinical applications. We discuss current challenges limiting translational research on pediatric brain tumors and outline technical and analytical solutions. 
Overall, we propose that to empower the potential residing in radio-pathomics, systemic changes in cross-discipline data management and end-to-end software platforms to handle multi-modal data sets are needed, in addition to embracing modern AI-powered approaches. These changes can improve the performance of predictive models, and ultimately the ability to advance brain cancer treatments and patient outcomes through the development of such models.}, } @article {pmid37840574, year = {2023}, author = {Jang, H and Park, S and Koh, H}, title = {Comprehensive microbiome causal mediation analysis using MiMed on user-friendly web interfaces.}, journal = {Biology methods & protocols}, volume = {8}, number = {1}, pages = {bpad023}, pmid = {37840574}, issn = {2396-8923}, abstract = {It is a central goal of human microbiome studies to see the roles of the microbiome as a mediator that transmits environmental, behavioral, or medical exposures to health or disease outcomes. Yet, mediation analysis is not used as much as it should be. One reason is because of the lack of carefully planned routines, compilers, and automated computing systems for microbiome mediation analysis (MiMed) to perform a series of data processing, diversity calculation, data normalization, downstream data analysis, and visualizations. Many researchers in various disciplines (e.g. clinicians, public health practitioners, and biologists) are not also familiar with related statistical methods and programming languages on command-line interfaces. Thus, in this article, we introduce a web cloud computing platform, named as MiMed, that enables comprehensive MiMed on user-friendly web interfaces. The main features of MiMed are as follows. First, MiMed can survey the microbiome in various spheres (i) as a whole microbial ecosystem using different ecological measures (e.g. alpha- and beta-diversity indices) or (ii) as individual microbial taxa (e.g. 
phyla, classes, orders, families, genera, and species) using different data normalization methods. Second, MiMed enables covariate-adjusted analysis to control for potential confounding factors (e.g. age and gender), which is essential to enhance the causality of the results, especially for observational studies. Third, MiMed enables a breadth of statistical inferences in both mediation effect estimation and significance testing. Fourth, MiMed provides flexible and easy-to-use data processing and analytic modules and creates nice graphical representations. Finally, MiMed employs ChatGPT to search for what has been known about the microbial taxa that are found significantly as mediators using artificial intelligence technologies. For demonstration purposes, we applied MiMed to the study on the mediating roles of oral microbiome in subgingival niches between e-cigarette smoking and gingival inflammation. MiMed is freely available on our web server (http://mimed.micloud.kr).}, } @article {pmid37838111, year = {2024}, author = {Li, W and Li, SM and Kang, MC and Xiong, X and Wang, P and Tao, LQ}, title = {Multi-characteristic tannic acid-reinforced polyacrylamide/sodium carboxymethyl cellulose ionic hydrogel strain sensor for human-machine interaction.}, journal = {International journal of biological macromolecules}, volume = {254}, number = {Pt 2}, pages = {127434}, doi = {10.1016/j.ijbiomac.2023.127434}, pmid = {37838111}, issn = {1879-0003}, mesh = {Humans ; *Carboxymethylcellulose Sodium ; Ions ; *Hydrogels ; Electric Conductivity ; }, abstract = {Big data and cloud computing are propelling research in human-computer interface within academia. However, the potential of wearable human-machine interaction (HMI) devices utilizing multiperformance ionic hydrogels remains largely unexplored. Here, we present a motion recognition-based HMI system that enhances movement training. 
We engineered dual-network PAM/CMC/TA (PCT) hydrogels by reinforcing polyacrylamide (PAM) and sodium carboxymethyl cellulose (CMC) polymers with tannic acid (TA). These hydrogels possess exceptional transparency, adhesion, and remodelling features. By combining an elastic PAM backbone with tunable amounts of CMC and TA, the PCT hydrogels achieve optimal electromechanical performance. As strain sensors, they demonstrate higher sensitivity (GF = 4.03), low detection limit (0.5 %), and good linearity (0.997). Furthermore, we developed a highly accurate (97.85 %) motion recognition system using machine learning and hydrogel-based wearable sensors. This system enables contactless real-time training monitoring and wireless control of trolley operations. Our research underscores the effectiveness of PCT hydrogels for real-time HMI, thus advancing next-generation HMI systems.}, } @article {pmid37837127, year = {2023}, author = {Al-Bazzaz, H and Azam, M and Amayri, M and Bouguila, N}, title = {Unsupervised Mixture Models on the Edge for Smart Energy Consumption Segmentation with Feature Saliency.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {19}, pages = {}, pmid = {37837127}, issn = {1424-8220}, abstract = {Smart meter datasets have recently transitioned from monthly intervals to one-second granularity, yielding invaluable insights for diverse metering functions. Clustering analysis, a fundamental data mining technique, is extensively applied to discern unique energy consumption patterns. However, the advent of high-resolution smart meter data brings forth formidable challenges, including non-Gaussian data distributions, unknown cluster counts, and varying feature importance within high-dimensional spaces. This article introduces an innovative learning framework integrating the expectation-maximization algorithm with the minimum message length criterion. 
This unified approach enables concurrent feature and model selection, finely tuned for the proposed bounded asymmetric generalized Gaussian mixture model with feature saliency. Our experiments aim to replicate an efficient smart meter data analysis scenario by incorporating three distinct feature extraction methods. We rigorously validate the clustering efficacy of our proposed algorithm against several state-of-the-art approaches, employing diverse performance metrics across synthetic and real smart meter datasets. The clusters that we identify effectively highlight variations in residential energy consumption, furnishing utility companies with actionable insights for targeted demand reduction efforts. Moreover, we demonstrate our method's robustness and real-world applicability by harnessing Concordia's High-Performance Computing infrastructure. This facilitates efficient energy pattern characterization, particularly within smart meter environments involving edge cloud computing. Finally, we emphasize that our proposed mixture model outperforms three other models in this paper's comparative study. 
We achieve superior performance compared to the non-bounded variant of the proposed mixture model by an average percentage improvement of 7.828%.}, } @article {pmid37832430, year = {2023}, author = {Schacherer, DP and Herrmann, MD and Clunie, DA and Höfener, H and Clifford, W and Longabaugh, WJR and Pieper, S and Kikinis, R and Fedorov, A and Homeyer, A}, title = {The NCI Imaging Data Commons as a platform for reproducible research in computational pathology.}, journal = {Computer methods and programs in biomedicine}, volume = {242}, number = {}, pages = {107839}, pmid = {37832430}, issn = {1872-7565}, support = {HHSN261201500003C/CA/NCI NIH HHS/United States ; HHSN261201500003I/CA/NCI NIH HHS/United States ; }, mesh = {Humans ; *Software ; Reproducibility of Results ; Cloud Computing ; Diagnostic Imaging ; *Lung Neoplasms/diagnostic imaging ; }, abstract = {BACKGROUND AND OBJECTIVES: Reproducibility is a major challenge in developing machine learning (ML)-based solutions in computational pathology (CompPath). The NCI Imaging Data Commons (IDC) provides >120 cancer image collections according to the FAIR principles and is designed to be used with cloud ML services. Here, we explore its potential to facilitate reproducibility in CompPath research.

METHODS: Using the IDC, we implemented two experiments in which a representative ML-based method for classifying lung tumor tissue was trained and/or evaluated on different datasets. To assess reproducibility, the experiments were run multiple times with separate but identically configured instances of common ML services.

RESULTS: The results of different runs of the same experiment were reproducible to a large extent. However, we observed occasional, small variations in AUC values, indicating a practical limit to reproducibility.

CONCLUSIONS: We conclude that the IDC facilitates approaching the reproducibility limit of CompPath research (i) by enabling researchers to reuse exactly the same datasets and (ii) by integrating with cloud ML services so that experiments can be run in identically configured computing environments.}, } @article {pmid37831665, year = {2023}, author = {Saif, Y and Yusof, Y and Rus, AZM and Ghaleb, AM and Mejjaouli, S and Al-Alimi, S and Didane, DH and Latif, K and Abdul Kadir, AZ and Alshalabi, H and Sadeq, S}, title = {Implementing circularity measurements in industry 4.0-based manufacturing metrology using MQTT protocol and Open CV: A case study.}, journal = {PloS one}, volume = {18}, number = {10}, pages = {e0292814}, pmid = {37831665}, issn = {1932-6203}, mesh = {*Commerce ; *Industry ; Algorithms ; Cloud Computing ; Communication ; }, abstract = {In the context of Industry 4.0, manufacturing metrology is crucial for inspecting and measuring machines. The Internet of Things (IoT) technology enables seamless communication between advanced industrial devices through local and cloud computing servers. This study investigates the use of the MQTT protocol to enhance the performance of circularity measurement data transmission between cloud servers and round-hole data sources through Open CV. Accurate inspection of circular characteristics, particularly roundness errors, is vital for lubricant distribution, assemblies, and rotational force innovation. Circularity measurement techniques employ algorithms like the minimal zone circle tolerance algorithm. Vision inspection systems, utilizing image processing techniques, can promptly and accurately detect quality concerns by analyzing the model's surface through circular dimension analysis. This involves sending the model's image to a computer, which employs techniques such as Hough Transform, Edge Detection, and Contour Analysis to identify circular features and extract relevant parameters. 
This method is utilized in the camera industry and component assembly. To assess the performance, a comparative experiment was conducted between the non-contact-based 3SMVI system and the contact-based CMM system widely used in various industries for roundness evaluation. The CMM technique is known for its high precision but is time-consuming. Experimental results indicated a variation of 5 to 9.6 micrometers between the two methods. It is suggested that using a high-resolution camera and appropriate lighting conditions can further enhance result precision.}, } @article {pmid37829921, year = {2023}, author = {Intelligence And Neuroscience, C}, title = {Retracted: An Optimized Decision Method for Smart Teaching Effect Based on Cloud Computing and Deep Learning.}, journal = {Computational intelligence and neuroscience}, volume = {2023}, number = {}, pages = {9862737}, pmid = {37829921}, issn = {1687-5273}, abstract = {[This retracts the article DOI: 10.1155/2022/6907172.].}, } @article {pmid37829877, year = {2023}, author = {Intelligence And Neuroscience, C}, title = {Retracted: The Construction of Big Data Computational Intelligence System for E-Government in Cloud Computing Environment and Its Development Impact.}, journal = {Computational intelligence and neuroscience}, volume = {2023}, number = {}, pages = {9873976}, pmid = {37829877}, issn = {1687-5273}, abstract = {[This retracts the article DOI: 10.1155/2022/7295060.].}, } @article {pmid37829372, year = {2023}, author = {Healthcare Engineering, JO}, title = {Retracted: Construction of a Health Management Model for Early Identification of Ischaemic Stroke in Cloud Computing.}, journal = {Journal of healthcare engineering}, volume = {2023}, number = {}, pages = {9820647}, pmid = {37829372}, issn = {2040-2309}, abstract = {[This retracts the article DOI: 10.1155/2022/1018056.].}, } @article {pmid37819909, year = {2023}, author = {Wang, TY and Cui, J and Fan, Y}, title = {A wearable-based sports health monitoring 
system using CNN and LSTM with self-attentions.}, journal = {PloS one}, volume = {18}, number = {10}, pages = {e0292012}, pmid = {37819909}, issn = {1932-6203}, mesh = {Humans ; Athletes ; *Athletic Performance ; *Cell Phone ; Neural Networks, Computer ; *Wearable Electronic Devices ; }, abstract = {Sports performance and health monitoring are essential for athletes to maintain peak performance and avoid potential injuries. In this paper, we propose a sports health monitoring system that utilizes wearable devices, cloud computing, and deep learning to monitor the health status of sports persons. The system consists of a wearable device that collects various physiological parameters and a cloud server that contains a deep learning model to predict the sportsperson's health status. The proposed model combines a Convolutional Neural Network (CNN), Long Short-Term Memory (LSTM), and self-attention mechanisms. The model is trained on a large dataset of sports persons' physiological data and achieves an accuracy of 93%, specificity of 94%, precision of 95%, and an F1 score of 92%. 
The sports person can access the cloud server using their mobile phone to receive a report of their health status, which can be used to monitor their performance and make any necessary adjustments to their training or competition schedule.}, } @article {pmid37819832, year = {2023}, author = {Ruiz-Zafra, A and Precioso, D and Salvador, B and Lubian-Lopez, SP and Jimenez, J and Benavente-Fernandez, I and Pigueiras, J and Gomez-Ullate, D and Gontard, LC}, title = {NeoCam: An Edge-Cloud Platform for Non-Invasive Real-Time Monitoring in Neonatal Intensive Care Units.}, journal = {IEEE journal of biomedical and health informatics}, volume = {27}, number = {6}, pages = {2614-2624}, doi = {10.1109/JBHI.2023.3240245}, pmid = {37819832}, issn = {2168-2208}, mesh = {Infant, Newborn ; Infant ; Humans ; *Intensive Care Units, Neonatal ; *Cloud Computing ; Infant, Premature ; Software ; Algorithms ; }, abstract = {In this work we introduce NeoCam, an open source hardware-software platform for video-based monitoring of preterms infants in Neonatal Intensive Care Units (NICUs). NeoCam includes an edge computing device that performs video acquisition and processing in real-time. Compared to other proposed solutions, it has the advantage of handling data more efficiently by performing most of the processing on the device, including proper anonymisation for better compliance with privacy regulations. In addition, it allows to perform various video analysis tasks of clinical interest in parallel at speeds of between 20 and 30 frames-per-second. We introduce algorithms to measure without contact the breathing rate, motor activity, body pose and emotional status of the infants. For breathing rate, our system shows good agreement with existing methods provided there is sufficient light and proper imaging conditions. Models for motor activity and stress detection are new to the best of our knowledge. 
NeoCam has been tested on preterms in the NICU of the University Hospital Puerta del Mar (Cádiz, Spain), and we report the lessons learned from this trial.}, } @article {pmid37819321, year = {2023}, author = {Machado, IA and Lacerda, MAS and Martinez-Blanco, MDR and Serrano, A and García-Baonza, R and Ortiz-Rodriguez, JM}, title = {Chameleon: a cloud computing Industry 4.0 neutron spectrum unfolding code.}, journal = {Radiation protection dosimetry}, volume = {199}, number = {15-16}, pages = {1877-1882}, doi = {10.1093/rpd/ncac298}, pmid = {37819321}, issn = {1742-3406}, support = {APQ-01018-21//Fundação de Amparo à Pesquisa do Estado de Minas Gerais/ ; //Conselho Nacional de Desenvolvimento Científico e Tecnológico/ ; //OMADS Co./ ; }, mesh = {*Cloud Computing ; *Algorithms ; Neural Networks, Computer ; Internet ; Neutrons ; }, abstract = {This work presents Chameleon, a cloud computing (CC) Industry 4.0 (I4) neutron spectrum unfolding code. The code was designed under the Python programming language, using Streamlit framework®, and it is executed on the cloud, as I4 CC technology through internet, by using mobile devices with internet connectivity and a web navigator. In its first version, as a proof of concept, the SPUNIT algorithm was implemented. The main functionalities and the preliminary tests performed to validate the code are presented. Chameleon solves the neutron spectrum unfolding problem and it is easy, friendly and intuitive. It can be applied with success in various workplaces. More validation tests are in progress. 
Future implementations will include improving the graphical user interface, inserting other algorithms, such as GRAVEL, MAXED and neural networks, and implementing an algorithm to estimate uncertainties in the calculated integral quantities.}, } @article {pmid37816030, year = {2023}, author = {, }, title = {Retraction: Relationship between employees' career maturity and career planning of edge computing and cloud collaboration from the perspective of organizational behavior.}, journal = {PloS one}, volume = {18}, number = {10}, pages = {e0292209}, pmid = {37816030}, issn = {1932-6203}, } @article {pmid37809681, year = {2023}, author = {Chen, C and Yang, X and Jiang, S and Liu, Z}, title = {Mapping and spatiotemporal dynamics of land-use and land-cover change based on the Google Earth Engine cloud platform from Landsat imagery: A case study of Zhoushan Island, China.}, journal = {Heliyon}, volume = {9}, number = {9}, pages = {e19654}, pmid = {37809681}, issn = {2405-8440}, abstract = {Land resources are an essential foundation for socioeconomic development. Island land resources are limited, the type changes are particularly frequent, and the environment is fragile. Therefore, large-scale, long-term, and high-accuracy land-use classification and spatiotemporal characteristic analysis are of great significance for the sustainable development of islands. Based on the advantages of remote sensing indices and principal component analysis in accurate classification, and taking Zhoushan Archipelago, China, as the study area, in this work long-term satellite remote sensing data were used to perform land-use classification and spatiotemporal characteristic analysis. The classification results showed that the land-use types could be exactly classified, with the overall accuracy and Kappa coefficient greater than 94% and 0.93, respectively. 
The results of the spatiotemporal characteristic analysis showed that the built-up land and forest land areas increased by 90.00 km[2] and 36.83 km[2], respectively, while the area of the cropland/grassland decreased by 69.77 km[2]. The areas of the water bodies, tidal flats, and bare land exhibited slight change trends. The spatial coverage of Zhoushan Island continuously expanded toward the coast, encroaching on nearby sea areas and tidal flats. The cropland/grassland was the most transferred-out area, at up to 108.94 km[2], and built-up land was the most transferred-in areas, at up to 73.31 km[2]. This study provides a data basis and technical support for the scientific management of land resources.}, } @article {pmid37804778, year = {2023}, author = {Lakhan, A and Mohammed, MA and Abdulkareem, KH and Hamouda, H and Alyahya, S}, title = {Autism Spectrum Disorder detection framework for children based on federated learning integrated CNN-LSTM.}, journal = {Computers in biology and medicine}, volume = {166}, number = {}, pages = {107539}, doi = {10.1016/j.compbiomed.2023.107539}, pmid = {37804778}, issn = {1879-0534}, abstract = {The incidence of Autism Spectrum Disorder (ASD) among children, attributed to genetics and environmental factors, has been increasing daily. ASD is a non-curable neurodevelopmental disorder that affects children's communication, behavior, social interaction, and learning skills. While machine learning has been employed for ASD detection in children, existing ASD frameworks offer limited services to monitor and improve the health of ASD patients. This paper presents a complex and efficient ASD framework with comprehensive services to enhance the results of existing ASD frameworks. Our proposed approach is the Federated Learning-enabled CNN-LSTM (FCNN-LSTM) scheme, designed for ASD detection in children using multimodal datasets. 
The ASD framework is built in a distributed computing environment where different ASD laboratories are connected to the central hospital. The FCNN-LSTM scheme enables local laboratories to train and validate different datasets, including Ages and Stages Questionnaires (ASQ), Facial Communication and Symbolic Behavior Scales (CSBS) Dataset, Parents Evaluate Developmental Status (PEDS), Modified Checklist for Autism in Toddlers (M-CHAT), and Screening Tool for Autism in Toddlers and Children (STAT) datasets, on different computing laboratories. To ensure the security of patient data, we have implemented a security mechanism based on advanced standard encryption (AES) within the federated learning environment. This mechanism allows all laboratories to offload and download data securely. We integrate all trained datasets into the aggregated nodes and make the final decision for ASD patients based on the decision process tree. Additionally, we have designed various Internet of Things (IoT) applications to improve the efficiency of ASD patients and achieve more optimal learning results. 
Simulation results demonstrate that our proposed framework achieves an ASD detection accuracy of approximately 99% compared to all existing ASD frameworks.}, } @article {pmid37794709, year = {2024}, author = {Lee, J and Kim, H and Kron, F}, title = {Virtual education strategies in the context of sustainable health care and medical education: A topic modelling analysis of four decades of research.}, journal = {Medical education}, volume = {58}, number = {1}, pages = {47-62}, doi = {10.1111/medu.15202}, pmid = {37794709}, issn = {1365-2923}, support = {NRF-2021R1F1A1056465//Ministry of Science & ICT/ ; }, mesh = {Humans ; Artificial Intelligence ; *Education, Medical ; Delivery of Health Care ; Learning ; *Virtual Reality ; }, abstract = {BACKGROUND: The growing importance of sustainability has led to the current literature being saturated with studies on the necessity of, and suggested topics for, education for sustainable health care (ESH). Even so, ESH implementation has been hindered by educator unpreparedness and resource scarcity. A potential resolution lies in virtual education. However, research on the strategies needed for successfully implementing virtual education in the context of sustainable health care and medical education is sparse; this study aims to fill the gap.

METHODS: Topic modelling, a computational text-mining method for analysing recurring patterns of co-occurring word clusters to reveal key topics prevalent across the texts, was used to examine how sustainability was addressed in research in medicine, medical education, and virtual education. A total of 17 631 studies, retrieved from Web of Science, Scopus and PubMed, were analysed.

RESULTS: Sustainability-related topics within health care, medical education and virtual education provided systematic implications for Sustainable Virtual Medical Education (SVME)-ESH via virtual platforms in a sustainable way. Analyses of keywords, phrases, topics and their associated networks indicate that SVME should address the three pillars of environmental, social and economic sustainability and medical practices to uphold them; employ different technologies and methods including simulations, virtual reality (VR), artificial intelligence (AI), cloud computing, distance learning; and implement strategies for collaborative development, persuasive diffusion and quality assurance.

CONCLUSIONS: This research suggests that sustainable strategies in virtual education for ESH require a systems approach, encompassing components such as learning content and objectives, evaluation, targeted learners, media, methods and strategies. The advancement of SVME necessitates that medical educators and researchers play a central and bridging role, guiding both the fields of sustainable health care and medical education in the development and implementation of SVME. In this way, they can prepare future physicians to address sustainability issues that impact patient care.}, } @article {pmid37773456, year = {2023}, author = {Buyukcavus, MH and Aydogan Akgun, F and Solak, S and Ucar, MHB and Fındık, Y and Baykul, T}, title = {Facial recognition by cloud-based APIs following surgically assisted rapid maxillary expansion.}, journal = {Journal of orofacial orthopedics = Fortschritte der Kieferorthopadie : Organ/official journal Deutsche Gesellschaft fur Kieferorthopadie}, volume = {}, number = {}, pages = {}, pmid = {37773456}, issn = {1615-6714}, abstract = {INTRODUCTION: This study aimed to investigate whether the facial soft tissue changes of individuals who had undergone surgically assisted rapid maxillary expansion (SARME) would be detected by three different well-known facial biometric recognition applications.

METHODS: To calculate similarity scores, the pre- and postsurgical photographs of 22 patients who had undergone SARME treatment were examined using three prominent cloud computing-based facial recognition application programming interfaces (APIs): AWS Rekognition (Amazon Web Services, Seattle, WA, USA), Microsoft Azure Cognitive (Microsoft, Redmond, WA, USA), and Face++ (Megvii, Beijing, China). The pre- and post-SARME photographs of the patients (relaxed, smiling, profile, and semiprofile) were used to calculate similarity scores using the APIs. Friedman's two-way analysis of variance and the Wilcoxon signed-rank test were used to compare the similarity scores obtained from the photographs of the different aspects of the face before and after surgery using the different programs. The relationship between measurements on lateral and posteroanterior cephalograms and the similarity scores was evaluated using the Spearman rank correlation.

RESULTS: The similarity scores were found to be lower with the Face++ program. When looking at the photo types, it was observed that the similarity scores were higher in the smiling photos. A statistically significant difference in the similarity scores (P < 0.05) was found between the relaxed and smiling photographs using the different programs. The correlation between the cephalometric and posteroanterior measurements and the similarity scores was not significant (P > 0.05).

CONCLUSION: SARME treatment caused a significant change in the similarity scores calculated with the help of three different facial recognition programs. The highest similarity scores were found in the smiling photographs, whereas the lowest scores were found in the profile photographs.}, } @article {pmid37766066, year = {2023}, author = {Mangalampalli, S and Karri, GR and Gupta, A and Chakrabarti, T and Nallamala, SH and Chakrabarti, P and Unhelkar, B and Margala, M}, title = {Fault-Tolerant Trust-Based Task Scheduling Algorithm Using Harris Hawks Optimization in Cloud Computing.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {18}, pages = {}, pmid = {37766066}, issn = {1424-8220}, abstract = {Cloud computing is a distributed computing model which renders services for cloud users around the world. These services need to be rendered to customers with high availability and fault tolerance, but there are still chances of having single-point failures in the cloud paradigm, and one challenge to cloud providers is effectively scheduling tasks to avoid failures and acquire the trust of their cloud services by users. This research proposes a fault-tolerant trust-based task scheduling algorithm in which we carefully schedule tasks within precise virtual machines by calculating priorities for tasks and VMs. Harris hawks optimization was used as a methodology to design our scheduler. We used Cloudsim as a simulating tool for our entire experiment. For the entire simulation, we used synthetic fabricated data with different distributions and real-time supercomputer worklogs. Finally, we evaluated the proposed approach (FTTATS) with state-of-the-art approaches, i.e., ACO, PSO, and GA. From the simulation results, our proposed FTTATS greatly minimizes the makespan for ACO, PSO and GA algorithms by 24.3%, 33.31%, and 29.03%, respectively. The rate of failures for ACO, PSO, and GA were minimized by 65.31%, 65.4%, and 60.44%, respectively. 
Trust-based SLA parameters improved, i.e., availability improved for ACO, PSO, and GA by 33.38%, 35.71%, and 28.24%, respectively. The success rate improved for ACO, PSO, and GA by 52.69%, 39.41%, and 38.45%, respectively. Turnaround efficiency was minimized for ACO, PSO, and GA by 51.8%, 47.2%, and 33.6%, respectively.}, } @article {pmid37765972, year = {2023}, author = {Emish, M and Kelani, Z and Hassani, M and Young, SD}, title = {A Mobile Health Application Using Geolocation for Behavioral Activity Tracking.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {18}, pages = {}, pmid = {37765972}, issn = {1424-8220}, support = {n/a/DA/NIDA NIH HHS/United States ; n/a/AT/NCCIH NIH HHS/United States ; }, mesh = {*Mobile Applications ; Smartphone ; Advertising ; Algorithms ; *Blockchain ; }, abstract = {The increasing popularity of mHealth presents an opportunity for collecting rich datasets using mobile phone applications (apps). Our health-monitoring mobile application uses motion detection to track an individual's physical activity and location. The data collected are used to improve health outcomes, such as reducing the risk of chronic diseases and promoting healthier lifestyles through analyzing physical activity patterns. Using smartphone motion detection sensors and GPS receivers, we implemented an energy-efficient tracking algorithm that captures user locations whenever they are in motion. To ensure security and efficiency in data collection and storage, encryption algorithms are used with serverless and scalable cloud storage design. The database schema is designed around Mobile Advertising ID (MAID) as a unique identifier for each device, allowing for accurate tracking and high data quality. Our application uses Google's Activity Recognition Application Programming Interface (API) on Android OS or geofencing and motion sensors on iOS to track most smartphones available. 
In addition, our app leverages blockchain and traditional payments to streamline the compensations and has an intuitive user interface to encourage participation in research. The mobile tracking app was tested for 20 days on an iPhone 14 Pro Max, finding that it accurately captured location during movement and promptly resumed tracking after inactivity periods, while consuming a low percentage of battery life while running in the background.}, } @article {pmid37765912, year = {2023}, author = {Lilhore, UK and Manoharan, P and Simaiya, S and Alroobaea, R and Alsafyani, M and Baqasah, AM and Dalal, S and Sharma, A and Raahemifar, K}, title = {HIDM: Hybrid Intrusion Detection Model for Industry 4.0 Networks Using an Optimized CNN-LSTM with Transfer Learning.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {18}, pages = {}, pmid = {37765912}, issn = {1424-8220}, abstract = {Industrial automation systems are undergoing a revolutionary change with the use of Internet-connected operating equipment and the adoption of cutting-edge advanced technology such as AI, IoT, cloud computing, and deep learning within business organizations. These innovative and additional solutions are facilitating Industry 4.0. However, the emergence of these technological advances and the quality solutions that they enable will also introduce unique security challenges whose consequence needs to be identified. This research presents a hybrid intrusion detection model (HIDM) that uses OCNN-LSTM and transfer learning (TL) for Industry 4.0. The proposed model utilizes an optimized CNN by using enhanced parameters of the CNN via the grey wolf optimizer (GWO) method, which fine-tunes the CNN parameters and helps to improve the model's prediction accuracy. The transfer learning model helps to train the model, and it transfers the knowledge to the OCNN-LSTM model. 
The TL method enhances the training process, acquiring the necessary knowledge from the OCNN-LSTM model and utilizing it in each next cycle, which helps to improve detection accuracy. To measure the performance of the proposed model, we conducted a multi-class classification analysis on various online industrial IDS datasets, i.e., ToN-IoT and UNW-NB15. We have conducted two experiments for these two datasets, and various performance-measuring parameters, i.e., precision, F-measure, recall, accuracy, and detection rate, were calculated for the OCNN-LSTM model with and without TL and also for the CNN and LSTM models. For the ToN-IoT dataset, the OCNN-LSTM with TL model achieved a precision of 92.7%; for the UNW-NB15 dataset, the precision was 94.25%, which is higher than OCNN-LSTM without TL.}, } @article {pmid37765893, year = {2023}, author = {Li, M and Zhang, J and Lin, J and Chen, Z and Zheng, X}, title = {FireFace: Leveraging Internal Function Features for Configuration of Functions on Serverless Edge Platforms.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {18}, pages = {}, pmid = {37765893}, issn = {1424-8220}, support = {62072108//the National Natural Science Foundation of China/ ; 83021094//the Funds for Scientific Research of Fujian Provincial Department of Finance/ ; }, abstract = {The emerging serverless computing has become a captivating paradigm for deploying cloud applications, alleviating developers' concerns about infrastructure resource management by configuring necessary parameters such as latency and memory constraints. Existing resource configuration solutions for cloud-based serverless applications can be broadly classified into modeling based on historical data or a combination of sparse measurements and interpolation/modeling. In pursuit of service response and conserving network bandwidth, platforms have progressively expanded from the traditional cloud to the edge. 
Compared to cloud platforms, serverless edge platforms often lead to more running overhead due to their limited resources, resulting in undesirable financial costs for developers when using the existing solutions. Meanwhile, it is extremely challenging to handle the heterogeneity of edge platforms, characterized by distinct pricing owing to their varying resource preferences. To tackle these challenges, we propose an adaptive and efficient approach called FireFace, consisting of prediction and decision modules. The prediction module extracts the internal features of all functions within the serverless application and uses this information to predict the execution time of the functions under specific configuration schemes. Based on the prediction module, the decision module analyzes the environment information and uses the Adaptive Particle Swarm Optimization algorithm and Genetic Algorithm Operator (APSO-GA) algorithm to select the most suitable configuration plan for each function, including CPU, memory, and edge platforms. In this way, it is possible to effectively minimize the financial overhead while fulfilling the Service Level Objectives (SLOs). Extensive experimental results show that our prediction model obtains optimal results under all three metrics, and the prediction error rate for real-world serverless applications is in the range of 4.25∼9.51%. Our approach can find the optimal resource configuration scheme for each application, which saves 7.2∼44.8% on average compared to other classic algorithms. 
Moreover, FireFace exhibits rapid adaptability, efficiently adjusting resource allocation schemes in response to dynamic environments.}, } @article {pmid37765859, year = {2023}, author = {Yang, D and Liu, Z and Wei, S}, title = {Interactive Learning for Network Anomaly Monitoring and Detection with Human Guidance in the Loop.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {18}, pages = {}, pmid = {37765859}, issn = {1424-8220}, abstract = {With the advancement in big data and cloud computing technology, we have witnessed tremendous developments in applying intelligent techniques in network operation and management. However, learning- and data-based solutions for network operation and maintenance cannot effectively adapt to the dynamic security situation or satisfy administrators' expectations alone. Anomaly detection of time-series monitoring indicators has been a major challenge for network administrative personnel. Monitored indicators in network operations are characterized by multiple instances with high dimensions and fluctuating time-series features and rely on system resource deployment and business environment variations. Hence, there is a growing consensus that conducting anomaly detection with machine intelligence under the operation and maintenance personnel's guidance is more effective than solely using learning and modeling. This paper intends to model the anomaly detection task as a Markov Decision Process and adopts the Double Deep Q-Network algorithm to train an anomaly detection agent, in which the multidimensional temporal convolution network is applied as the principal structure of the Q network and the interactive guidance information from the operation and maintenance personnel is introduced into the procedure to facilitate model convergence. Experimental results on the SMD dataset indicate that the proposed modeling and detection method achieves higher precision and recall rates compared to other learning-based methods. 
Our method achieves model optimization by using human-computer interactions continuously, which guarantees a faster and more consistent model training procedure and convergence.}, } @article {pmid37765801, year = {2023}, author = {Canonico, M and Desimoni, F and Ferrero, A and Grassi, PA and Irwin, C and Campani, D and Dal Molin, A and Panella, M and Magistrelli, L}, title = {Gait Monitoring and Analysis: A Mathematical Approach.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {18}, pages = {}, pmid = {37765801}, issn = {1424-8220}, abstract = {Gait abnormalities are common in the elderly and individuals diagnosed with Parkinson's, often leading to reduced mobility and increased fall risk. Monitoring and assessing gait patterns in these populations play a crucial role in understanding disease progression, early detection of motor impairments, and developing personalized rehabilitation strategies. In particular, by identifying gait irregularities at an early stage, healthcare professionals can implement timely interventions and personalized therapeutic approaches, potentially delaying the onset of severe motor symptoms and improving overall patient outcomes. In this paper, we studied older adults affected by chronic diseases and/or Parkinson's disease by monitoring their gait due to wearable devices that can accurately detect a person's movements. In our study, about 50 people were involved in the trial (20 with Parkinson's disease and 30 people with chronic diseases) who have worn our device for at least 6 months. During the experimentation, each device collected 25 samples from the accelerometer sensor for each second. 
By analyzing those data, we propose a metric for the "gait quality" based on the measure of entropy obtained by applying the Fourier transform.}, } @article {pmid37765790, year = {2023}, author = {Wu, YL and Wang, CS and Weng, WC and Lin, YC}, title = {Development of a Cloud-Based Image Processing Health Checkup System for Multi-Item Urine Analysis.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {18}, pages = {}, pmid = {37765790}, issn = {1424-8220}, abstract = {With the busy pace of modern life, an increasing number of people are afflicted by lifestyle diseases. Going directly to the hospital for medical checks is not only time-consuming but also costly. Fortunately, the emergence of rapid tests has alleviated this burden. Accurately interpreting test results is extremely important; misinterpreting the results of rapid tests could lead to delayed medical treatment. Given that URS-10 serve as a rapid test capable of detecting 10 distinct parameters in urine samples, the results of assessing these parameters can offer insights into the subject's physiological condition. These parameters encompass aspects such as metabolism, renal function, diabetes, urinary tract disorders, hemolytic diseases, and acid-base balance, among others. Although the operational procedure is straightforward, the variegated color changes exhibited in the outcomes of individual parameters render it challenging for lay users to deduce causal factors solely from color variations. Moreover, potential misinterpretations could arise due to visual discrepancies. In this study, we successfully developed a cloud-based health checkup system that can be used in an indoor environment. The system is used by placing a URS-10 test strip on a colorimetric board developed for this study, then using a smartphone application to take images which are uploaded to a server for cloud computing. 
Finally, the interpretation results are stored in the cloud and sent back to the smartphone to be checked by the user. Furthermore, to confirm whether the color calibration technology can eliminate color differences between different cameras, and also whether the colorimetric board and the urine test strips can perform color comparisons correctly in different light intensity environments, indoor environments that could simulate a specific light intensity were established for testing purposes. When comparing the experimental results to real test strips, only two groups failed to reach an identification success rate of 100%, and in both of these cases the success rate reached 95%. The experimental results confirmed that the system developed in this study was able to eliminate color differences between camera devices and could be used without special technical requirements or training.}, } @article {pmid37757612, year = {2023}, author = {Palmer, GA and Tomkin, G and Martín-Alcalá, HE and Mendizabal-Ruiz, G and Cohen, J}, title = {The Internet of Things in assisted reproduction.}, journal = {Reproductive biomedicine online}, volume = {47}, number = {5}, pages = {103338}, doi = {10.1016/j.rbmo.2023.103338}, pmid = {37757612}, issn = {1472-6491}, mesh = {Humans ; *Internet of Things ; Internet ; Automation ; Laboratories ; Reproduction ; }, abstract = {The Internet of Things (IoT) is a network connecting physical objects with sensors, software and internet connectivity for data exchange. Integrating the IoT with medical devices shows promise in healthcare, particularly in IVF laboratories. By leveraging telecommunications, cybersecurity, data management and intelligent systems, the IoT can enable a data-driven laboratory with automation, improved conditions, personalized treatment and efficient workflows. The integration of 5G technology ensures fast and reliable connectivity for real-time data transmission, while blockchain technology secures patient data. 
Fog computing reduces latency and enables real-time analytics. Microelectromechanical systems enable wearable IoT and miniaturized monitoring devices for tracking IVF processes. However, challenges such as security risks and network issues must be addressed through cybersecurity measures and networking advancements. Clinical embryologists should maintain their expertise and knowledge for safety and oversight, even with IoT in the IVF laboratory.}, } @article {pmid37746608, year = {2023}, author = {Baghdadi, A and Guo, E and Lama, S and Singh, R and Chow, M and Sutherland, GR}, title = {Force Profile as Surgeon-Specific Signature.}, journal = {Annals of surgery open : perspectives of surgical history, education, and clinical approaches}, volume = {4}, number = {3}, pages = {e326}, pmid = {37746608}, issn = {2691-3593}, abstract = {OBJECTIVE: To investigate the notion that a surgeon's force profile can be the signature of their identity and performance.

SUMMARY BACKGROUND DATA: Surgeon performance in the operating room is an understudied topic. The advent of deep learning methods paired with a sensorized surgical device presents an opportunity to incorporate quantitative insight into surgical performance and processes. Using a device called the SmartForceps System and through automated analytics, we have previously reported surgeon force profile, surgical skill, and task classification. However, an investigation of whether an individual surgeon can be identified by surgical technique has yet to be studied.

METHODS: In this study, we investigate multiple neural network architectures to identify the surgeon associated with their time-series tool-tissue forces using bipolar forceps data. The surgeon associated with each 10-second window of force data was labeled, and the data were randomly split into 80% for model training and validation (10% validation) and 20% for testing. Data imbalance was mitigated through subsampling from more populated classes with a random size adjustment based on 0.1% of sample counts in the respective class. An exploratory analysis of force segments was performed to investigate underlying patterns differentiating individual surgical techniques.

RESULTS: In a dataset of 2819 ten-second time segments from 89 neurosurgical cases, the best-performing model achieved a micro-average area under the curve of 0.97, a testing F1-score of 0.82, a sensitivity of 82%, and a precision of 82%. This model was a time-series ResNet model to extract features from the time-series data followed by a linearized output into the XGBoost algorithm. Furthermore, we found that convolutional neural networks outperformed long short-term memory networks in performance and speed. Using a weighted average approach, an ensemble model was able to identify an expert surgeon with 83.8% accuracy using a validation dataset.

CONCLUSIONS: Our results demonstrate that each surgeon has a unique force profile amenable to identification using deep learning methods. We anticipate our models will enable a quantitative framework to provide bespoke feedback to surgeons and to track their skill progression longitudinally. Furthermore, the ability to recognize individual surgeons introduces the mechanism of correlating outcome to surgeon performance.}, } @article {pmid37745890, year = {2023}, author = {Habib, W and Connolly, J}, title = {A national-scale assessment of land use change in peatlands between 1989 and 2020 using Landsat data and Google Earth Engine-a case study of Ireland.}, journal = {Regional environmental change}, volume = {23}, number = {4}, pages = {124}, pmid = {37745890}, issn = {1436-3798}, abstract = {Over the centuries, anthropogenic pressure has severely impacted peatlands on the European continent. Peatlands cover ~ 21% (1.46 Mha) of Ireland's land surface, but 85% have been degraded due to management activities (land use). Ireland needs to meet its 2030 climate energy framework targets related to greenhouse gas (GHG) emissions from land use, land use change and forestry, including wetlands. Despite Ireland's voluntary decision to include peatlands in this system in 2020, information on land use activities and associated GHG emissions from peatlands is lacking. This study strives to fill this information gap by using Landsat (5, 8) data with Google Earth Engine and machine learning to examine and quantify land use on Irish peatlands across three time periods: 1990, 2005 and 2019. Four peatland land use classes were mapped and assessed: industrial peat extraction, forestry, grassland and residual peatland. The overall accuracy of the classification was 86% and 85% for the 2005 and 2019 maps, respectively. The accuracy of the 1990 dataset could not be assessed due to the unavailability of high-resolution reference data. 
The results indicate that extensive management activities have taken place in peatlands over the past three decades, which may have negative impacts on its ecological integrity and the many ecosystem services provided. By utilising cloud computing, temporal mosaicking and Landsat data, this study developed a robust methodology that overcomes cloud contamination and produces the first peatland land use maps of Ireland with wall-to-wall coverage. This has the potential for regional and global applications, providing maps that could help understand unsustainable management practices on peatlands and the impact on GHG emissions.}, } @article {pmid37745873, year = {2023}, author = {{Computational Intelligence and Neuroscience}}, title = {Retracted: The Reform of University Education Teaching Based on Cloud Computing and Big Data Background.}, journal = {Computational intelligence and neuroscience}, volume = {2023}, number = {}, pages = {9893153}, pmid = {37745873}, issn = {1687-5273}, abstract = {[This retracts the article DOI: 10.1155/2022/8169938.].}, } @article {pmid37744210, year = {2023}, author = {Verner, E and Petropoulos, H and Baker, B and Bockholt, HJ and Fries, J and Bohsali, A and Raja, R and Trinh, DH and Calhoun, V}, title = {BrainForge: an online data analysis platform for integrative neuroimaging acquisition, analysis, and sharing.}, journal = {Concurrency and computation : practice & experience}, volume = {35}, number = {18}, pages = {}, pmid = {37744210}, issn = {1532-0626}, support = {R01 MH118695/MH/NIMH NIH HHS/United States ; R01 MH123610/MH/NIMH NIH HHS/United States ; R41 MH122201/MH/NIMH NIH HHS/United States ; R41 MH100070/MH/NIMH NIH HHS/United States ; R01 EB020407/EB/NIBIB NIH HHS/United States ; }, abstract = {BrainForge is a cloud-enabled, web-based analysis platform for neuroimaging research. This website allows users to archive data from a study and effortlessly process data on a high-performance computing cluster. 
After analyses are completed, results can be quickly shared with colleagues. BrainForge solves multiple problems for researchers who want to analyze neuroimaging data, including issues related to software, reproducibility, computational resources, and data sharing. BrainForge can currently process structural, functional, diffusion, and arterial spin labeling MRI modalities, including preprocessing and group level analyses. Additional pipelines are currently being added, and the pipelines can accept the BIDS format. Analyses are conducted completely inside of Singularity containers and utilize popular software packages including Nipype, Statistical Parametric Mapping, the Group ICA of fMRI Toolbox, and FreeSurfer. BrainForge also features several interfaces for group analysis, including a fully automated adaptive ICA approach.}, } @article {pmid37738400, year = {2023}, author = {Lim, HG and Fann, YC and Lee, YG}, title = {COWID: an efficient cloud-based genomics workflow for scalable identification of SARS-COV-2.}, journal = {Briefings in bioinformatics}, volume = {24}, number = {5}, pages = {}, pmid = {37738400}, issn = {1477-4054}, support = {HHSN261201400008C/NH/NIH HHS/United States ; ZIC NS009443/ImNIH/Intramural NIH HHS/United States ; HHSN261201500003I/CA/NCI NIH HHS/United States ; 75N91019D00024/CA/NCI NIH HHS/United States ; }, mesh = {Humans ; *COVID-19/diagnosis ; Cloud Computing ; SARS-CoV-2/genetics ; Workflow ; Genomics ; }, abstract = {Implementing a specific cloud resource to analyze extensive genomic data on severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2) poses a challenge when resources are limited. To overcome this, we repurposed a cloud platform initially designed for use in research on cancer genomics (https://cgc.sbgenomics.com) to enable its use in research on SARS-CoV-2 to build Cloud Workflow for Viral and Variant Identification (COWID). 
COWID is a workflow based on the Common Workflow Language that realizes the full potential of sequencing technology for use in reliable SARS-CoV-2 identification and leverages cloud computing to achieve efficient parallelization. COWID outperformed other contemporary methods for identification by offering scalable identification and reliable variant findings with no false-positive results. COWID typically processed each sample of raw sequencing data within 5 min at a cost of only US$0.01. The COWID source code is publicly available (https://github.com/hendrick0403/COWID) and can be accessed on any computer with Internet access. COWID is designed to be user-friendly; it can be implemented without prior programming knowledge. Therefore, COWID is a time-efficient tool that can be used during a pandemic.}, } @article {pmid37732291, year = {2023}, author = {Pessin, VZ and Santos, CAS and Yamane, LH and Siman, RR and Baldam, RL and Júnior, VL}, title = {A method of Mapping Process for scientific production using the Smart Bibliometrics.}, journal = {MethodsX}, volume = {11}, number = {}, pages = {102367}, pmid = {37732291}, issn = {2215-0161}, abstract = {Big data launches a modern way of producing science and research around the world. Due to an explosion of data available in scientific databases, combined with recent advances in information technology, the researcher has at his disposal new methods and technologies that facilitate scientific development. Considering the challenges of producing science in a dynamic and complex scenario, the main objective of this article is to present a method aligned with tools recently developed to support scientific production, based on steps and technologies that will help researchers to materialize their objectives efficiently and effectively. Applying this method, the researcher can apply science mapping and bibliometric techniques with agility, taking advantage of an easy-to-use solution with cloud computing capabilities. 
From the application of the "Scientific Mapping Process", the researcher will be able to generate strategic information for a result-oriented scientific production, assertively going through the main steps of research and boosting scientific discovery in the most diverse fields of investigation. •The Scientific Mapping Process provides a method and a system to boost scientific development.•It automates Science Mapping and bibliometric analysis from scientific datasets.•It facilitates the researcher's work, increasing the assertiveness in scientific production.}, } @article {pmid37729405, year = {2023}, author = {Willett, DS and Brannock, J and Dissen, J and Keown, P and Szura, K and Brown, OB and Simonson, A}, title = {NOAA Open Data Dissemination: Petabyte-scale Earth system data in the cloud.}, journal = {Science advances}, volume = {9}, number = {38}, pages = {eadh0032}, pmid = {37729405}, issn = {2375-2548}, abstract = {NOAA Open Data Dissemination (NODD) makes NOAA environmental data publicly and freely available on Amazon Web Services (AWS), Microsoft Azure (Azure), and Google Cloud Platform (GCP). These data can be accessed by anyone with an internet connection and span key datasets across the Earth system including satellite imagery, radar, weather models and observations, ocean databases, and climate data records. Since its inception, NODD has grown to provide public access to more than 24 PB of NOAA data and can support billions of requests and petabytes of access daily. Stakeholders routinely access more than 5 PB of NODD data every month. NODD continues to grow to support open petabyte-scale Earth system data science in the cloud by onboarding additional NOAA data and exploring performant data formats. Here, we document how this program works with a focus on provenance, key datasets, and use. 
We also highlight how to access these data with the goal of accelerating use of NOAA resources in the cloud.}, } @article {pmid37718323, year = {2023}, author = {Namazi, F and Ezoji, M and Parmehr, EG}, title = {Paddy Rice mapping in fragmented lands by improved phenology curve and correlation measurements on Sentinel-2 imagery in Google earth engine.}, journal = {Environmental monitoring and assessment}, volume = {195}, number = {10}, pages = {1220}, doi = {10.1007/s10661-023-11808-3}, pmid = {37718323}, issn = {1573-2959}, mesh = {*Oryza ; Search Engine ; Environmental Monitoring ; Algorithms ; Water ; }, abstract = {Accurate and timely rice crop mapping is important to address the challenges of food security, water management, disease transmission, and land use change. However, accurate rice crop mapping is difficult due to the presence of mixed pixels in small and fragmented rice fields as well as cloud cover. In this paper, a phenology-based method using Sentinel-2 time series images is presented to solve these problems. First, the improved rice phenology curve is extracted based on Normalized Difference Vegetation Index and Land Surface Water Index time series data of rice fields. Then, correlation was taken between rice phenology curve and time series data of each pixel. The correlation result of each pixel shows the similarity of its time series behavior with the proposed rice phenology curve. In the next step, the maximum correlation value and its occurrence time are used as the feature vectors of each pixel to classification. Since correlation measurement provides data with better separability than its input data, training the classifier can be done with fewer samples and the classification is more accurate. The implementation of the proposed correlation-based algorithm can be done in a parallel computing. All the processes were performed on the Google Earth Engine cloud platform on the time series images of the Sentinel 2. 
The implementations show the high accuracy of this method.}, } @article {pmid37705635, year = {2023}, author = {Yang, J and Han, J and Wan, Q and Xing, S and Chen, F}, title = {A novel similarity measurement for triangular cloud models based on dual consideration of shape and distance.}, journal = {PeerJ. Computer science}, volume = {9}, number = {}, pages = {e1506}, pmid = {37705635}, issn = {2376-5992}, abstract = {It is important to be able to measure the similarity between two uncertain concepts for many real-life AI applications, such as image retrieval, collaborative filtering, risk assessment, and data clustering. Cloud models are important cognitive computing models that show promise in measuring the similarity of uncertain concepts. Here, we aim to address the shortcomings of existing cloud model similarity measurement algorithms, such as poor discrimination ability and unstable measurement results. We propose an EPTCM algorithm based on the triangular fuzzy number EW-type closeness and cloud drop variance, considering the shape and distance similarities of existing cloud models. The experimental results show that the EPTCM algorithm has good recognition and classification accuracy and is more accurate than the existing Likeness comparing method (LICM), overlap-based expectation curve (OECM), fuzzy distance-based similarity (FDCM) and multidimensional similarity cloud model (MSCM) methods. The experimental results also demonstrate that the EPTCM algorithm has successfully overcome the shortcomings of existing algorithms. 
In summary, the EPTCM method proposed here is effective and feasible to implement.}, } @article {pmid37702950, year = {2024}, author = {Pribec, I and Hachinger, S and Hayek, M and Pringle, GJ and Brüchle, H and Jamitzky, F and Mathias, G}, title = {Efficient and Reliable Data Management for Biomedical Applications.}, journal = {Methods in molecular biology (Clifton, N.J.)}, volume = {2716}, number = {}, pages = {383-403}, pmid = {37702950}, issn = {1940-6029}, mesh = {*Data Management ; *Big Data ; Cloud Computing ; Documentation ; Movement ; }, abstract = {This chapter discusses the challenges and requirements of modern Research Data Management (RDM), particularly for biomedical applications in the context of high-performance computing (HPC). The FAIR data principles (Findable, Accessible, Interoperable, Reusable) are of special importance. Data formats, publication platforms, annotation schemata, automated data management and staging, the data infrastructure in HPC centers, file transfer and staging methods in HPC, and the EUDAT components are discussed. Tools and approaches for automated data movement and replication in cross-center workflows are explained, as well as the development of ontologies for structuring and quality-checking of metadata in computational biomedicine. The CompBioMed project is used as a real-world example of implementing these principles and tools in practice. The LEXIS project has built a workflow-execution and data management platform that follows the paradigm of HPC-Cloud convergence for demanding Big Data applications. It is used for orchestrating workflows with YORC, utilizing the data documentation initiative (DDI) and distributed computing resources (DCI). The platform is accessed by a user-friendly LEXIS portal for workflow and data management, making HPC and Cloud Computing significantly more accessible. Checkpointing, duplicate runs, and spare images of the data are used to create resilient workflows. 
The CompBioMed project is completing the implementation of such a workflow, using data replication and brokering, which will enable urgent computing on exascale platforms.}, } @article {pmid37702940, year = {2024}, author = {Bonde, B}, title = {Edge, Fog, and Cloud Against Disease: The Potential of High-Performance Cloud Computing for Pharma Drug Discovery.}, journal = {Methods in molecular biology (Clifton, N.J.)}, volume = {2716}, number = {}, pages = {181-202}, pmid = {37702940}, issn = {1940-6029}, mesh = {Algorithms ; *Artificial Intelligence ; *Cloud Computing ; Drug Discovery ; Software ; }, abstract = {The high-performance computing (HPC) platform for large-scale drug discovery simulation demands significant investment in speciality hardware, maintenance, resource management, and running costs. The rapid growth in computing hardware has made it possible to provide cost-effective, robust, secure, and scalable alternatives to the on-premise (on-prem) HPC via Cloud, Fog, and Edge computing. It has enabled recent state-of-the-art machine learning (ML) and artificial intelligence (AI)-based tools for drug discovery, such as BERT, BARD, AlphaFold2, and GPT. This chapter attempts to overview types of software architectures for developing scientific software or application with deployment agnostic (on-prem to cloud and hybrid) use cases. Furthermore, the chapter aims to outline how the innovation is disrupting the orthodox mindset of monolithic software running on on-prem HPC and provide the paradigm shift landscape to microservices driven application programming (API) and message parsing interface (MPI)-based scientific computing across the distributed, high-available infrastructure. 
This is coupled with agile DevOps, good coding practices, and low-code and no-code application development frameworks for cost-efficient, secure, automated, and robust scientific application life cycle management.}, } @article {pmid37693890, year = {2023}, author = {Zhang, W and Zhang, C and Cao, L and Liang, F and Xie, W and Tao, L and Chen, C and Yang, M and Zhong, L}, title = {Application of digital-intelligence technology in the processing of Chinese materia medica.}, journal = {Frontiers in pharmacology}, volume = {14}, number = {}, pages = {1208055}, pmid = {37693890}, issn = {1663-9812}, abstract = {Processing of Chinese Materia Medica (PCMM) is the concentrated embodiment, which is the core of Chinese unique traditional pharmaceutical technology. The processing includes the preparation steps such as cleansing, cutting and stir-frying, to make certain impacts on the quality and efficacy of Chinese botanical drugs. The rapid development of new computer digital technologies, such as big data analysis, Internet of Things (IoT), blockchain and cloud computing artificial intelligence, has promoted the rapid development of traditional pharmaceutical manufacturing industry with digitalization and intellectualization. In this review, the application of digital intelligence technology in the PCMM was analyzed and discussed, which hopefully promoted the standardization of the process and secured the quality of botanical drugs decoction pieces. Through the intellectualization and the digitization of production, safety and effectiveness of clinical use of traditional Chinese medicine (TCM) decoction pieces were ensured. 
This review also provided a theoretical basis for further technical upgrading and high-quality development of TCM industry.}, } @article {pmid37693367, year = {2023}, author = {Griffin, AC and Khairat, S and Bailey, SC and Chung, AE}, title = {A chatbot for hypertension self-management support: user-centered design, development, and usability testing.}, journal = {JAMIA open}, volume = {6}, number = {3}, pages = {ooad073}, pmid = {37693367}, issn = {2574-2531}, support = {UM1 TR004406/TR/NCATS NIH HHS/United States ; }, abstract = {OBJECTIVES: Health-related chatbots have demonstrated early promise for improving self-management behaviors but have seldomly been utilized for hypertension. This research focused on the design, development, and usability evaluation of a chatbot for hypertension self-management, called "Medicagent."

MATERIALS AND METHODS: A user-centered design process was used to iteratively design and develop a text-based chatbot using Google Cloud's Dialogflow natural language understanding platform. Then, usability testing sessions were conducted among patients with hypertension. Each session was comprised of: (1) background questionnaires, (2) 10 representative tasks within Medicagent, (3) System Usability Scale (SUS) questionnaire, and (4) a brief semi-structured interview. Sessions were video and audio recorded using Zoom. Qualitative and quantitative analyses were used to assess effectiveness, efficiency, and satisfaction of the chatbot.

RESULTS: Participants (n = 10) completed nearly all tasks (98%, 98/100) and spent an average of 18 min (SD = 10 min) interacting with Medicagent. Only 11 (8.6%) utterances were not successfully mapped to an intent. Medicagent achieved a mean SUS score of 78.8/100, which demonstrated acceptable usability. Several participants had difficulties navigating the conversational interface without menu and back buttons, felt additional information would be useful for redirection when utterances were not recognized, and desired a health professional persona within the chatbot.

DISCUSSION: The text-based chatbot was viewed favorably for assisting with blood pressure and medication-related tasks and had good usability.

CONCLUSION: Flexibility of interaction styles, handling unrecognized utterances gracefully, and having a credible persona were highlighted as design components that may further enrich the user experience of chatbots for hypertension self-management.}, } @article {pmid37692531, year = {2023}, author = {Angelidis, E}, title = {A perspective on large-scale simulation as an enabler for novel biorobotics applications.}, journal = {Frontiers in robotics and AI}, volume = {10}, number = {}, pages = {1102286}, pmid = {37692531}, issn = {2296-9144}, abstract = {Our understanding of the complex mechanisms that power biological intelligence has been greatly enhanced through the explosive growth of large-scale neuroscience and robotics simulation tools that are used by the research community to perform previously infeasible experiments, such as the simulation of the neocortex's circuitry. Nevertheless, simulation falls far from being directly applicable to biorobots due to the large discrepancy between the simulated and the real world. A possible solution for this problem is the further enhancement of existing simulation tools for robotics, AI and neuroscience with multi-physics capabilities. Previously infeasible or difficult to simulate scenarios, such as robots swimming on the water surface, interacting with soft materials, walking on granular materials etc., would be rendered possible within a multi-physics simulation environment designed for robotics. In combination with multi-physics simulation, large-scale simulation tools that integrate multiple simulation modules in a closed-loop manner help address fundamental questions around the organization of neural circuits and the interplay between the brain, body and environment. We analyze existing designs for large-scale simulation running on cloud and HPC infrastructure as well as their shortcomings. 
Based on this analysis we propose a next-gen modular architecture design based on multi-physics engines, that we believe would greatly benefit biorobotics and AI.}, } @article {pmid37688118, year = {2023}, author = {Urblik, L and Kajati, E and Papcun, P and Zolotova, I}, title = {A Modular Framework for Data Processing at the Edge: Design and Implementation.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {17}, pages = {}, pmid = {37688118}, issn = {1424-8220}, support = {APVV-20-0247//Slovak Research and Development Agency/ ; }, abstract = {There is a rapid increase in the number of edge devices in IoT solutions, generating vast amounts of data that need to be processed and analyzed efficiently. Traditional cloud-based architectures can face latency, bandwidth, and privacy challenges when dealing with this data flood. There is currently no unified approach to the creation of edge computing solutions. This work addresses this problem by exploring containerization for data processing solutions at the network's edge. The current approach involves creating a specialized application compatible with the device used. Another approach involves using containerization for deployment and monitoring. The heterogeneity of edge environments would greatly benefit from a universal modular platform. Our proposed edge computing-based framework implements a streaming extract, transform, and load pipeline for data processing and analysis using ZeroMQ as the communication backbone and containerization for scalable deployment. 
Results demonstrate the effectiveness of the proposed framework, making it suitable for time-sensitive IoT applications.}, } @article {pmid37688051, year = {2023}, author = {Shi, W and Chen, L and Zhu, X}, title = {Task Offloading Decision-Making Algorithm for Vehicular Edge Computing: A Deep-Reinforcement-Learning-Based Approach.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {17}, pages = {}, pmid = {37688051}, issn = {1424-8220}, support = {2022YFB3305500//National Key Research and Development Program of China/ ; 62273089//National Natural Science Foundation of China/ ; 62102080//National Natural Science Foundation of China/ ; BK20210204//Natural Science Foundation of Jiangsu Province/ ; }, abstract = {Efficient task offloading decision is a crucial technology in vehicular edge computing, which aims to fulfill the computational performance demands of complex vehicular tasks with respect to delay and energy consumption while minimizing network resource competition and consumption. Conventional distributed task offloading decisions rely solely on the local state of the vehicle, failing to optimize the utilization of the server's resources to its fullest potential. In addition, the mobility aspect of vehicles is often neglected in these decisions. In this paper, a cloud-edge-vehicle three-tier vehicular edge computing (VEC) system is proposed, where vehicles partially offload their computing tasks to edge or cloud servers while keeping the remaining tasks local to the vehicle terminals. Under the restrictions of vehicle mobility and discrete variables, task scheduling and task offloading proportion are jointly optimized with the objective of minimizing the total system cost. Considering the non-convexity, high-dimensional complex state and continuous action space requirements of the optimization problem, we propose a task offloading decision-making algorithm based on deep deterministic policy gradient (TODM_DDPG). 
TODM_DDPG algorithm adopts the actor-critic framework in which the actor network outputs floating point numbers to represent deterministic policy, while the critic network evaluates the action output by the actor network, and adjusts the network evaluation policy according to the rewards with the environment to maximize the long-term reward. To explore the algorithm performance, we conduct parameter-setting experiments to tune the algorithm's core hyper-parameters and select the optimal combination of parameters. In addition, in order to verify algorithm performance, we also carry out a series of comparative experiments with baseline algorithms. The results demonstrate that in terms of reducing system costs, the proposed algorithm outperforms the compared baseline algorithms, such as the deep Q network (DQN) and the actor-critic (AC), and the performance is improved by about 13% on average.}, } @article {pmid37687890, year = {2023}, author = {Zhou, W and Qian, Z and Ni, X and Tang, Y and Guo, H and Zhuang, S}, title = {Dense Convolutional Neural Network for Identification of Raman Spectra.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {17}, pages = {}, pmid = {37687890}, issn = {1424-8220}, support = {21010502900//Science and Technology Commission of Shanghai Municipality/ ; }, abstract = {The rapid development of cloud computing and deep learning makes the intelligent modes of applications widespread in various fields. The identification of Raman spectra can be realized in the cloud, due to its powerful computing, abundant spectral databases and advanced algorithms. Thus, it can reduce the dependence on the performance of the terminal instruments. However, the complexity of the detection environment can cause great interferences, which might significantly decrease the identification accuracies of algorithms. In this paper, a deep learning algorithm based on the Dense network has been proposed to satisfy the realization of this vision. 
The proposed Dense convolutional neural network has a very deep structure of over 40 layers and plenty of parameters to adjust the weight of different wavebands. In the kernel Dense blocks part of the network, it has a feed-forward fashion of connection for each layer to every other layer. It can alleviate the gradient vanishing or explosion problems, strengthen feature propagations, encourage feature reuses and enhance training efficiency. The network's special architecture mitigates noise interferences and ensures precise identification. The Dense network shows more accuracy and robustness compared to other CNN-based algorithms. We set up a database of 1600 Raman spectra consisting of 32 different types of liquid chemicals. They are detected using different postures as examples of interfered Raman spectra. In the 50 repeated training and testing sets, the Dense network can achieve a weighted accuracy of 99.99%. We have also tested the RRUFF database and the Dense network has a good performance. The proposed approach advances cloud-enabled Raman spectra identification, offering improved accuracy and adaptability for diverse identification tasks.}, } @article {pmid37687870, year = {2023}, author = {Sangaiah, AK and Javadpour, A and Pinto, P and Chiroma, H and Gabralla, LA}, title = {Cost-Effective Resources for Computing Approximation Queries in Mobile Cloud Computing Infrastructure.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {17}, pages = {}, pmid = {37687870}, issn = {1424-8220}, abstract = {Answering a query through a peer-to-peer database presents one of the greatest challenges due to the high cost and time required to obtain a comprehensive response. Consequently, these systems were primarily designed to handle approximation queries. In our research, the primary objective was to develop an intelligent system capable of responding to approximate set-value inquiries. 
This paper explores the use of particle optimization to enhance the system's intelligence. In contrast to previous studies, our proposed method avoids the use of sampling. Despite the utilization of the best sampling methods, there remains a possibility of error, making it difficult to guarantee accuracy. Nonetheless, achieving a certain degree of accuracy is crucial in handling approximate queries. Various factors influence the accuracy of sampling procedures. The results of our studies indicate that the suggested method has demonstrated improvements in terms of the number of queries issued, the number of peers examined, and its execution time, which is significantly faster than the flood approach. Answering queries poses one of the most arduous challenges in peer-to-peer databases, as obtaining a complete answer is both costly and time-consuming. Consequently, approximation queries have been adopted as a solution in these systems. Our research evaluated several methods, including flood algorithms, parallel diffusion algorithms, and ISM algorithms. When it comes to query transmission, the proposed method exhibits superior cost-effectiveness and execution times.}, } @article {pmid37687784, year = {2023}, author = {Alsemmeari, RA and Dahab, MY and Alturki, B and Alsulami, AA and Alsini, R}, title = {Towards an Effective Service Allocation in Fog Computing.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {17}, pages = {}, pmid = {37687784}, issn = {1424-8220}, support = {IFPIP: 1033-611-1443//Deanship of Scientific Research (DSR) at King Abdulaziz University, Jeddah/ ; }, abstract = {The Internet of Things (IoT) generates a large volume of data whenever devices are interconnected and exchange data across a network. Consequently, a variety of services with diverse needs arises, including capacity requirements, data quality, and latency demands. 
These services operate on fog computing devices, which are limited in power and bandwidth compared to the cloud. The primary challenge lies in determining the optimal location for service implementation: in the fog, in the cloud, or in a hybrid setup. This paper introduces an efficient allocation technique that moves processing closer to the network's fog side. It explores the optimal allocation of devices and services while maintaining resource utilization within an IoT architecture. The paper also examines the significance of allocating services to devices and optimizing resource utilization in fog computing. In IoT scenarios, where a wide range of services and devices coexist, it becomes crucial to effectively assign services to devices. We propose priority-based service allocation (PSA) and sort-based service allocation (SSA) techniques, which are employed to determine the optimal order for the utilizing devices to perform different services. Experimental results demonstrate that our proposed technique reduces data communication over the network by 88%, which is achieved by allocating most services locally in the fog. We increased the distribution of services to fog devices by 96%, while simultaneously minimizing the wastage of fog resources.}, } @article {pmid37679146, year = {2023}, author = {Tian, L and Shang, F and Gan, C}, title = {Optimal control analysis of malware propagation in cloud environments.}, journal = {Mathematical biosciences and engineering : MBE}, volume = {20}, number = {8}, pages = {14502-14517}, doi = {10.3934/mbe.2023649}, pmid = {37679146}, issn = {1551-0018}, abstract = {Cloud computing has become a widespread technology that delivers a broad range of services across various industries globally. 
One of the crucial features of cloud infrastructure is virtual machine (VM) migration, which plays a pivotal role in resource allocation flexibility and reducing energy consumption, but it also provides convenience for the fast propagation of malware. To tackle the challenge of curtailing the proliferation of malware in the cloud, this paper proposes an effective strategy based on optimal dynamic immunization using a controlled dynamical model. The objective of the research is to identify the most efficient way of dynamically immunizing the cloud to minimize the spread of malware. To achieve this, we define the control strategy and loss and give the corresponding optimal control problem. The optimal control analysis of the controlled dynamical model is examined theoretically and experimentally. Finally, the theoretical and experimental results both demonstrate that the optimal strategy can minimize the incidence of infections at a reasonable loss.}, } @article {pmid37676890, year = {2023}, author = {Niu, S and Dong, R and Fang, L}, title = {Certificateless broadcast signcryption scheme supporting equality test in smart grid.}, journal = {PloS one}, volume = {18}, number = {9}, pages = {e0290666}, pmid = {37676890}, issn = {1932-6203}, mesh = {*Algorithms ; *Cloud Computing ; Internet ; Privacy ; Trust ; }, abstract = {With the development of cloud computing and the application of Internet of Things (IoT) in the smart grid, a massive amount of sensitive data is produced by the terminal equipment. This vast amount of data is subject to various attacks during transmission, from which users must be protected. However, most of the existing schemes require a large amount of network bandwidth resources and cannot ensure the receiver's anonymity. To solve these shortcomings, we construct a broadcast signcryption scheme supporting equality test based on certificateless cryptosystem. 
The scheme employs a symmetric encryption algorithm to improve encryption and transmission efficiency; the Lagrange interpolation theorem is used to encrypt the user's identity to ensure the privacy preservation of terminal devices; and a trusted third party is used to eliminate duplicated ciphertext for identical messages using an equality test, resulting in efficient network bandwidth utilization. Experimental analysis shows that our work has greater advantages in the field of practical broadcast services.}, } @article {pmid37672552, year = {2023}, author = {, }, title = {Retraction: Construction and optimization of inventory management system via cloud-edge collaborative computing in supply chain environment in the Internet of Things era.}, journal = {PloS one}, volume = {18}, number = {9}, pages = {e0291318}, pmid = {37672552}, issn = {1932-6203}, } @article {pmid37669969, year = {2023}, author = {Zhao, Y and Ye, H}, title = {Power system low delay resource scheduling model based on edge computing node.}, journal = {Scientific reports}, volume = {13}, number = {1}, pages = {14634}, pmid = {37669969}, issn = {2045-2322}, abstract = {As more and more intelligent devices are put into the field of power system, the number of connected nodes in the power network is increasing exponentially. Under the background of smart grid cooperation across power areas and voltage levels, how to effectively process the massive data generated by smart grid has become a difficult problem to ensure the stable operation of power system. In the complex calculation process of power system, the operation time of complex calculation can not be shortened to the greatest extent, and the execution efficiency can not be improved. Therefore, this paper proposes a two-phase heuristic algorithm based on edge computing. 
In solving the virtual machine sequence problem, for the main partition and the coordination partition, the critical path algorithm is used to sort the virtual machines to minimize the computing time. For other sub-partitions, the minimum cut algorithm is used to reduce the traffic interaction of each sub-partition. In the second stage of the virtual machine placement process, an improved best fit algorithm is used to avoid poor placement of virtual machines across physical machine configurations, resulting in increased computing time. Through the experiment on the test system, it is proved that the calculation efficiency is improved when the coordinated partition calculation belongs to the target partition. Because the edge computing is closer to the data source, it can save more data transmission time than cloud computing. This paper provides an effective algorithm for power system distributed computing in virtual machine configuration in edge computing, which can effectively reduce the computing time of power system and improve the efficiency of system resource utilization.}, } @article {pmid37662679, year = {2023}, author = {Healthcare Engineering, JO}, title = {Retracted: Construction and Clinical Application Effect of General Surgery Patient-Oriented Nursing Information Platform Using Cloud Computing.}, journal = {Journal of healthcare engineering}, volume = {2023}, number = {}, pages = {9784736}, pmid = {37662679}, issn = {2040-2309}, abstract = {[This retracts the article DOI: 10.1155/2022/8273701.].}, } @article {pmid37649809, year = {2023}, author = {Zainudin, H and Koufos, K and Lee, G and Jiang, L and Dianati, M}, title = {Impact analysis of cooperative perception on the performance of automated driving in unsignalized roundabouts.}, journal = {Frontiers in robotics and AI}, volume = {10}, number = {}, pages = {1164950}, pmid = {37649809}, issn = {2296-9144}, abstract = {This paper reports the implementation and results of a simulation-based analysis of 
the impact of cloud/edge-enabled cooperative perception on the performance of automated driving in unsignalized roundabouts. This is achieved by comparing the performance of automated driving assisted by cooperative perception to that of a baseline system, where the automated vehicle relies only on its onboard sensing and perception for motion planning and control. The paper first provides the descriptions of the implemented simulation model, which integrates the SUMO road traffic generator and CARLA simulator. This includes descriptions of both the baseline and cooperative perception-assisted automated driving systems. We then define a set of relevant key performance indicators for traffic efficiency, safety, and ride comfort, as well as simulation scenarios to collect relevant data for our analysis. This is followed by the description of simulation scenarios, presentation of the results, and discussions of the insights learned from the results.}, } @article {pmid37631822, year = {2023}, author = {Almudayni, Z and Soh, B and Li, A}, title = {Enhancing Energy Efficiency and Fast Decision Making for Medical Sensors in Healthcare Systems: An Overview and Novel Proposal.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {16}, pages = {}, pmid = {37631822}, issn = {1424-8220}, mesh = {*Conservation of Energy Resources ; Physical Phenomena ; *Algorithms ; Industry ; Decision Making ; }, abstract = {In the realm of the Internet of Things (IoT), a network of sensors and actuators collaborates to fulfill specific tasks. As the demand for IoT networks continues to rise, it becomes crucial to ensure the stability of this technology and adapt it for further expansion. 
Through an analysis of related works, including the feedback-based optimized fuzzy scheduling approach (FOFSA) algorithm, the adaptive task allocation technique (ATAT), and the osmosis load balancing algorithm (OLB), we identify their limitations in achieving optimal energy efficiency and fast decision making. To address these limitations, this research introduces a novel approach to enhance the processing time and energy efficiency of IoT networks. The proposed approach achieves this by efficiently allocating IoT data resources in the Mist layer during the early stages. We apply the approach to our proposed system known as the Mist-based fuzzy healthcare system (MFHS) that demonstrates promising potential to overcome the existing challenges and pave the way for the efficient industrial Internet of healthcare things (IIoHT) of the future.}, } @article {pmid37631769, year = {2023}, author = {Hamzei, M and Khandagh, S and Jafari Navimipour, N}, title = {A Quality-of-Service-Aware Service Composition Method in the Internet of Things Using a Multi-Objective Fuzzy-Based Hybrid Algorithm.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {16}, pages = {}, pmid = {37631769}, issn = {1424-8220}, abstract = {The Internet of Things (IoT) represents a cutting-edge technical domain, encompassing billions of intelligent objects capable of bridging the physical and virtual worlds across various locations. IoT services are responsible for delivering essential functionalities. In this dynamic and interconnected IoT landscape, providing high-quality services is paramount to enhancing user experiences and optimizing system efficiency. Service composition techniques come into play to address user requests in IoT applications, allowing various IoT services to collaborate seamlessly. Considering the resource limitations of IoT devices, they often leverage cloud infrastructures to overcome technological constraints, benefiting from unlimited resources and capabilities. 
Moreover, the emergence of fog computing has gained prominence, facilitating IoT application processing in edge networks closer to IoT sensors and effectively reducing delays inherent in cloud data centers. In this context, our study proposes a cloud-/fog-based service composition for IoT, introducing a novel fuzzy-based hybrid algorithm. This algorithm ingeniously combines Ant Colony Optimization (ACO) and Artificial Bee Colony (ABC) optimization algorithms, taking into account energy consumption and Quality of Service (QoS) factors during the service selection process. By leveraging this fuzzy-based hybrid algorithm, our approach aims to revolutionize service composition in IoT environments by empowering intelligent decision-making capabilities and ensuring optimal user satisfaction. Our experimental results demonstrate the effectiveness of the proposed strategy in successfully fulfilling service composition requests by identifying suitable services. When compared to recently introduced methods, our hybrid approach yields significant benefits. On average, it reduces energy consumption by 17.11%, enhances availability and reliability by 8.27% and 4.52%, respectively, and improves the average cost by 21.56%.}, } @article {pmid37631746, year = {2023}, author = {Alasmari, MK and Alwakeel, SS and Alohali, YA}, title = {A Multi-Classifiers Based Algorithm for Energy Efficient Tasks Offloading in Fog Computing.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {16}, pages = {}, pmid = {37631746}, issn = {1424-8220}, abstract = {The IoT has connected a vast number of devices on a massive internet scale. With the rapid increase in devices and data, offloading tasks from IoT devices to remote Cloud data centers becomes unproductive and costly. Optimizing energy consumption in IoT devices while meeting deadlines and data constraints is challenging. Fog Computing aids efficient IoT task processing with proximity to nodes and lower service delay. 
Cloud task offloading occurs frequently due to Fog Computing's limited resources compared to remote Cloud, necessitating improved techniques for accurate categorization and distribution of IoT device task offloading in a hybrid IoT, Fog, and Cloud paradigm. This article explores relevant offloading strategies in Fog Computing and proposes MCEETO, an intelligent energy-aware allocation strategy, utilizing a multi-classifier-based algorithm for efficient task offloading by selecting optimal Fog Devices (FDs) for module placement. MCEETO decision parameters include task attributes, Fog node characteristics, network latency, and bandwidth. The method is evaluated using the iFogSim simulator and compared with edge-ward and Cloud-only strategies. The proposed solution is more energy-efficient, saving around 11.36% compared to Cloud-only and approximately 9.30% compared to the edge-ward strategy. Additionally, the MCEETO algorithm achieved a 67% and 96% reduction in network usage compared to both strategies.}, } @article {pmid37631678, year = {2023}, author = {Ashraf, M and Shiraz, M and Abbasi, A and Alqahtani, O and Badshah, G and Lasisi, A}, title = {Microservice Application Scheduling in Multi-Tiered Fog-Computing-Enabled IoT.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {16}, pages = {}, pmid = {37631678}, issn = {1424-8220}, support = {Grant number will be provide later.//Funder details will be provided later./ ; RGP2/394/44//Deanship of Scientific Research at King Khalid University/ ; }, abstract = {Fog computing extends mobile cloud computing facilities at the network edge, yielding low-latency application execution. To supplement cloud services, computationally intensive applications can be distributed on resource-constrained mobile devices by leveraging underutilized nearby resources to meet the latency and bandwidth requirements of application execution. 
Building upon this premise, it is necessary to investigate idle or underutilized resources that are present at the edge of the network. The utilization of a microservice architecture in IoT application development, with its increased granularity in service breakdown, provides opportunities for improved scalability, maintainability, and extensibility. In this research, the proposed schedule tackles the latency requirements of applications by identifying suitable upward migration of microservices within a multi-tiered fog computing infrastructure. This approach enables optimal utilization of network edge resources. Experimental validation is performed using the iFogSim2 simulator and the results are compared with existing baselines. The results demonstrate that compared to the edgewards approach, our proposed technique significantly improves the latency requirements of application execution, network usage, and energy consumption by 66.92%, 69.83%, and 4.16%, respectively.}, } @article {pmid37631666, year = {2023}, author = {Xiong, H and Yu, B and Yi, Q and He, C}, title = {End-Cloud Collaboration Navigation Planning Method for Unmanned Aerial Vehicles Used in Small Areas.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {16}, pages = {}, pmid = {37631666}, issn = {1424-8220}, abstract = {Unmanned aerial vehicle (UAV) collaboration has become the main means of indoor and outdoor regional search, railway patrol, and other tasks, and navigation planning is one of the key, albeit difficult, technologies. The purpose of UAV navigation planning is to plan reasonable trajectories for UAVs to avoid obstacles and reach the task area. Essentially, it is a complex optimization problem that requires the use of navigation planning algorithms to search for path-point solutions that meet the requirements under the guide of objective functions and constraints. 
At present, there are autonomous navigation modes of UAVs relying on airborne sensors and navigation control modes of UAVs relying on ground control stations (GCSs). However, due to the limitation of airborne processor computing power, and background command and control communication delay, a navigation planning method that takes into account accuracy and timeliness is needed. First, the navigation planning architecture of UAVs of end-cloud collaboration was designed. Then, the background cloud navigation planning algorithm of UAVs was designed based on the improved particle swarm optimization (PSO). Next, the navigation control algorithm of the UAV terminals was designed based on the multi-objective hybrid swarm intelligent optimization algorithm. Finally, the computer simulation and actual indoor-environment flight test based on small rotor UAVs were designed and conducted. The results showed that the proposed method is correct and feasible, and can improve the effectiveness and efficiency of navigation planning of UAVs.}, } @article {pmid37630088, year = {2023}, author = {Chen, J and Qiu, L and Zhu, Z and Sun, N and Huang, H and Ip, WH and Yung, KL}, title = {An Adaptive Infrared Small-Target-Detection Fusion Algorithm Based on Multiscale Local Gradient Contrast for Remote Sensing.}, journal = {Micromachines}, volume = {14}, number = {8}, pages = {}, pmid = {37630088}, issn = {2072-666X}, support = {2022296//Youth Innovation Promition Association of the Chinese Academy of Sciences/ ; }, abstract = {Space vehicles such as missiles and aircraft have relatively long tracking distances. Infrared (IR) detectors are used for small target detection. The target presents point target characteristics, which lack contour, shape, and texture information. The high-brightness cloud edge and high noise have an impact on the detection of small targets because of the complex background of the sky and ground environment. 
Traditional template-based filtering and local contrast-based methods do not distinguish between different complex background environments, and their strategy is to unify small-target template detection or to use absolute contrast differences; so, it is easy to have a high false alarm rate. It is necessary to study the detection and tracking methods in complex backgrounds and low signal-to-clutter ratios (SCRs). We use the complexity difference as a prior condition for detection in the background of thick clouds and ground highlight buildings. Then, we use the spatial domain filtering and improved local contrast joint algorithm to obtain a significant area. We also provide a new definition of gradient uniformity through the improvement of the local gradient method, which could further enhance the target contrast. It is important to distinguish between small targets, highlighted background edges, and noise. Furthermore, the method can be used for parallel computing. Compared with the traditional space filtering algorithm or local contrast algorithm, the flexible fusion strategy can achieve the rapid detection of small targets with a higher signal-to-clutter ratio gain (SCRG) and background suppression factor (BSF).}, } @article {pmid37627775, year = {2023}, author = {Choi, W and Choi, T and Heo, S}, title = {A Comparative Study of Automated Machine Learning Platforms for Exercise Anthropometry-Based Typology Analysis: Performance Evaluation of AWS SageMaker, GCP VertexAI, and MS Azure.}, journal = {Bioengineering (Basel, Switzerland)}, volume = {10}, number = {8}, pages = {}, pmid = {37627775}, issn = {2306-5354}, support = {INNO-2022-01//National Research Foundation of Korea/ ; }, abstract = {The increasing prevalence of machine learning (ML) and automated machine learning (AutoML) applications across diverse industries necessitates rigorous comparative evaluations of their predictive accuracies under various computational environments. 
The purpose of this research was to compare and analyze the predictive accuracy of several machine learning algorithms, including RNNs, LSTMs, GRUs, XGBoost, and LightGBM, when implemented on different platforms such as Google Colab Pro, AWS SageMaker, GCP Vertex AI, and MS Azure. The predictive performance of each model within its respective environment was assessed using performance metrics such as accuracy, precision, recall, F1-score, and log loss. All algorithms were trained on the same dataset and implemented on their specified platforms to ensure consistent comparisons. The dataset used in this study comprised fitness images, encompassing 41 exercise types and totaling 6 million samples. These images were acquired from AI-hub, and joint coordinate values (x, y, z) were extracted utilizing the Mediapipe library. The extracted values were then stored in a CSV format. Among the ML algorithms, LSTM demonstrated the highest performance, achieving an accuracy of 73.75%, precision of 74.55%, recall of 73.68%, F1-score of 73.11%, and a log loss of 0.71. Conversely, among the AutoML algorithms, XGBoost performed exceptionally well on AWS SageMaker, boasting an accuracy of 99.6%, precision of 99.8%, recall of 99.2%, F1-score of 99.5%, and a log loss of 0.014. On the other hand, LightGBM exhibited the poorest performance on MS Azure, achieving an accuracy of 84.2%, precision of 82.2%, recall of 81.8%, F1-score of 81.5%, and a log loss of 1.176. The unnamed algorithm implemented on GCP Vertex AI showcased relatively favorable results, with an accuracy of 89.9%, precision of 94.2%, recall of 88.4%, F1-score of 91.2%, and a log loss of 0.268. Despite LightGBM's lackluster performance on MS Azure, the GRU implemented in Google Colab Pro displayed encouraging results, yielding an accuracy of 88.2%, precision of 88.5%, recall of 88.1%, F1-score of 88.4%, and a log loss of 0.44. 
Overall, this study revealed significant variations in performance across different algorithms and platforms. Particularly, AWS SageMaker's implementation of XGBoost outperformed other configurations, highlighting the importance of carefully considering the choice of algorithm and computational environment in predictive tasks. To gain a comprehensive understanding of the factors contributing to these performance discrepancies, further investigations are recommended.}, } @article {pmid37624874, year = {2022}, author = {Hoang, V and Hung, LH and Perez, D and Deng, H and Schooley, R and Arumilli, N and Yeung, KY and Lloyd, W}, title = {Container Profiler: Profiling resource utilization of containerized big data pipelines.}, journal = {GigaScience}, volume = {12}, number = {}, pages = {}, pmid = {37624874}, issn = {2047-217X}, support = {R01 GM126019/GM/NIGMS NIH HHS/United States ; R03 AI159286/AI/NIAID NIH HHS/United States ; U24 HG012674/HG/NHGRI NIH HHS/United States ; }, mesh = {*Big Data ; *Benchmarking ; Computational Biology ; Software ; Time Factors ; }, abstract = {BACKGROUND: This article presents the Container Profiler, a software tool that measures and records the resource usage of any containerized task. Our tool profiles the CPU, memory, disk, and network utilization of containerized tasks collecting over 60 Linux operating system metrics at the virtual machine, container, and process levels. The Container Profiler supports performing time-series profiling at a configurable sampling interval to enable continuous monitoring of the resources consumed by containerized tasks and pipelines.

RESULTS: To investigate the utility of the Container Profiler, we profile the resource utilization requirements of a multistage bioinformatics analytical pipeline (RNA sequencing using unique molecular identifiers). We examine profiling metrics to assess patterns of CPU, disk, and network resource utilization across the different stages of the pipeline. We also quantify the profiling overhead of our Container Profiler tool to assess the impact of profiling a running pipeline with different levels of profiling granularity, verifying that impacts are negligible.

CONCLUSIONS: The Container Profiler provides a useful tool that can be used to continuously monitor the resource consumption of long and complex containerized applications that run locally or on the cloud. This can help identify bottlenecks where more resources are needed to improve performance.}, } @article {pmid37624836, year = {2023}, author = {Meri, A and Hasan, MK and Dauwed, M and Jarrar, M and Aldujaili, A and Al-Bsheish, M and Shehab, S and Kareem, HM}, title = {Organizational and behavioral attributes' roles in adopting cloud services: An empirical study in the healthcare industry.}, journal = {PloS one}, volume = {18}, number = {8}, pages = {e0290654}, pmid = {37624836}, issn = {1932-6203}, mesh = {*Cloud Computing ; *Health Care Sector ; Behavior Control ; Internet ; Data Analysis ; }, abstract = {The need for cloud services has been raised globally to provide a platform for healthcare providers to efficiently manage their citizens' health records and thus provide treatment remotely. In Iraq, the healthcare records of public hospitals are increasing progressively with poor digital management. While recent works indicate cloud computing as a platform for all sectors globally, a lack of empirical evidence demands a comprehensive investigation to identify the significant factors that influence the utilization of cloud health computing. Here we provide a cost-effective, modular, and computationally efficient model of utilizing cloud computing based on the organization theory and the theory of reasoned action perspectives. A total of 105 key informant data were further analyzed. The partial least square structural equation modeling was used for data analysis to explore the effect of organizational structure variables on healthcare information technicians' behaviors to utilize cloud services. 
Empirical results revealed that Internet networks, software modularity, hardware modularity, and training availability significantly influence information technicians' behavioral control and confirmation. Furthermore, these factors positively impacted their utilization of cloud systems, while behavioral control had no significant effect. The importance-performance map analysis further confirms that these factors exhibit high importance in shaping user utilization. Our findings can provide a comprehensive and unified guide to policymakers in the healthcare industry by focusing on the significant factors in organizational and behavioral contexts to engage health information technicians in the development and implementation phases.}, } @article {pmid37623505, year = {2023}, author = {Gazerani, P}, title = {Intelligent Digital Twins for Personalized Migraine Care.}, journal = {Journal of personalized medicine}, volume = {13}, number = {8}, pages = {}, pmid = {37623505}, issn = {2075-4426}, abstract = {Intelligent digital twins closely resemble their real-life counterparts. In health and medical care, they enable the real-time monitoring of patients, whereby large amounts of data can be collected to produce actionable information. These powerful tools are constructed with the aid of artificial intelligence, machine learning, and deep learning; the Internet of Things; and cloud computing to collect a diverse range of digital data (e.g., from digital patient journals, wearable sensors, and digitized monitoring equipment or processes), which can provide information on the health conditions and therapeutic responses of their physical twins. Intelligent digital twins can enable data-driven clinical decision making and advance the realization of personalized care. Migraines are a highly prevalent and complex neurological disorder affecting people of all ages, genders, and geographical locations. 
It is ranked among the top disabling diseases, with substantial negative personal and societal impacts, but the current treatment strategies are suboptimal. Personalized care for migraines has been suggested to optimize their treatment. The implementation of intelligent digital twins for migraine care can theoretically be beneficial in supporting patient-centric care management. It is also expected that the implementation of intelligent digital twins will reduce costs in the long run and enhance treatment effectiveness. This study briefly reviews the concept of digital twins and the available literature on digital twins for health disorders such as neurological diseases. Based on these, the potential construction and utility of digital twins for migraines will then be presented. The potential and challenges when implementing intelligent digital twins for the future management of migraines are also discussed.}, } @article {pmid37621906, year = {2023}, author = {{Chemistry, IJOA}}, title = {Retracted: Residential Environment Pollution Monitoring System Based on Cloud Computing and Internet of Things.}, journal = {International journal of analytical chemistry}, volume = {2023}, number = {}, pages = {9858523}, pmid = {37621906}, issn = {1687-8760}, abstract = {[This retracts the article DOI: 10.1155/2022/1013300.].}, } @article {pmid37602873, year = {2023}, author = {Ahmed, MW and Hossainy, SJ and Khaliduzzaman, A and Emmert, JL and Kamruzzaman, M}, title = {Non-destructive optical sensing technologies for advancing the egg industry toward Industry 4.0: A review.}, journal = {Comprehensive reviews in food science and food safety}, volume = {22}, number = {6}, pages = {4378--4403}, doi = {10.1111/1541-4337.13227}, pmid = {37602873}, issn = {1541-4337}, mesh = {Animals ; Humans ; *Artificial Intelligence ; Quality Control ; *Animal Welfare ; Big Data ; }, abstract = {The egg is considered one of the best sources of dietary protein, and has an important role in human growth
and development. With the increase in the world's population, per capita egg consumption is also increasing. Ground-breaking technological developments have led to numerous inventions like the Internet of Things (IoT), various optical sensors, robotics, artificial intelligence (AI), big data, and cloud computing, transforming the conventional industry into a smart and sustainable egg industry, also known as Egg Industry 4.0 (EI 4.0). The EI 4.0 concept has the potential to improve automation, enhance biosecurity, promote the safeguarding of animal welfare, increase intelligent grading and quality inspection, and increase efficiency. For a sustainable Industry 4.0 transformation, it is important to analyze available technologies, the latest research, existing limitations, and prospects. This review examines the existing non-destructive optical sensing technologies for the egg industry. It provides information and insights on the different components of EI 4.0, including emerging EI 4.0 technologies for egg production, quality inspection, and grading. Furthermore, drawbacks of current EI 4.0 technologies, potential workarounds, and future trends were critically analyzed. This review can help policymakers, industrialists, and academicians to better understand the integration of non-destructive technologies and automation. 
This integration has the potential to increase productivity, improve quality control, and optimize resource management toward sustainable development of the egg industry.}, } @article {pmid37593602, year = {2023}, author = {Rodrigues de Almeida, C and Garcia, N and Campos, JC and Alírio, J and Arenas-Castro, S and Gonçalves, A and Sillero, N and Teodoro, AC}, title = {Time-series analyses of land surface temperature changes with Google Earth Engine in a mountainous region.}, journal = {Heliyon}, volume = {9}, number = {8}, pages = {e18846}, pmid = {37593602}, issn = {2405-8440}, abstract = {Studying changes in temperature is fundamental for understanding its interactions with the environment and biodiversity. However, studies in mountainous areas are few, due to their complex formation and the difficulty of obtaining local data. We analysed changes in temperature over time in Montesinho Natural Park (MNP) (Bragança, Portugal), an important conservation area due to its high level of biodiversity. Specifically, we aimed to analyse: i) whether temperature increased in MNP over time, ii) what environmental factors influence the Land Surface Temperature (LST), and iii) whether vegetation is related to changes in temperature. We used annual summer and winter mean data acquired from the Moderate-Resolution Imaging Spectroradiometer (MODIS) datasets/products (e.g. LST, gathered at four different times: 11am, 1pm, 10pm and 2am, Enhance vegetation index - EVI, and Evapotranspiration - ET), available on the cloud-based platform Google Earth Engine between 2003 and 2021). We analysed the dynamics of the temporal trend patterns between the LST and local thermal data (from a weather station) by correlations; the trends in LST over time with the Mann-Kendall trend test; and the stability of hot spots and cold spots of LST with Local Statistics of Spatial Association (LISA) tests. The temporal trend patterns between LST and Air Temperature (Tair) data were very similar (ρ > 0.7). 
The temperature in the MNP remained stable over time during summer but increased during winter nights. The biophysical indices were strongly correlated with the summer LST at 11am and 1pm. The LISA results identified hot and cold zones that remained stable over time. The remote-sensed data proved to be efficient in measuring changes in temperature over time.}, } @article {pmid37593394, year = {2023}, author = {{Environmental And Public Health, JO}}, title = {Retracted: Sport Resource Classification Algorithm for Health Promotion Based on Cloud Computing: Rhythmic Gymnastics' Example.}, journal = {Journal of environmental and public health}, volume = {2023}, number = {}, pages = {9831318}, pmid = {37593394}, issn = {1687-9813}, abstract = {[This retracts the article DOI: 10.1155/2022/2587169.].}, } @article {pmid37593082, year = {2023}, author = {{Intelligence And Neuroscience, C}}, title = {Retracted: Computer Security Issues and Legal System Based on Cloud Computing.}, journal = {Computational intelligence and neuroscience}, volume = {2023}, number = {}, pages = {9892354}, pmid = {37593082}, issn = {1687-5273}, abstract = {[This retracts the article DOI: 10.1155/2022/8112212.].}, } @article {pmid37587979, year = {2023}, author = {Alomair, L and Abolfotouh, MA}, title = {Awareness and Predictors of the Use of Bioinformatics in Genome Research in Saudi Arabia.}, journal = {International journal of general medicine}, volume = {16}, number = {}, pages = {3413--3425}, pmid = {37587979}, issn = {1178-7074}, abstract = {BACKGROUND: With the advances in genomics research, many countries still need more bioinformatics skills. This study aimed to assess the levels of awareness of bioinformatics and predictors of its use in genomics research among scientists in Saudi Arabia.

METHODS: In a cross-sectional survey, 309 scientists of different biological and biomedical specialties were subjected to a previously validated e-questionnaire to collect data on (1) Knowledge about bioinformatics programming languages and tools, (2) Attitude toward acceptance of bioinformatics resources in genome-related research, and (3) The pattern of information-seeking to online bioinformatics resources. Logistic regression analysis was applied to identify the predictors of using bioinformatics in research. Significance was set at p<0.05.

RESULTS: More than one-half (248, 56.4%) of all scientists reported a lack of bioinformatics knowledge. Most participants had a neutral attitude toward bioinformatics (295, 95.4%). The barriers facing acceptance of bioinformatics tools reported were; lack of training (210, 67.9%), insufficient support (180, 58.2%), and complexity of software (138, 44.6%). The limited experience was reported in; having one or more bioinformatics tools (98, 31.7%), using a supercomputer in their research inside (44, 14.2%) and outside Saudi Arabia (55, 17.8%), the need for developing a program to solve a biological problem (129, 41.7%), working in one or more fields of bioinformatics (93, 30.1%), using web applications (112, 36.2%), and using programming languages (102, 33.0%). Significant predictors of conducting genomics research were; younger scientists (p=0.039), Ph.D. education (p=0.003), more than five years of experience (p<0.05), previous training (p<0.001), and higher bioinformatics knowledge scores (p<0.001).

CONCLUSION: The study revealed a short knowledge, a neutral attitude, a lack of resources, and limited use of bioinformatics resources in genomics research. Education and training during each education level and during the job is recommended. Cloud-based resources may help scientists do research using publicly available Omics data. Further studies are necessary to evaluate collaboration among bioinformatics software developers and biologists.}, } @article {pmid37586146, year = {2023}, author = {Manson, EN and Hasford, F and Trauernicht, C and Ige, TA and Inkoom, S and Inyang, S and Samba, O and Khelassi-Toutaoui, N and Lazarus, G and Sosu, EK and Pokoo-Aikins, M and Stoeva, M}, title = {Africa's readiness for artificial intelligence in clinical radiotherapy delivery: Medical physicists to lead the way.}, journal = {Physica medica : PM : an international journal devoted to the applications of physics to medicine and biology : official journal of the Italian Association of Biomedical Physics (AIFB)}, volume = {113}, number = {}, pages = {102653}, doi = {10.1016/j.ejmp.2023.102653}, pmid = {37586146}, issn = {1724-191X}, mesh = {Humans ; *Artificial Intelligence ; *Radiation Oncology ; Machine Learning ; Curriculum ; Africa ; }, abstract = {BACKGROUND: There have been several proposals by researchers for the introduction of Artificial Intelligence (AI) technology due to its promising role in radiotherapy practice. However, prior to the introduction of the technology, there are certain general recommendations that must be achieved. Also, the current challenges of AI must be addressed. In this review, we assess how Africa is prepared for the integration of AI technology into radiotherapy service delivery.

METHODS: To assess the readiness of Africa for integration of AI in radiotherapy services delivery, a narrative review of the available literature from PubMed, Science Direct, Google Scholar, and Scopus was conducted in the English language using search terms such as Artificial Intelligence, Radiotherapy in Africa, Machine Learning, Deep Learning, and Quality Assurance.

RESULTS: We identified a number of issues that could limit the successful integration of AI technology into radiotherapy practice. The major issues include insufficient data for training and validation of AI models, lack of educational curriculum for AI radiotherapy-related courses, no/limited AI teaching professionals, funding, and lack of AI technology and resources. Solutions identified to facilitate smooth implementation of the technology into radiotherapy practices within the region include: creating an accessible national data bank, integrating AI radiotherapy training programs into Africa's educational curriculum, investing in AI technology and resources such as electronic health records and cloud storage, and creation of legal laws and policies to support the use of the technology. These identified solutions need to be implemented on the background of creating awareness among health workers within the radiotherapy space.

CONCLUSION: The challenges identified in this review are common among all the geographical regions in the African continent. Therefore, all institutions offering radiotherapy education and training programs, management of the medical centers for radiotherapy and oncology, national and regional professional bodies for medical physics, ministries of health, governments, and relevant stakeholders must take keen interest and work together to achieve this goal.}, } @article {pmid37579550, year = {2023}, author = {Aminizadeh, S and Heidari, A and Toumaj, S and Darbandi, M and Navimipour, NJ and Rezaei, M and Talebi, S and Azad, P and Unal, M}, title = {The applications of machine learning techniques in medical data processing based on distributed computing and the Internet of Things.}, journal = {Computer methods and programs in biomedicine}, volume = {241}, number = {}, pages = {107745}, doi = {10.1016/j.cmpb.2023.107745}, pmid = {37579550}, issn = {1872-7565}, mesh = {Humans ; *COVID-19 ; *Internet of Things ; Algorithms ; Cloud Computing ; Machine Learning ; }, abstract = {Medical data processing has grown into a prominent topic in the latest decades with the primary goal of maintaining patient data via new information technologies, including the Internet of Things (IoT) and sensor technologies, which generate patient indexes in hospital data networks. Innovations like distributed computing, Machine Learning (ML), blockchain, chatbots, wearables, and pattern recognition can adequately enable the collection and processing of medical data for decision-making in the healthcare era. Particularly, to assist experts in the disease diagnostic process, distributed computing is beneficial by digesting huge volumes of data swiftly and producing personalized smart suggestions. On the other side, the current globe is confronting an outbreak of COVID-19, so an early diagnosis technique is crucial to lowering the fatality rate. 
ML systems are beneficial in aiding radiologists in examining the incredible amount of medical images. Nevertheless, they demand a huge quantity of training data that must be unified for processing. Hence, developing Deep Learning (DL) confronts multiple issues, such as conventional data collection, quality assurance, knowledge exchange, privacy preservation, administrative laws, and ethical considerations. In this research, we intend to convey an inclusive analysis of the most recent studies in distributed computing platform applications based on five categorized platforms, including cloud computing, edge, fog, IoT, and hybrid platforms. So, we evaluated 27 articles regarding the usage of the proposed framework, deployed methods, and applications, noting the advantages, drawbacks, and the applied dataset and screening the security mechanism and the presence of the Transfer Learning (TL) method. As a result, it was proved that most recent research (about 43%) used the IoT platform as the environment for the proposed architecture, and most of the studies (about 46%) were done in 2021. In addition, the most popular utilized DL algorithm was the Convolutional Neural Network (CNN), with a percentage of 19.4%. Hence, despite how technology changes, delivering appropriate therapy for patients is the primary aim of healthcare-associated departments. 
Therefore, further studies are recommended to develop more functional architectures based on DL and distributed environments and better evaluate the present healthcare data analysis models.}, } @article {pmid37576291, year = {2023}, author = {Hu, X}, title = {The role of deep learning in the innovation of smart classroom teaching mode under the background of internet of things and fuzzy control.}, journal = {Heliyon}, volume = {9}, number = {8}, pages = {e18594}, pmid = {37576291}, issn = {2405-8440}, abstract = {Electronic components are rapidly updated in the context of expanding application requirements, and communication protocols used in combination with various electronic devices are also emerging. On this basis, IoT technology has developed a variety of sensor devices and gateways, which are widely used in cities. In the field of wisdom, applying IoT technology to classrooms can effectively improve the deficiencies of traditional teaching models. Fuzzy control theory is usually based on fuzzy sets in mathematics, and is combined with neural network, genetic and probability algorithms to form a calculation method. Fuzzy calculation has the ability to simplify the system input of a variety of complex variables, and its applications in the field of education are mainly: provide evaluation of teachers' teaching effectiveness. The advancement of science and technology has promoted the change and updating of the teaching mode. With the continuous advancement of basic education curriculum reform and the continuous deepening of classroom teaching reform, classroom teaching is also in urgent need of reform, from traditional classrooms to smart classrooms. Smart classrooms combine advanced technology with teachers' teaching. Through the dynamic data, the analysis instantly understands the student's learning situation, and then integrates it into education and teaching in a targeted manner. 
This paper conducts a questionnaire survey on the current situation of smart classroom teaching, and summarizes the current teaching problems. Then, combining the Internet of Things, fuzzy control and deep learning technology, from the two aspects of school teachers and students, it is proposed for smart classroom to promote students' learning effect. With its novel and new-style teaching advantages, smart classroom has gradually entered the public's vision and gained the attention and support of the majority of educators. Taking Grand Wisdom Classroom as an example, it uses the "Internet +" way of thinking and the new generation of information technology such as big data and cloud computing to create intelligent and efficient classrooms, realizing the whole process of application before, during and after class, and promoting the development of students' wisdom. Under the mobile Internet model, students and teachers can communicate anytime and anywhere. Combined with the analysis and application of our big data technology, data-based precision teaching becomes possible. In a real sense, learning before teaching can be realized and teaching can be determined by learning.}, } @article {pmid37575364, year = {2023}, author = {Sauerwein, N and Orsi, F and Uhrich, P and Bandyopadhyay, S and Mattiotti, F and Cantat-Moltrecht, T and Pupillo, G and Hauke, P and Brantut, JP}, title = {Engineering random spin models with atoms in a high-finesse cavity.}, journal = {Nature physics}, volume = {19}, number = {8}, pages = {1128-1134}, pmid = {37575364}, issn = {1745-2473}, abstract = {All-to-all interacting, disordered quantum many-body models have a wide range of applications across disciplines, from spin glasses in condensed-matter physics over holographic duality in high-energy physics to annealing algorithms in quantum computing. Typically, these models are abstractions that do not find unambiguous physical realizations in nature. 
Here we realize an all-to-all interacting, disordered spin system by subjecting an atomic cloud in a cavity to a controllable light shift. Adjusting the detuning between atom resonance and cavity mode, we can tune between disordered versions of a central-mode model and a Lipkin-Meshkov-Glick model. By spectroscopically probing the low-energy excitations of the system, we explore the competition of interactions with disorder across a broad parameter range. We show how disorder in the central-mode model breaks the strong collective coupling, making the dark-state manifold cross over to a random distribution of weakly mixed light-matter, 'grey', states. In the Lipkin-Meshkov-Glick model, the ferromagnetic finite-sized ground state evolves towards a paramagnet as disorder is increased. In that regime, semi-localized eigenstates emerge, as we observe by extracting bounds on the participation ratio. These results present substantial steps towards freely programmable cavity-mediated interactions for the design of arbitrary spin Hamiltonians.}, } @article {pmid37571718, year = {2023}, author = {Torres-Hernández, MA and Escobedo-Barajas, MH and Guerrero-Osuna, HA and Ibarra-Pérez, T and Solís-Sánchez, LO and Martínez-Blanco, MDR}, title = {Performance Analysis of Embedded Multilayer Perceptron Artificial Neural Networks on Smart Cyber-Physical Systems for IoT Environments.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {15}, pages = {}, pmid = {37571718}, issn = {1424-8220}, support = {CPE/COTEBAL/38/2022//Instituto Politécnico Nacional/ ; 1012152//Consejo Nacional de Ciencia y Tecnología/ ; }, abstract = {At present, modern society is experiencing a significant transformation. Thanks to the digitization of society and manufacturing, mainly because of a combination of technologies, such as the Internet of Things, cloud computing, machine learning, smart cyber-physical systems, etc., which are making the smart factory and Industry 4.0 a reality. 
Currently, most of the intelligence of smart cyber-physical systems is implemented in software. For this reason, in this work, we focused on the artificial intelligence software design of this technology, one of the most complex and critical. This research aimed to study and compare the performance of a multilayer perceptron artificial neural network designed for solving the problem of character recognition in three implementation technologies: personal computers, cloud computing environments, and smart cyber-physical systems. After training and testing the multilayer perceptron, training time and accuracy tests showed each technology has particular characteristics and performance. Nevertheless, the three technologies have a similar performance of 97% accuracy, despite a difference in the training time. The results show that the artificial intelligence embedded in fog technology is a promising alternative for developing smart cyber-physical systems.}, } @article {pmid37571716, year = {2023}, author = {Fernández-Urrutia, M and Arbelo, M and Gil, A}, title = {Identification of Paddy Croplands and Its Stages Using Remote Sensors: A Systematic Review.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {15}, pages = {}, pmid = {37571716}, issn = {1424-8220}, abstract = {Rice is a staple food that feeds nearly half of the world's population. With the population of our planet expected to keep growing, it is crucial to carry out accurate mapping, monitoring, and assessments since these could significantly impact food security, climate change, spatial planning, and land management. Using the PRISMA systematic review protocol, this article identified and selected 122 scientific articles (journals papers and conference proceedings) addressing different remote sensing-based methodologies to map paddy croplands, published between 2010 and October 2022. This analysis includes full coverage of the mapping of rice paddies and their various stages of crop maturity. 
This review paper classifies the methods based on the data source: (a) multispectral (62%), (b) multisource (20%), and (c) radar (18%). Furthermore, it analyses the impact of machine learning on those methodologies and the most common algorithms used. We found that MODIS (28%), Sentinel-2 (18%), Sentinel-1 (15%), and Landsat-8 (11%) were the most used sensors. The impact of Sentinel-1 on multisource solutions is also increasing due to the potential of backscatter information to determine textures in different stages and decrease cloud cover constraints. The preferred solutions include phenology algorithms via the use of vegetation indices, setting thresholds, or applying machine learning algorithms to classify images. In terms of machine learning algorithms, random forest is the most used (17 times), followed by support vector machine (12 times) and isodata (7 times). With the continuous development of technology and computing, it is expected that solutions such as multisource solutions will emerge more frequently and cover larger areas in different locations and at a higher resolution. In addition, the continuous improvement of cloud detection algorithms will positively impact multispectral solutions.}, } @article {pmid37571695, year = {2023}, author = {Ahamed, Z and Khemakhem, M and Eassa, F and Alsolami, F and Basuhail, A and Jambi, K}, title = {Deep Reinforcement Learning for Workload Prediction in Federated Cloud Environments.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {15}, pages = {}, pmid = {37571695}, issn = {1424-8220}, support = {RG-9-611-38//King Abdulaziz University/ ; }, abstract = {The Federated Cloud Computing (FCC) paradigm provides scalability advantages to Cloud Service Providers (CSP) in preserving their Service Level Agreement (SLA) as opposed to single Data Centers (DC). However, existing research has primarily focused on Virtual Machine (VM) placement, with less emphasis on energy efficiency and SLA adherence. 
In this paper, we propose a novel solution, Federated Cloud Workload Prediction with Deep Q-Learning (FEDQWP). Our solution addresses the complex VM placement problem, energy efficiency, and SLA preservation, making it comprehensive and beneficial for CSPs. By leveraging the capabilities of deep learning, our FEDQWP model extracts underlying patterns and optimizes resource allocation. Real-world workloads are extensively evaluated to demonstrate the efficacy of our approach compared to existing solutions. The results show that our DQL model outperforms other algorithms in terms of CPU utilization, migration time, finished tasks, energy consumption, and SLA violations. Specifically, our QLearning model achieves efficient CPU utilization with a median value of 29.02, completes migrations in an average of 0.31 units, finishes an average of 699 tasks, consumes the least energy with an average of 1.85 kWh, and exhibits the lowest number of SLA violations with an average of 0.03 violations proportionally. These quantitative results highlight the superiority of our proposed method in optimizing performance in FCC environments.}, } @article {pmid37571606, year = {2023}, author = {Zhang, D and Zhong, Z and Xia, Y and Wang, Z and Xiong, W}, title = {An Automatic Classification System for Environmental Sound in Smart Cities.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {15}, pages = {}, pmid = {37571606}, issn = {1424-8220}, abstract = {With the continuous promotion of "smart cities" worldwide, the approach to be used in combining smart cities with modern advanced technologies (Internet of Things, cloud computing, artificial intelligence) has become a hot topic. However, due to the non-stationary nature of environmental sound and the interference of urban noise, it is challenging to fully extract features from the model with a single input and achieve ideal classification results, even with deep learning methods. 
To improve the recognition accuracy of ESC (environmental sound classification), we propose a dual-branch residual network (dual-resnet) based on feature fusion. Furthermore, in terms of data pre-processing, a loop-padding method is proposed to patch shorter data, enabling it to obtain more useful information. At the same time, in order to prevent the occurrence of overfitting, we use the time-frequency data enhancement method to expand the dataset. After uniform pre-processing of all the original audio, the dual-branch residual network automatically extracts the frequency domain features of the log-Mel spectrogram and log-spectrogram. Then, the two different audio features are fused to make the representation of the audio features more comprehensive. The experimental results show that compared with other models, the classification accuracy of the UrbanSound8k dataset has been improved to different degrees.}, } @article {pmid37571545, year = {2023}, author = {Ali, A and Al-Rimy, BAS and Alsubaei, FS and Almazroi, AA and Almazroi, AA}, title = {HealthLock: Blockchain-Based Privacy Preservation Using Homomorphic Encryption in Internet of Things Healthcare Applications.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {15}, pages = {}, pmid = {37571545}, issn = {1424-8220}, support = {MoE-IF-UJ-22-0708-2//Deputyship for Research Innovation, Ministry of Education in Saudi Arabia/ ; }, mesh = {Humans ; *Blockchain ; Privacy ; Computer Security ; *Internet of Things ; Delivery of Health Care ; }, abstract = {The swift advancement of the Internet of Things (IoT), coupled with the growing application of healthcare software in this area, has given rise to significant worries about the protection and confidentiality of critical health data. To address these challenges, blockchain technology has emerged as a promising solution, providing decentralized and immutable data storage and transparent transaction records. 
However, traditional blockchain systems still face limitations in terms of preserving data privacy. This paper proposes a novel approach to enhancing privacy preservation in IoT-based healthcare applications using homomorphic encryption techniques combined with blockchain technology. Homomorphic encryption facilitates the performance of calculations on encrypted data without requiring decryption, thus safeguarding the data's privacy throughout the computational process. The encrypted data can be processed and analyzed by authorized parties without revealing the actual contents, thereby protecting patient privacy. Furthermore, our approach incorporates smart contracts within the blockchain network to enforce access control and to define data-sharing policies. These smart contracts provide fine-grained permission settings, which ensure that only authorized entities can access and utilize the encrypted data. These settings protect the data from being viewed by unauthorized parties. In addition, our system generates an audit record of all data transactions, which improves both accountability and transparency. We have provided a comparative evaluation with the standard models, taking into account factors such as communication expense, transaction volume, and security. The findings of our experiments suggest that our strategy protects the confidentiality of the data while at the same time enabling effective data processing and analysis. In conclusion, the combination of homomorphic encryption and blockchain technology presents a solution that is both resilient and protective of users' privacy for healthcare applications integrated with IoT. 
This strategy offers a safe and open setting for the management and exchange of sensitive patient medical data, while simultaneously preserving the confidentiality of the patients involved.}, } @article {pmid37571543, year = {2023}, author = {Waleed, M and Kamal, T and Um, TW and Hafeez, A and Habib, B and Skouby, KE}, title = {Unlocking Insights in IoT-Based Patient Monitoring: Methods for Encompassing Large-Data Challenges.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {15}, pages = {}, pmid = {37571543}, issn = {1424-8220}, support = {2020-0-00833//This work was supported by the Institute of Information & communications Technology Planning & Evaluation (IITP) grant funded by the South Korea government (MSIT)/ ; }, mesh = {Humans ; *Internet of Things ; Data Collection ; Data Visualization ; Health Personnel ; Monitoring, Physiologic ; }, abstract = {The remote monitoring of patients using the internet of things (IoT) is essential for ensuring continuous observation, improving healthcare, and decreasing the associated costs (i.e., reducing hospital admissions and emergency visits). There has been much emphasis on developing methods and approaches for remote patient monitoring using IoT. Most existing frameworks cover parts or sub-parts of the overall system but fail to provide a detailed and well-integrated model that covers different layers. The leverage of remote monitoring tools and their coupling with health services requires an architecture that handles data flow and enables significant interventions. This paper proposes a cloud-based patient monitoring model that enables IoT-generated data collection, storage, processing, and visualization. The system has three main parts: sensing (IoT-enabled data collection), network (processing functions and storage), and application (interface for health workers and caretakers). In order to handle the large IoT data, the sensing module employs filtering and variable sampling. 
This pre-processing helps reduce the data received from IoT devices and enables the observation of four times more patients compared to not using edge processing. We also discuss the flow of data and processing, thus enabling the deployment of data visualization services and intelligent applications.}, } @article {pmid37571451, year = {2023}, author = {Saeed, S and Altamimi, SA and Alkayyal, NA and Alshehri, E and Alabbad, DA}, title = {Digital Transformation and Cybersecurity Challenges for Businesses Resilience: Issues and Recommendations.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {15}, pages = {}, pmid = {37571451}, issn = {1424-8220}, support = {000//Saudi Aramco Cybersecurity Chair, Imam Abdulrahman Bin Faisal University/ ; }, abstract = {This systematic literature review explores the digital transformation (DT) and cybersecurity implications for achieving business resilience. DT involves transitioning organizational processes to IT solutions, which can result in significant changes across various aspects of an organization. However, emerging technologies such as artificial intelligence, big data and analytics, blockchain, and cloud computing drive digital transformation worldwide while increasing cybersecurity risks for businesses undergoing this process. This literature survey article highlights the importance of comprehensive knowledge of cybersecurity threats during DT implementation to prevent interruptions due to malicious activities or unauthorized access by attackers aiming at sensitive information alteration, destruction, or extortion from users. Cybersecurity is essential to DT as it protects digital assets from cyber threats. We conducted a systematic literature review using the PRISMA methodology in this research. Our literature review found that DT has increased efficiency and productivity but poses new challenges related to cybersecurity risks, such as data breaches and cyber-attacks. 
We conclude by discussing future vulnerabilities associated with DT implementation and provide recommendations on how organizations can mitigate these risks through effective cybersecurity measures. The paper recommends a staged cybersecurity readiness framework for business organizations to be prepared to pursue digital transformation.}, } @article {pmid37566992, year = {2023}, author = {Abdul-Rahman, T and Ghosh, S and Lukman, L and Bamigbade, GB and Oladipo, OV and Amarachi, OR and Olanrewaju, OF and Toluwalashe, S and Awuah, WA and Aborode, AT and Lizano-Jubert, I and Audah, KA and Teslyk, TP}, title = {Inaccessibility and low maintenance of medical data archive in low-middle income countries: Mystery behind public health statistics and measures.}, journal = {Journal of infection and public health}, volume = {16}, number = {10}, pages = {1556-1561}, doi = {10.1016/j.jiph.2023.07.001}, pmid = {37566992}, issn = {1876-035X}, mesh = {Humans ; *Developing Countries ; *Public Health ; Retrospective Studies ; Africa ; }, abstract = {INTRODUCTION: Africa bears the largest burden of communicable and non-communicable diseases globally, yet it contributes only about 1 % of global research output, partly because of inaccessibility and low maintenance of medical data. Data is widely recognized as a crucial tool for improvement of population health. Despite the introduction of electronic health data systems in low-and middle-income countries (LMICs) to improve data quality, some LMICs still lack an efficient system to collect and archive data. This study aims to examine the underlying causes of data archive inaccessibility and poor maintenance in LMICS, and to highlight sustainable mitigation measures.

METHOD: Authors conducted a comprehensive search on PubMed, Google scholar, organization websites using the search string "data archive" or "medical data" or "public health statistics" AND "challenges" AND "maintenance" AND "Low Middle Income Countries" or "LMIC" to identify relevant studies and reports to be included in our review. All articles related to data archive in low and middle income countries were considered without restrictions due to scarcity of data.

RESULT: Medical data archives in LMICs face challenges impacting data quality. Insufficient training, organizational constraints, and limited infrastructure hinder archive maintenance. To improve, support for public datasets, digital literacy, and technology infrastructure is needed. Standardization, cloud solutions, and advanced technologies can enhance data management, while capacity building and training programs are crucial.

CONCLUSION: The creation and maintenance of data archives to facilitate the storage of retrospective datasets is critical to create reliable and consistent data to better equip the development of resilient health systems and surveillance of diseases in LMICs.}, } @article {pmid37566590, year = {2023}, author = {H S, M and T, SK and Gupta, P and McArdle, G}, title = {A Harris Hawk Optimisation system for energy and resource efficient virtual machine placement in cloud data centers.}, journal = {PloS one}, volume = {18}, number = {8}, pages = {e0289156}, pmid = {37566590}, issn = {1932-6203}, mesh = {Animals ; *Algorithms ; Cloud Computing ; Computer Simulation ; Workload ; *Falconiformes ; }, abstract = {Virtualisation is a major technology in cloud computing for optimising the cloud data centre's power usage. In the current scenario, most of the services are migrated to the cloud, putting more load on the cloud data centres. As a result, the data center's size expands resulting in increased energy usage. To address this problem, a resource allocation optimisation method that is both efficient and effective is necessary. The optimal utilisation of cloud infrastructure and optimisation algorithms plays a vital role. The cloud resources rely on the allocation policy of the virtual machine on cloud resources. A virtual machine placement technique, based on the Harris Hawk Optimisation (HHO) model for the cloud data centre is presented in this paper. The proposed HHO model aims to find the best place for virtual machines on suitable hosts with the least load and power consumption. PlanetLab's real-time workload traces are used for performance evaluation with existing PSO (Particle Swarm Optimisation) and PABFD (Best Fit Decreasing). The performance evaluation of the proposed method is done using power consumption, SLA, CPU utilisation, RAM utilisation, Execution time (ms) and the number of VM migrations. 
The performance evaluation is done using two simulation scenarios with scaling workload in scenario 1 and increasing resources for the virtual machine to study the performance in underloaded and overloaded conditions. Experimental results show that the proposed HHO algorithm improved execution time(ms) by 4%, had a 27% reduction in power consumption, a 16% reduction in SLA violation and an increase in resource utilisation by 17%. The HHO algorithm is also effective in handling dynamic and uncertain environments, making it suitable for real-world cloud infrastructures.}, } @article {pmid37564538, year = {2023}, author = {Intelligence And Neuroscience, C}, title = {Retracted: Cloud Computing to Tourism Economic Data Scheduling Algorithm under the Background of Image and Video.}, journal = {Computational intelligence and neuroscience}, volume = {2023}, number = {}, pages = {9815205}, pmid = {37564538}, issn = {1687-5273}, abstract = {[This retracts the article DOI: 10.1155/2022/3948221.].}, } @article {pmid37564503, year = {2023}, author = {Intelligence And Neuroscience, C}, title = {Retracted: Construction of Economic Security Early Warning System Based on Cloud Computing and Data Mining.}, journal = {Computational intelligence and neuroscience}, volume = {2023}, number = {}, pages = {9831835}, pmid = {37564503}, issn = {1687-5273}, abstract = {[This retracts the article DOI: 10.1155/2022/2080840.].}, } @article {pmid37564485, year = {2023}, author = {Intelligence And Neuroscience, C}, title = {Retracted: Public Security Video Image Detection System Construction Platform in Cloud Computing Environment.}, journal = {Computational intelligence and neuroscience}, volume = {2023}, number = {}, pages = {9873483}, pmid = {37564485}, issn = {1687-5273}, abstract = {[This retracts the article DOI: 10.1155/2022/4113803.].}, } @article {pmid37560357, year = {2023}, author = {Ouyang, W and Eliceiri, KW and Cimini, BA}, title = {Moving beyond the desktop: prospects for practical 
bioimage analysis via the web.}, journal = {Frontiers in bioinformatics}, volume = {3}, number = {}, pages = {1233748}, pmid = {37560357}, issn = {2673-7647}, support = {P41 GM135019/GM/NIGMS NIH HHS/United States ; }, abstract = {As biological imaging continues to rapidly advance, it results in increasingly complex image data, necessitating a reevaluation of conventional bioimage analysis methods and their accessibility. This perspective underscores our belief that a transition from desktop-based tools to web-based bioimage analysis could unlock immense opportunities for improved accessibility, enhanced collaboration, and streamlined workflows. We outline the potential benefits, such as reduced local computational demands and solutions to common challenges, including software installation issues and limited reproducibility. Furthermore, we explore the present state of web-based tools, hurdles in implementation, and the significance of collective involvement from the scientific community in driving this transition. In acknowledging the potential roadblocks and complexity of data management, we suggest a combined approach of selective prototyping and large-scale workflow application for optimal usage. 
Embracing web-based bioimage analysis could pave the way for the life sciences community to accelerate biological research, offering a robust platform for a more collaborative, efficient, and democratized science.}, } @article {pmid37556340, year = {2023}, author = {Liu, X and Zhao, X and Xia, Z and Feng, Q and Yu, P and Weng, J}, title = {Secure Outsourced SIFT: Accurate and Efficient Privacy-Preserving Image SIFT Feature Extraction.}, journal = {IEEE transactions on image processing : a publication of the IEEE Signal Processing Society}, volume = {32}, number = {}, pages = {4635-4648}, doi = {10.1109/TIP.2023.3295741}, pmid = {37556340}, issn = {1941-0042}, abstract = {Cloud computing has become an important IT infrastructure in the big data era; more and more users are motivated to outsource the storage and computation tasks to the cloud server for convenient services. However, privacy has become the biggest concern, and tasks are expected to be processed in a privacy-preserving manner. This paper proposes a secure SIFT feature extraction scheme with better integrity, accuracy and efficiency than the existing methods. SIFT includes lots of complex steps, including the construction of DoG scale space, extremum detection, extremum location adjustment, rejecting of extremum point with low contrast, eliminating of the edge response, orientation assignment, and descriptor generation. These complex steps need to be disassembled into elementary operations such as addition, multiplication, comparison for secure implementation. We adopt a serial of secret-sharing protocols for better accuracy and efficiency. In addition, we design a secure absolute value comparison protocol to support absolute value comparison operations in the secure SIFT feature extraction. The SIFT feature extraction steps are completely implemented in the ciphertext domain. And the communications between the clouds are appropriately packed to reduce the communication rounds. 
We carefully analyzed the accuracy and efficiency of our scheme. The experimental results show that our scheme outperforms the existing state-of-the-art.}, } @article {pmid37554555, year = {2023}, author = {Liu, X and Li, X and Gao, L and Zhang, J and Qin, D and Wang, K and Li, Z}, title = {Early-season and refined mapping of winter wheat based on phenology algorithms - a case of Shandong, China.}, journal = {Frontiers in plant science}, volume = {14}, number = {}, pages = {1016890}, pmid = {37554555}, issn = {1664-462X}, abstract = {Winter wheat is one of the major food crops in China, and timely and effective early-season identification of winter wheat is crucial for crop yield estimation and food security. However, traditional winter wheat mapping is based on post-season identification, which has a lag and relies heavily on sample data. Early-season identification of winter wheat faces the main difficulties of weak remote sensing response of the vegetation signal at the early growth stage, difficulty of acquiring sample data on winter wheat in the current season in real time, interference of crops in the same period, and limited image resolution. In this study, an early-season refined mapping method with winter wheat phenology information as priori knowledge is developed based on the Google Earth Engine cloud platform by using Sentinel-2 time series data as the main data source; these data are automated and highly interpretable. The normalized differential phenology index (NDPI) is adopted to enhance the weak vegetation signal at the early growth stage of winter wheat, and two winter wheat phenology feature enhancement indices based on NDPI, namely, wheat phenology differential index (WPDI) and normalized differential wheat phenology index (NDWPI) are developed. 
To address the issue of "different objects with the same spectra characteristics" between winter wheat and garlic, a plastic mulched index (PMI) is established through quantitative spectral analysis based on the differences in early planting patterns between winter wheat and garlic. The identification accuracy of the method is 82.64% and 88.76% in the early overwintering and regreening periods, respectively. These results were consistent with official statistics (R2 = 0.96 and 0.98, respectively). Generalization analysis demonstrated the spatiotemporal transferability of the method across different years and regions. In conclusion, the proposed methodology can obtain highly precise spatial distribution and planting area information of winter wheat 4-6 months before harvest. It provides theoretical and methodological guidance for early crop identification and has good scientific research and application value.}, } @article {pmid37549337, year = {2023}, author = {Wei, L and Xu, M and Liu, Z and Jiang, C and Lin, X and Hu, Y and Wen, X and Zou, R and Peng, C and Lin, H and Wang, G and Yang, L and Fang, L and Yang, M and Zhang, P}, title = {Hit Identification Driven by Combining Artificial Intelligence and Computational Chemistry Methods: A PI5P4K-β Case Study.}, journal = {Journal of chemical information and modeling}, volume = {63}, number = {16}, pages = {5341-5355}, doi = {10.1021/acs.jcim.3c00543}, pmid = {37549337}, issn = {1549-960X}, mesh = {*Artificial Intelligence ; *Computational Chemistry ; Drug Design ; Drug Discovery/methods ; }, abstract = {Computer-aided drug design (CADD), especially artificial intelligence-driven drug design (AIDD), is increasingly used in drug discovery. In this paper, a novel and efficient workflow for hit identification was developed within the ID4Inno drug discovery platform, featuring innovative artificial intelligence, high-accuracy computational chemistry, and high-performance cloud computing. 
The workflow was validated by discovering a few potent hit compounds (best IC50 is ∼0.80 μM) against PI5P4K-β, a novel anti-cancer target. Furthermore, by applying the tools implemented in ID4Inno, we managed to optimize these hit compounds and finally obtained five hit series with different scaffolds, all of which showed high activity against PI5P4K-β. These results demonstrate the effectiveness of ID4Inno in driving hit identification based on artificial intelligence, computational chemistry, and cloud computing.}, } @article {pmid37549000, year = {2023}, author = {Guan, V and Zhou, C and Wan, H and Zhou, R and Zhang, D and Zhang, S and Yang, W and Voutharoja, BP and Wang, L and Win, KT and Wang, P}, title = {A Novel Mobile App for Personalized Dietary Advice Leveraging Persuasive Technology, Computer Vision, and Cloud Computing: Development and Usability Study.}, journal = {JMIR formative research}, volume = {7}, number = {}, pages = {e46839}, pmid = {37549000}, issn = {2561-326X}, abstract = {BACKGROUND: The Australian Dietary Guidelines (ADG) translate the best available evidence in nutrition into food choice recommendations. However, adherence to the ADG is poor in Australia. Given that following a healthy diet can be a potentially cost-effective strategy for lowering the risk of chronic diseases, there is an urgent need to develop novel technologies for individuals to improve their adherence to the ADG.

OBJECTIVE: This study describes the development process and design of a prototype mobile app for personalized dietary advice based on the ADG for adults in Australia, with the aim of exploring the usability of the prototype. The goal of the prototype was to provide personalized, evidence-based support for self-managing food choices in real time.

METHODS: The guidelines of the design science paradigm were applied to guide the design, development, and evaluation of a progressive web app using Amazon Web Services Elastic Compute Cloud services via iterations. The food layer of the Nutrition Care Process, the strategies of cognitive behavioral theory, and the ADG were translated into prototype features guided by the Persuasive Systems Design model. A gain-framed approach was adopted to promote positive behavior changes. A cross-modal image-to-recipe retrieval model under an Apache 2.0 license was deployed for dietary assessment. A survey using the Mobile Application Rating Scale and semistructured in-depth interviews were conducted to explore the usability of the prototype through convenience sampling (N=15).

RESULTS: The prominent features of the prototype included the use of image-based dietary assessment, food choice tracking with immediate feedback leveraging gamification principles, personal goal setting for food choices, and the provision of recipe ideas and information on the ADG. The overall prototype quality score was "acceptable," with a median of 3.46 (IQR 2.78-3.81) out of 5 points. The median score of the perceived impact of the prototype on healthy eating based on the ADG was 3.83 (IQR 2.75-4.08) out of 5 points. In-depth interviews identified the use of gamification for tracking food choices and innovation in the image-based dietary assessment as the main drivers of the positive user experience of using the prototype.

CONCLUSIONS: A novel evidence-based prototype mobile app was successfully developed by leveraging a cross-disciplinary collaboration. A detailed description of the development process and design of the prototype enhances its transparency and provides detailed insights into its creation. This study provides a valuable example of the development of a novel, evidence-based app for personalized dietary advice on food choices using recent advancements in computer vision. A revised version of this prototype is currently under development.}, } @article {pmid37541856, year = {2023}, author = {Xi, N and Liu, J and Li, Y and Qin, B}, title = {Decentralized access control for secure microservices cooperation with blockchain.}, journal = {ISA transactions}, volume = {141}, number = {}, pages = {44-51}, doi = {10.1016/j.isatra.2023.07.018}, pmid = {37541856}, issn = {1879-2022}, abstract = {With the rapid advancement of cloud-native computing, the microservice with high concurrency and low coupling has ushered in an unprecedented period of vigorous development. However, due to the mutability and complexity of cooperation procedures, it is difficult to realize high-efficient security management on these microservices. Traditional centralized access control has the defects of relying on a centralized cloud manager and a single point of failure. Meanwhile, decentralized mechanisms are defective by inconsistent policies defined by different participants. This paper first proposes a blockchain-based distributed access control policies and scheme, especially for microservices cooperation with dynamic access policies. We store the authorized security policies on the blockchain to solve the inconsistent policy problem while enabling individual management of personalized access policies by the providers rather than a central authority. Then we propose a graph-based decision-making scheme to achieve an efficient access control for microservices cooperation. 
Through the evaluations and experiments, it shows that our solution can realize effective distributed access control at an affordable cost.}, } @article {pmid37540975, year = {2023}, author = {Faber, DA and Hinman, JM and Knauer, EM and Hechenbleikner, EM and Badell, IR and Lin, E and Srinivasan, JK and Chahine, AA and Papandria, DJ}, title = {Implementation of an Online Intraoperative Assessment of Technical Performance for Surgical Trainees.}, journal = {The Journal of surgical research}, volume = {291}, number = {}, pages = {574-585}, doi = {10.1016/j.jss.2023.07.008}, pmid = {37540975}, issn = {1095-8673}, mesh = {*Internship and Residency ; Clinical Competence ; Education, Medical, Graduate/methods ; Feedback ; Educational Measurement/methods ; *General Surgery/education ; }, abstract = {INTRODUCTION: Assessment of surgical resident technical performance is an integral component of any surgical training program. Timely assessment delivered in a structured format is a critical step to enhance technical skills, but residents often report that the quality and quantity of timely feedback received is lacking. Moreover, the absence of written feedback with specificity can allow residents to seemingly progress in their operative milestones as a junior resident, but struggle as they progress into their postgraduate year 3 and above. We therefore designed and implemented a web-based intraoperative assessment tool and corresponding summary "dashboard" to facilitate real-time assessment and documentation of technical performance.

MATERIALS AND METHODS: A web form was designed leveraging a cloud computing platform and implementing a modified Ottawa Surgical Competency Operating Room Evaluation instrument; this included additional, procedure-specific criteria for select operations. A link to this was provided to residents via email and to all surgical faculty as a Quick Response code. Residents open and complete a portion of the form on a smartphone, then relinquish the device to an attending surgeon who then completes and submits the assessment. The data are then transferred to a secure web-based reporting interface; each resident (together with a faculty advisor) can then access and review all completed assessments.

RESULTS: The Assessment form was activated in June 2021 and formally introduced to all residents in July 2021, with residents required to complete at least one assessment per month. Residents with less predictable access to operative procedures (night float or Intensive Care Unit) were exempted from the requirement on those months. To date a total of 559 assessments have been completed for operations performed by 56 trainees, supervised by 122 surgical faculty and senior trainees. The mean number of procedures assessed per resident was 10.0 and the mean number per assessor was 4.6. Resident initiation of Intraoperative Assessments has increased since the tool was introduced and scores for technical and nontechnical performance reliably differentiate residents by seniority.

CONCLUSIONS: This novel system demonstrates that an online, resident-initiated technical assessment tool is feasible to implement and scale. This model's requirement that the attending enter performance ratings into the trainee's electronic device ensures that feedback is delivered directly to the trainee. Whether this aspect of our assessment ensures more direct and specific (and therefore potentially actionable) feedback is a focus for future study. Our use of commercial cloud computing services should permit cost-effective adoption of similar systems at other training programs.}, } @article {pmid37538659, year = {2023}, author = {Intelligence And Neuroscience, C}, title = {Retracted: Optimization of Online Course Platform for Piano Preschool Education Based on Internet Cloud Computing System.}, journal = {Computational intelligence and neuroscience}, volume = {2023}, number = {}, pages = {9856831}, pmid = {37538659}, issn = {1687-5273}, abstract = {[This retracts the article DOI: 10.1155/2022/6525866.].}, } @article {pmid37538595, year = {2023}, author = {Intelligence And Neuroscience, C}, title = {Retracted: The Use of Internet of Things and Cloud Computing Technology in the Performance Appraisal Management of Innovation Capability of University Scientific Research Team.}, journal = {Computational intelligence and neuroscience}, volume = {2023}, number = {}, pages = {9806856}, pmid = {37538595}, issn = {1687-5273}, abstract = {[This retracts the article DOI: 10.1155/2022/9423718.].}, } @article {pmid37529586, year = {2023}, author = {Aja, D and Miyittah, M and Angnuureng, DB}, title = {Nonparametric assessment of mangrove ecosystem in the context of coastal resilience in Ghana.}, journal = {Ecology and evolution}, volume = {13}, number = {8}, pages = {e10388}, pmid = {37529586}, issn = {2045-7758}, abstract = {Cloud cover effects make it difficult to evaluate the mangrove ecosystem in tropical locations using solely optical satellite data. 
Therefore, it is essential to conduct a more precise evaluation using data from several sources and appropriate models in order to manage the mangrove ecosystem as effectively as feasible. In this study, the status of the mangrove ecosystem and its potential contribution to coastal resilience were evaluated using the Google Earth Engine (GEE) and the InVEST model. The GEE was used to map changes in mangrove and other land cover types for the years 2009 and 2019 by integrating both optical and radar data. The quantity allocation disagreement index (QADI) was used to assess the classification accuracy. Mangrove height and aboveground biomass density were estimated using GEE by extracting their values from radar image clipped with a digital elevation model and mangrove vector file. A universal allometric equation that relates canopy height to aboveground biomass was applied. The InVEST model was used to calculate a hazard index of every 250 m of the shoreline with and without mangrove ecosystem. Our result showed that about 16.9% and 21% of mangrove and other vegetation cover were lost between 2009 and 2019. However, water body and bare land/built-up areas increased by 7% and 45%, respectively. The overall accuracy of 2009 and 2019 classifications was 99.6% (QADI = 0.00794) and 99.1% (QADI = 0.00529), respectively. Mangrove height and aboveground biomass generally decreased from 12.7 to 6.3 m and from 105 to 88 Mg/ha on average. The vulnerability index showed that 23%, 51% and 26% of the coastal segment in the presence of mangrove fall under very low/low, moderate and high risks, respectively. Whereas in the absence of mangrove, 8%, 38%, 39% and 15% fall under low, moderate, high and very high-risk zones, respectively. 
This study will among other things help the stakeholders in coastal management and marine spatial planning to identify the need to focus on conservation practices.}, } @article {pmid37521954, year = {2023}, author = {Bommu, S and M, AK and Babburu, K and N, S and Thalluri, LN and G, VG and Gopalan, A and Mallapati, PK and Guha, K and Mohammad, HR and S, SK}, title = {Smart City IoT System Network Level Routing Analysis and Blockchain Security Based Implementation.}, journal = {Journal of electrical engineering & technology}, volume = {18}, number = {2}, pages = {1351-1368}, pmid = {37521954}, issn = {2093-7423}, abstract = {This paper demonstrates, network-level performance analysis and implementation of smart city Internet of Things (IoT) system with Infrastructure as a Service (IaaS) level cloud computing architecture. The smart city IoT network topology performance is analyzed at the simulation level using the NS3 simulator by extracting most of the performance-deciding parameters. The performance-enhanced smart city topology is practically implemented in IaaS level architecture. The intended smart city IoT system can monitor the principal parameters like video surveillance with a thermal camera (to identify the virus-like COVID-19 infected people), transport, water quality, solar radiation, sound pollution, air quality (O3, NO2, CO, Particles), parking zones, iconic places, E-suggestions, PRO information over low power wide area network in 61.88 km × 61.88 km range. Primarily we have addressed the IoT network-level routing and quality of service (QoS) challenges and implementation level security challenges. The simulation level network topology analysis is performed to improve the routing and QoS. 
Blockchain technology-based decentralization is adopted to enrich the IoT system performance in terms of security.}, } @article {pmid37514843, year = {2023}, author = {Montiel-Caminos, J and Hernandez-Gonzalez, NG and Sosa, J and Montiel-Nelson, JA}, title = {Integer Arithmetic Algorithm for Fundamental Frequency Identification of Oceanic Currents.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {14}, pages = {}, pmid = {37514843}, issn = {1424-8220}, support = {PID2020-117251RB-C21//Ministerio de Ciencia e Innovación de España - Agencia Estatal de Investigación/ ; TED2021-131470B-I00//Ministerio de Ciencia e Innovación de España - Agencia Estatal de Investigación/ ; }, abstract = {Underwater sensor networks play a crucial role in collecting valuable data to monitor offshore aquaculture infrastructures. The number of deployed devices not only impacts the bandwidth for a highly constrained communication environment, but also the cost of the sensor network. On the other hand, industrial and literature current meters work as raw data loggers, and most of the calculations to determine the fundamental frequencies are performed offline on a desktop computer or in the cloud. Belonging to the edge computing research area, this paper presents an algorithm to extract the fundamental frequencies of water currents in an underwater sensor network deployed in offshore aquaculture infrastructures. The target sensor node is based on a commercial ultra-low-power microcontroller. The proposed fundamental frequency identification algorithm only requires the use of an integer arithmetic unit. Our approach exploits the mathematical properties of the finite impulse response (FIR) filtering in the integer domain. The design and implementation of the presented algorithm are discussed in detail in terms of FIR tuning/coefficient selection, memory usage and variable domain for its mathematical formulation aimed at reducing the computational effort required. 
The approach is validated using a shallow water current model and real-world raw data from an offshore aquaculture infrastructure. The extracted frequencies have a maximum error below 4%.}, } @article {pmid37514672, year = {2023}, author = {Zhang, M and Chen, Y and Qian, C}, title = {Fooling Examples: Another Intriguing Property of Neural Networks.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {14}, pages = {}, pmid = {37514672}, issn = {1424-8220}, support = {6142111//Foundation of National Key Laboratory of Science and Technology on Information System Security/ ; }, abstract = {Neural networks have been proven to be vulnerable to adversarial examples; these are examples that can be recognized by both humans and neural networks, although neural networks give incorrect predictions. As an intriguing property of neural networks, adversarial examples pose a serious threat to the secure application of neural networks. In this article, we present another intriguing property of neural networks: the fact that well-trained models believe some examples to be recognizable objects (often with high confidence), while humans cannot recognize such examples. We refer to these as "fooling examples". Specifically, we take inspiration from the construction of adversarial examples and develop an iterative method for generating fooling examples. The experimental results show that fooling examples can not only be easily generated, with a success rate of nearly 100% in the white-box scenario, but also exhibit strong transferability across different models in the black-box scenario. Tests on the Google Cloud Vision API show that fooling examples can also be recognized by real-world computer vision systems.
Our findings reveal a new cognitive deficit of neural networks, and we hope that these potential security threats will be addressed in future neural network applications.}, } @article {pmid37514557, year = {2023}, author = {Alzuhair, A and Alghaihab, A}, title = {The Design and Optimization of an Acoustic and Ambient Sensing AIoT Platform for Agricultural Applications.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {14}, pages = {}, pmid = {37514557}, issn = {1424-8220}, support = {IFKSUOR3-109-1//Ministry of Education, Saudi Arabia/ ; }, abstract = {The use of technology in agriculture has been gaining significant attention recently. By employing advanced tools and automation and leveraging the latest advancements in the Internet of Things (IoT) and artificial intelligence (AI), the agricultural sector is witnessing improvements in its crop yields and overall efficiency. This paper presents the design and performance analysis of a machine learning (ML) model for agricultural applications involving acoustic sensing. This model is integrated into an efficient Artificial Intelligence of Things (AIoT) platform tailored for agriculture. The model is then used in the design of a communication network architecture and for determining the distribution of the computing load between edge devices and the cloud. The study focuses on the design, analysis, and optimization of AI deployment for reliable classification models in agricultural applications. Both the architectural level and hardware implementation are taken into consideration when designing the radio module and computing unit. Additionally, the study encompasses the design and performance analysis of the hardware used to implement the sensor node specifically developed for sound classification in agricultural applications. 
The novelty of this work lies in the optimization of the integrated sensor node, which combines the proposed ML model and wireless network, resulting in an agricultural-specific AIoT platform. This co-design enables significant improvements in the performance and efficiency for acoustic and ambient sensing applications.}, } @article {pmid37510005, year = {2023}, author = {Fu, M and Zhang, C and Hu, C and Wu, T and Dong, J and Zhu, L}, title = {Achieving Verifiable Decision Tree Prediction on Hybrid Blockchains.}, journal = {Entropy (Basel, Switzerland)}, volume = {25}, number = {7}, pages = {}, pmid = {37510005}, issn = {1099-4300}, support = {No. 62202051//National Natural Science Foundation of China/ ; Nos. 2021M700435, 2021TQ0042//China Postdoctoral Science Foundation/ ; Grant Nos. 2021YFB2700500 and 2021YFB2700503//National Key R&D Program of China/ ; No. 2022B1212010005//Guangdong Provincial Key Laboratory of Novel Security Intelligence Technologies/ ; }, abstract = {Machine learning has become increasingly popular in academic and industrial communities and has been widely implemented in various online applications due to its powerful ability to analyze and use data. Among all the machine learning models, decision tree models stand out due to their great interpretability and simplicity, and have been implemented in cloud computing services for various purposes. Despite its great success, the integrity issue of online decision tree prediction is a growing concern. The correctness and consistency of decision tree predictions in cloud computing systems need more security guarantees since verifying the correctness of the model prediction remains challenging. Meanwhile, blockchain has a promising prospect in two-party machine learning services as the immutable and traceable characteristics satisfy the verifiable settings in machine learning services. 
In this paper, we initiate the study of decision tree prediction services on blockchain systems and propose VDT, a Verifiable Decision Tree prediction scheme for decision tree prediction. Specifically, by leveraging the Merkle tree and hash function, the scheme allows the service provider to generate a verification proof to convince the client that the output of the decision tree prediction is correctly computed on a particular data sample. It is further extended to an update method for a verifiable decision tree to modify the decision tree model efficiently. We prove the security of the proposed VDT schemes and evaluate their performance using real datasets. Experimental evaluations show that our scheme requires less than one second to produce verifiable proof.}, } @article {pmid37509978, year = {2023}, author = {Ye, C and Tan, S and Wang, Z and Shi, B and Shi, L}, title = {Hybridized Hierarchical Watermarking and Selective Encryption for Social Image Security.}, journal = {Entropy (Basel, Switzerland)}, volume = {25}, number = {7}, pages = {}, pmid = {37509978}, issn = {1099-4300}, support = {61502154//National Natural Science Foundation of China/ ; }, abstract = {With the advent of cloud computing and social multimedia communication, more and more social images are being collected on social media platforms, such as Facebook, TikTok, Flirk, and YouTube. The amount of social images produced and disseminated is rapidly increasing. Meanwhile, cloud computing-assisted social media platforms have made social image dissemination more and more efficient. There exists an unstoppable trend of fake/unauthorized social image dissemination. The growth of social image sharing underscores potential security risks for illegal use, such as image forgery, malicious copying, piracy exposure, plagiarism, and misappropriation. Therefore, secure social image dissemination has become urgent and critical on social media platforms. 
The authors propose a secure scheme for social image dissemination on social media platforms. The main objective is to make a map between the tree structure Haar (TSH) transform and the hierarchical community structure of a social network. First, perform the TSH transform on a social image using social network analysis (SNA). Second, all users in a social media platform are coded using SNA. Third, watermarking and encryption are performed in a compressed domain for protecting social image dissemination. Finally, the encrypted and watermarked contents are delivered to users via a hybrid multicast-unicast scheme. The use of encryption along with watermarking can provide double protection for social image dissemination. The theory analysis and experimental results demonstrate the effectiveness of the proposed scheme.}, } @article {pmid37503119, year = {2023}, author = {Hitz, BC and Lee, JW and Jolanki, O and Kagda, MS and Graham, K and Sud, P and Gabdank, I and Strattan, JS and Sloan, CA and Dreszer, T and Rowe, LD and Podduturi, NR and Malladi, VS and Chan, ET and Davidson, JM and Ho, M and Miyasato, S and Simison, M and Tanaka, F and Luo, Y and Whaling, I and Hong, EL and Lee, BT and Sandstrom, R and Rynes, E and Nelson, J and Nishida, A and Ingersoll, A and Buckley, M and Frerker, M and Kim, DS and Boley, N and Trout, D and Dobin, A and Rahmanian, S and Wyman, D and Balderrama-Gutierrez, G and Reese, F and Durand, NC and Dudchenko, O and Weisz, D and Rao, SSP and Blackburn, A and Gkountaroulis, D and Sadr, M and Olshansky, M and Eliaz, Y and Nguyen, D and Bochkov, I and Shamim, MS and Mahajan, R and Aiden, E and Gingeras, T and Heath, S and Hirst, M and Kent, WJ and Kundaje, A and Mortazavi, A and Wold, B and Cherry, JM}, title = {The ENCODE Uniform Analysis Pipelines.}, journal = {Research square}, volume = {}, number = {}, pages = {}, pmid = {37503119}, support = {R01 HG009318/HG/NHGRI NIH HHS/United States ; }, abstract = {The Encyclopedia of DNA elements 
(ENCODE) project is a collaborative effort to create a comprehensive catalog of functional elements in the human genome. The current database comprises more than 19000 functional genomics experiments across more than 1000 cell lines and tissues using a wide array of experimental techniques to study the chromatin structure, regulatory and transcriptional landscape of the Homo sapiens and Mus musculus genomes. All experimental data, metadata, and associated computational analyses created by the ENCODE consortium are submitted to the Data Coordination Center (DCC) for validation, tracking, storage, and distribution to community resources and the scientific community. The ENCODE project has engineered and distributed uniform processing pipelines in order to promote data provenance and reproducibility as well as allow interoperability between genomic resources and other consortia. All data files, reference genome versions, software versions, and parameters used by the pipelines are captured and available via the ENCODE Portal. The pipeline code, developed using Docker and Workflow Description Language (WDL; https://openwdl.org/) is publicly available in GitHub, with images available on Dockerhub (https://hub.docker.com), enabling access to a diverse range of biomedical researchers. ENCODE pipelines maintained and used by the DCC can be installed to run on personal computers, local HPC clusters, or in cloud computing environments via Cromwell. Access to the pipelines and data via the cloud allows small labs the ability to use the data or software without access to institutional compute clusters. 
Standardization of the computational methodologies for analysis and quality control leads to comparable results from different ENCODE collections - a prerequisite for successful integrative analyses.}, } @article {pmid37501454, year = {2023}, author = {Swathi, HY and Shivakumar, G}, title = {Audio-visual multi-modality driven hybrid feature learning model for crowd analysis and classification.}, journal = {Mathematical biosciences and engineering : MBE}, volume = {20}, number = {7}, pages = {12529-12561}, doi = {10.3934/mbe.2023558}, pmid = {37501454}, issn = {1551-0018}, abstract = {The high pace emergence in advanced software systems, low-cost hardware and decentralized cloud computing technologies have broadened the horizon for vision-based surveillance, monitoring and control. However, complex and inferior feature learning over visual artefacts or video streams, especially under extreme conditions confine majority of the at-hand vision-based crowd analysis and classification systems. Retrieving event-sensitive or crowd-type sensitive spatio-temporal features for the different crowd types under extreme conditions is a highly complex task. Consequently, it results in lower accuracy and hence low reliability that confines existing methods for real-time crowd analysis. Despite numerous efforts in vision-based approaches, the lack of acoustic cues often creates ambiguity in crowd classification. On the other hand, the strategic amalgamation of audio-visual features can enable accurate and reliable crowd analysis and classification. Considering it as motivation, in this research a novel audio-visual multi-modality driven hybrid feature learning model is developed for crowd analysis and classification. In this work, a hybrid feature extraction model was applied to extract deep spatio-temporal features by using Gray-Level Co-occurrence Metrics (GLCM) and AlexNet transferrable learning model. 
After extracting the different GLCM features and AlexNet deep features, horizontal concatenation was done to fuse the different feature sets. Similarly, for acoustic feature extraction, the audio samples (from the input video) were processed for static (fixed size) sampling, pre-emphasis, block framing and Hann windowing, followed by acoustic feature extraction like GTCC, GTCC-Delta, GTCC-Delta-Delta, MFCC, Spectral Entropy, Spectral Flux, Spectral Slope and Harmonics to Noise Ratio (HNR). Finally, the extracted audio-visual features were fused to yield a composite multi-modal feature set, which is processed for classification using the random forest ensemble classifier. The multi-class classification yields a crowd-classification accuracy of (98.26%), precision (98.89%), sensitivity (94.82%), specificity (95.57%), and F-Measure of 98.84%. The robustness of the proposed multi-modality-based crowd analysis model confirms its suitability towards real-world crowd detection and classification tasks.}, } @article {pmid37491843, year = {2023}, author = {Kong, HJ}, title = {Classification of dental implant systems using cloud-based deep learning algorithm: an experimental study.}, journal = {Journal of Yeungnam medical science}, volume = {40}, number = {Suppl}, pages = {S29-S36}, pmid = {37491843}, issn = {2799-8010}, abstract = {BACKGROUND: This study aimed to evaluate the accuracy and clinical usability of implant system classification using automated machine learning on a Google Cloud platform.

METHODS: Four dental implant systems were selected: Osstem TSIII, Osstem USII, Biomet 3i Osseotite External, and Dentsply Sirona Xive. A total of 4,800 periapical radiographs (1,200 for each implant system) were collected and labeled based on electronic medical records. Regions of interest were manually cropped to 400×800 pixels, and all images were uploaded to Google Cloud storage. Approximately 80% of the images were used for training, 10% for validation, and 10% for testing. Google automated machine learning (AutoML) Vision automatically executed a neural architecture search technology to apply an appropriate algorithm to the uploaded data. A single-label image classification model was trained using AutoML. The performance of the model was evaluated in terms of accuracy, precision, recall, specificity, and F1 score.

RESULTS: The accuracy, precision, recall, specificity, and F1 score of the AutoML Vision model were 0.981, 0.963, 0.961, 0.985, and 0.962, respectively. Osstem TSIII had an accuracy of 100%. Osstem USII and 3i Osseotite External were most often confused in the confusion matrix.

CONCLUSION: Deep learning-based AutoML on a cloud platform showed high accuracy in the classification of dental implant systems as a fine-tuned convolutional neural network. Higher-quality images from various implant systems will be required to improve the performance and clinical usability of the model.}, } @article {pmid37486501, year = {2023}, author = {Didachos, C and Kintos, DP and Fousteris, M and Mylonas, P and Kanavos, A}, title = {An Optimized Cloud Computing Method for Extracting Molecular Descriptors.}, journal = {Advances in experimental medicine and biology}, volume = {1424}, number = {}, pages = {247-254}, pmid = {37486501}, issn = {0065-2598}, mesh = {*Cloud Computing ; *Algorithms ; }, abstract = {Extracting molecular descriptors from chemical compounds is an essential preprocessing phase for developing accurate classification models. Supervised machine learning algorithms offer the capability to detect "hidden" patterns that may exist in a large dataset of compounds, which are represented by their molecular descriptors. Assuming that molecules with similar structure tend to share similar physicochemical properties, large chemical libraries can be screened by applying similarity sourcing techniques in order to detect potential bioactive compounds against a molecular target. However, the process of generating these compound features is time-consuming. 
Our proposed methodology not only employs cloud computing to accelerate the process of extracting molecular descriptors but also introduces an optimized approach to utilize the computational resources in the most efficient way.}, } @article {pmid37475814, year = {2023}, author = {International, BR}, title = {Retracted: Medical Big Data and Postoperative Nursing of Fracture Patients Based on Cloud Computing.}, journal = {BioMed research international}, volume = {2023}, number = {}, pages = {9768264}, pmid = {37475814}, issn = {2314-6141}, abstract = {[This retracts the article DOI: 10.1155/2022/4090235.].}, } @article {pmid37469244, year = {2023}, author = {Pant, A and Miri, N and Bhagroo, S and Mathews, JA and Nazareth, DP}, title = {Monitor unit verification for Varian TrueBeam VMAT plans using Monte Carlo calculations and phase space data.}, journal = {Journal of applied clinical medical physics}, volume = {24}, number = {10}, pages = {e14063}, pmid = {37469244}, issn = {1526-9914}, mesh = {Humans ; *Radiotherapy, Intensity-Modulated/methods ; Computer Simulation ; Software ; Particle Accelerators ; Radiotherapy Dosage ; Monte Carlo Method ; Radiotherapy Planning, Computer-Assisted/methods ; }, abstract = {To use the open-source Monte Carlo (MC) software calculations for TPS monitor unit verification of VMAT plans, delivered with the Varian TrueBeam linear accelerator, and compare the results with a commercial software product, following the guidelines set in AAPM Task Group 219. The TrueBeam is modeled in EGSnrc using the Varian-provided phase-space files. Thirteen VMAT TrueBeam treatment plans representing various anatomical regions were evaluated, comprising 37 treatment arcs. VMAT plans simulations were performed on a computing cluster, using 10[7] -10[9] particle histories per arc. Point dose differences at five reference points per arc were compared between Eclipse, MC, and the commercial software, MUCheck. 
MC simulation with 5 × 10[7] histories per arc offered good agreement with Eclipse and a reasonable average calculation time of 9-18 min per full plan. The average absolute difference was 3.0%, with only 22% of all points exceeding the 5% action limit. In contrast, the MUCheck average absolute difference was 8.4%, with 60% of points exceeding the 5% dose difference. Lung plans were particularly problematic for MUCheck, with an average absolute difference of approximately 16%. Our EGSnrc-based MC framework can be used for the MU verification of VMAT plans calculated for the Varian TrueBeam; furthermore, our phase space approach can be adapted to other treatment devices by using appropriate phase space files. The use of 5 × 10[7] histories consistently satisfied the 5% action limit across all plan types for the majority of points, performing significantly better than a commercial MU verification system, MUCheck. As faster processors and cloud computing facilities become even more widely available, this approach can be readily implemented in clinical settings.}, } @article {pmid37467974, year = {2023}, author = {Mhanna, S and Halloran, LJS and Zwahlen, F and Asaad, AH and Brunner, P}, title = {Using machine learning and remote sensing to track land use/land cover changes due to armed conflict.}, journal = {The Science of the total environment}, volume = {898}, number = {}, pages = {165600}, doi = {10.1016/j.scitotenv.2023.165600}, pmid = {37467974}, issn = {1879-1026}, mesh = {*Remote Sensing Technology ; *Conservation of Natural Resources/methods ; Agriculture/methods ; Environmental Monitoring/methods ; Climate ; }, abstract = {Armed conflicts have detrimental impacts on the environment, including land systems. 
The prevailing understanding of the relation between Land Use/Land Cover (LULC) and armed conflict fails to fully recognize the complexity of their dynamics - a shortcoming that could undermine food security and sustainable land/water resources management in conflict settings. The Syrian portion of the transboundary Orontes River Basin (ORB) has been a site of violent conflict since 2013. Correspondingly, the Lebanese and Turkish portions of the ORB have seen large influxes of refugees. A major challenge in any geoscientific investigation in this region, specifically the Syrian portion, is the unavailability of directly-measured "ground truth" data. To circumvent this problem, we develop a novel methodology that combines remote sensing products, machine learning techniques and quasi-experimental statistical analysis to better understand LULC changes in the ORB between 2004 and 2022. Through analysis of the resulting annual LULC maps, we can draw several quantitative conclusions. Cropland areas decreased by 21-24 % in Syria's conflict hotspot zones after 2013, whereas a 3.4-fold increase was detected in Lebanon. The development of refugee settlements was also tracked in Lebanon and on the Syrian/Turkish borders, revealing different LULC patterns that depend on settlement dynamics. The results highlight the importance of understanding the heterogeneous spatio-temporal LULC changes in conflict-affected and refugee-hosting countries. 
The developed methodology is a flexible, cloud-based approach that can be applied to a wide variety of LULC investigations related to conflict, policy and climate.}, } @article {pmid37453975, year = {2024}, author = {Venkataswamy, R and Janamala, V and Cherukuri, RC}, title = {Realization of Humanoid Doctor and Real-Time Diagnostics of Disease Using Internet of Things, Edge Impulse Platform, and ChatGPT.}, journal = {Annals of biomedical engineering}, volume = {52}, number = {4}, pages = {738-740}, pmid = {37453975}, issn = {1573-9686}, mesh = {Humans ; *Artificial Intelligence ; *Internet of Things ; Health Personnel ; }, abstract = {Humanoid doctor is an AI-based robot that featured remote bi-directional communication and is embedded with disruptive technologies. Accurate and real-time responses are the main characteristics of a humanoid doctor which diagnoses disease in a patient. The patient details are obtained by Internet of Things devices, edge devices, and text formats. The inputs from the patient are processed by the humanoid doctor, and it provides its opinion to the patient. The historical patient data are trained using cloud artificial intelligence platform and the model is tested against the patient sample data acquired using medical IoT and edge devices. Disease is identified at three different stages and analyzed. The humanoid doctor is expected to identify the diseases well in comparison with human healthcare professionals. The humanoid doctor is under-trusted because of the lack of a multi-featured accurate model, accessibility, availability, and standardization. 
In this letter, patient input, artificial intelligence, and response zones are encapsulated and the humanoid doctor is realized.}, } @article {pmid37448004, year = {2023}, author = {Mangalampalli, S and Swain, SK and Chakrabarti, T and Chakrabarti, P and Karri, GR and Margala, M and Unhelkar, B and Krishnan, SB}, title = {Prioritized Task-Scheduling Algorithm in Cloud Computing Using Cat Swarm Optimization.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {13}, pages = {}, pmid = {37448004}, issn = {1424-8220}, mesh = {*Cloud Computing ; *Algorithms ; Workload ; }, abstract = {Effective scheduling algorithms are needed in the cloud paradigm to leverage services to customers seamlessly while minimizing the makespan, energy consumption and SLA violations. The ineffective scheduling of resources while not considering the suitability of tasks will affect the quality of service of the cloud provider, and much more energy will be consumed in the running of tasks by the inefficient provisioning of resources, thereby taking an enormous amount of time to process tasks, which affects the makespan. Minimizing SLA violations is an important aspect that needs to be addressed as it impacts the makespans, energy consumption, and also the quality of service in a cloud environment. Many existing studies have solved task-scheduling problems, and those algorithms gave near-optimal solutions from their perspective. In this manuscript, we developed a novel task-scheduling algorithm that considers the task priorities coming onto the cloud platform, calculates their task VM priorities, and feeds them to the scheduler. Then, the scheduler will choose appropriate tasks for the VMs based on the calculated priorities. To model this scheduling algorithm, we used the cat swarm optimization algorithm, which was inspired by the behavior of cats. It was implemented on the Cloudsim tool and OpenStack cloud platform. 
Extensive experimentation was carried out using real-time workloads. When compared to the baseline PSO, ACO and RATS-HM approaches and from the results, it is evident that our proposed approach outperforms all of the baseline algorithms in view of the above-mentioned parameters.}, } @article {pmid37447987, year = {2023}, author = {Chen, CL and Lai, JL}, title = {An Experimental Detection of Distributed Denial of Service Attack in CDX 3 Platform Based on Snort.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {13}, pages = {}, pmid = {37447987}, issn = {1424-8220}, mesh = {*Algorithms ; *Internet ; Machine Learning ; }, abstract = {Distributed Denial of Service (DDoS) attacks pose a significant threat to internet and cloud security. Our study utilizes a Poisson distribution model to efficiently detect DDoS attacks with a computational complexity of O(n). Unlike Machine Learning (ML)-based algorithms, our method only needs to set up one or more Poisson models for legitimate traffic based on the granularity of the time periods during preprocessing, thus eliminating the need for training time. We validate this approach with four virtual machines on the CDX 3.0 platform, each simulating different aspects of DDoS attacks for offensive, monitoring, and defense evaluation purposes. The study further analyzes seven diverse DDoS attack methods. 
When compared with existing methods, our approach demonstrates superior performance, highlighting its potential effectiveness in real-world DDoS attack detection.}, } @article {pmid37447969, year = {2023}, author = {Hsieh, TM and Chen, KY}, title = {Knowledge Development Trajectory of the Internet of Vehicles Domain Based on Main Path Analysis.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {13}, pages = {}, pmid = {37447969}, issn = {1424-8220}, mesh = {*Internet ; Cloud Computing ; Automation ; *Blockchain ; Cities ; }, abstract = {The Internet of vehicles (IoV) is an Internet-of-things-based network in the area of transportation. It comprises sensors, network communication, automation control, and data processing and enables connectivity between vehicles and other objects. This study performed main path analysis (MPA) to investigate the trajectory of research regarding the IoV. Studies were extracted from the Web of Science database, and citation networks among these studies were generated. MPA revealed that research in this field has mainly covered media access control, vehicle-to-vehicle channels, device-to-device communications, layers, non-orthogonal multiple access, and sixth-generation communications. Cluster analysis and data mining revealed that the main research topics related to the IoV included wireless channels, communication protocols, vehicular ad hoc networks, security and privacy, resource allocation and optimization, autonomous cruise control, deep learning, and edge computing. By using data mining and statistical analysis, we identified emerging research topics related to the IoV, namely blockchains, deep learning, edge computing, cloud computing, vehicular dynamics, and fifth- and sixth-generation mobile communications. These topics are likely to help drive innovation and the further development of IoV technologies and contribute to smart transportation, smart cities, and other applications. 
On the basis of the present results, this paper offers several predictions regarding the future of research regarding the IoV.}, } @article {pmid37447967, year = {2023}, author = {Adnan, M and Slavic, G and Martin Gomez, D and Marcenaro, L and Regazzoni, C}, title = {Systematic and Comprehensive Review of Clustering and Multi-Target Tracking Techniques for LiDAR Point Clouds in Autonomous Driving Applications.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {13}, pages = {}, pmid = {37447967}, issn = {1424-8220}, support = {PID2019-104793RB-C31//Spanish Government under Grants/ ; PDC2021-1215-17-C31//Spanish Government under Grants/ ; PID2021-124335OB-C21//Spanish Government under Grants/ ; TED2021-129485B-C44//Spanish Government under Grants/ ; P2018/EMT-4362//Comunidad de Madrid under Grant SEGVAUTO-4.0-CM/ ; }, mesh = {Reproducibility of Results ; *Autonomous Vehicles ; Cluster Analysis ; Databases, Factual ; *Evidence Gaps ; }, abstract = {Autonomous vehicles (AVs) rely on advanced sensory systems, such as Light Detection and Ranging (LiDAR), to function seamlessly in intricate and dynamic environments. LiDAR produces highly accurate 3D point clouds, which are vital for the detection, classification, and tracking of multiple targets. A systematic review and classification of various clustering and Multi-Target Tracking (MTT) techniques are necessary due to the inherent challenges posed by LiDAR data, such as density, noise, and varying sampling rates. As part of this study, the Preferred Reporting Items for Systematic Reviews and Meta-Analyses (PRISMA) methodology was employed to examine the challenges and advancements in MTT techniques and clustering for LiDAR point clouds within the context of autonomous driving. Searches were conducted in major databases such as IEEE Xplore, ScienceDirect, SpringerLink, ACM Digital Library, and Google Scholar, utilizing customized search strategies. 
We identified and critically reviewed 76 relevant studies based on rigorous screening and evaluation processes, assessing their methodological quality, data handling adequacy, and reporting compliance. As a result of this comprehensive review and classification, we were able to provide a detailed overview of current challenges, research gaps, and advancements in clustering and MTT techniques for LiDAR point clouds, thus contributing to the field of autonomous driving. Researchers and practitioners working in the field of autonomous driving will benefit from this study, which was characterized by transparency and reproducibility on a systematic basis.}, } @article {pmid37447966, year = {2023}, author = {Kaur, A and Kumar, S and Gupta, D and Hamid, Y and Hamdi, M and Ksibi, A and Elmannai, H and Saini, S}, title = {Algorithmic Approach to Virtual Machine Migration in Cloud Computing with Updated SESA Algorithm.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {13}, pages = {}, pmid = {37447966}, issn = {1424-8220}, support = {PNURSP2023R125//Princess Nourah bint Abdulrahman University Researchers Supporting Project/ ; }, mesh = {Humans ; *Cloud Computing ; *Algorithms ; }, abstract = {Cloud computing plays an important role in every IT sector. Many tech giants such as Google, Microsoft, and Facebook are deploying their data centres around the world to provide computation and storage services. The customers either submit their job directly or they take the help of the brokers for the submission of the jobs to the cloud centres. The preliminary aim is to reduce the overall power consumption which was ignored in the early days of cloud development. This was due to the performance expectations from cloud servers as they were supposed to provide all the services through their services layers IaaS, PaaS, and SaaS. 
As time passed and researchers came up with new terminologies and algorithmic architecture for the reduction of power consumption and sustainability, other algorithmic anarchies were also introduced, such as statistical oriented learning and bioinspired algorithms. In this paper, an in-depth focus has been placed on multiple approaches for migration among virtual machines to find out various issues among existing approaches. The proposed work utilizes elastic scheduling inspired by the smart elastic scheduling algorithm (SESA) to develop a more energy-efficient VM allocation and migration algorithm. The proposed work uses cosine similarity and bandwidth utilization as additional utilities to improve the current performance in terms of QoS. The proposed work is evaluated for overall power consumption and service level agreement violation (SLA-V) and is compared with related state-of-the-art techniques. A proposed algorithm is also presented in order to solve problems found during the survey.}, } @article {pmid37447902, year = {2023}, author = {Botez, R and Pasca, AG and Sferle, AT and Ivanciu, IA and Dobrota, V}, title = {Efficient Network Slicing with SDN and Heuristic Algorithm for Low Latency Services in 5G/B5G Networks.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {13}, pages = {}, pmid = {37447902}, issn = {1424-8220}, mesh = {Humans ; *Heuristics ; *Algorithms ; Communication ; Records ; }, abstract = {This paper presents a novel approach for network slicing in 5G backhaul networks, targeting services with low or very low latency requirements. We propose a modified A* algorithm that incorporates network quality of service parameters into a composite metric. The algorithm's efficiency outperforms that of Dijkstra's algorithm using a precalculated heuristic function and a real-time monitoring strategy for congestion management. 
We integrate the algorithm into an SDN module called a path computation element, which computes the optimal path for the network slices. Experimental results show that the proposed algorithm significantly reduces processing time compared to Dijkstra's algorithm, particularly in complex topologies, with an order of magnitude improvement. The algorithm successfully adjusts paths in real-time to meet low latency requirements, preventing packet delay from exceeding the established threshold. The end-to-end measurements using the Speedtest client validate the algorithm's performance in differentiating traffic with and without delay requirements. These results demonstrate the efficacy of our approach in achieving ultra-reliable low-latency communication (URLLC) in 5G backhaul networks.}, } @article {pmid37447888, year = {2023}, author = {Fereira, R and Ranaweera, C and Lee, K and Schneider, JG}, title = {Energy Efficient Node Selection in Edge-Fog-Cloud Layered IoT Architecture.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {13}, pages = {}, pmid = {37447888}, issn = {1424-8220}, mesh = {*Conservation of Energy Resources ; *Internet of Things ; Autonomous Vehicles ; Communication ; Computer Systems ; }, abstract = {Internet of Things (IoT) architectures generally focus on providing consistent performance and reliable communications. The convergence of IoT, edge, fog, and cloud aims to improve the quality of service of applications, which does not typically emphasize energy efficiency. Considering energy in IoT architectures would reduce the energy impact from billions of IoT devices. The research presented in this paper proposes an optimization framework that considers energy consumption of nodes when selecting a node for processing an IoT request in edge-fog-cloud layered architecture. The IoT use cases considered in this paper include smart grid, autonomous vehicles, and eHealth. The proposed framework is evaluated using CPLEX simulations. 
The results provide insights into mechanisms that can be used to select nodes energy-efficiently whilst meeting the application requirements and other network constraints in multi-layered IoT architectures.}, } @article {pmid37447786, year = {2023}, author = {Li, H and Liu, X and Zhao, W}, title = {Research on Lightweight Microservice Composition Technology in Cloud-Edge Device Scenarios.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {13}, pages = {}, pmid = {37447786}, issn = {1424-8220}, support = {2022YFB330570//National Key R&D Program of China/ ; 21511104302//Shanghai Science and Technology Innovation Action/ ; }, mesh = {*Software ; *Technology ; Workflow ; }, abstract = {In recent years, cloud-native technology has become popular among Internet companies. Microservice architecture solves the complexity problem for multiple service methods by decomposing a single application so that each service can be independently developed, independently deployed, and independently expanded. At the same time, domestic industrial Internet construction is still in its infancy, and small and medium-sized enterprises still face many problems in the process of digital transformation, such as difficult resource integration, complex control equipment workflow, slow development and deployment process, and shortage of operation and maintenance personnel. The existing traditional workflow architecture is mainly aimed at the cloud scenario, which consumes a lot of resources and cannot be used in resource-limited scenarios at the edge. Moreover, traditional workflow is not efficient enough to transfer data and often needs to rely on various storage mechanisms. In this article, a lightweight and efficient workflow architecture is proposed to optimize the defects of these traditional workflows by combining cloud-edge scene. 
By orchestrating a lightweight workflow engine with a Kubernetes Operator, the architecture can significantly reduce workflow execution time and unify data flow between cloud microservices and edge devices.}, } @article {pmid37447769, year = {2023}, author = {Shruti, and Rani, S and Sah, DK and Gianini, G}, title = {Attribute-Based Encryption Schemes for Next Generation Wireless IoT Networks: A Comprehensive Survey.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {13}, pages = {}, pmid = {37447769}, issn = {1424-8220}, support = {MUSA CUP G43C22001370007, Code ECS000000//European Union/ ; SERICS PE0000001//European Union/ ; }, mesh = {*Computer Security ; *Internet of Things ; Privacy ; Cloud Computing ; Delivery of Health Care ; }, abstract = {Most data nowadays are stored in the cloud; therefore, cloud computing and its extension-fog computing-are the most in-demand services at the present time. Cloud and fog computing platforms are largely used by Internet of Things (IoT) applications where various mobile devices, end users, PCs, and smart objects are connected to each other via the internet. IoT applications are common in several application areas, such as healthcare, smart cities, industries, logistics, agriculture, and many more. Due to this, there is an increasing need for new security and privacy techniques, with attribute-based encryption (ABE) being the most effective among them. ABE provides fine-grained access control, enables secure storage of data on unreliable storage, and is flexible enough to be used in different systems. In this paper, we survey ABE schemes, their features, methodologies, benefits/drawbacks, attacks on ABE, and how ABE can be used with IoT and its applications. This survey reviews ABE models suitable for IoT platforms, taking into account the desired features and characteristics. We also discuss various performance indicators used for ABE and how they affect efficiency. 
Furthermore, some selected schemes are analyzed through simulation to compare their efficiency in terms of different performance indicators. As a result, we find that some schemes simultaneously perform well in one or two performance indicators, whereas none shines in all of them at once. The work will help researchers identify the characteristics of different ABE schemes quickly and recognize whether they are suitable for specific IoT applications. Future work that may be helpful for ABE is also discussed.}, } @article {pmid37447635, year = {2023}, author = {Irugalbandara, C and Naseem, AS and Perera, S and Kiruthikan, S and Logeeshan, V}, title = {A Secure and Smart Home Automation System with Speech Recognition and Power Measurement Capabilities.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {13}, pages = {}, pmid = {37447635}, issn = {1424-8220}, mesh = {Humans ; Aged ; *Speech Perception ; Speech ; *Voice ; *Disabled Persons ; Automation ; }, abstract = {The advancement in the internet of things (IoT) technologies has made it possible to control and monitor electronic devices at home with just the touch of a button. This has made people lead much more comfortable lifestyles. Elderly people and those with disabilities have especially benefited from voice-assisted home automation systems that allow them to control their devices with simple voice commands. However, the widespread use of cloud-based services in these systems, such as those offered by Google and Amazon, has made them vulnerable to cyber-attacks. To ensure the proper functioning of these systems, a stable internet connection and a secure environment free from cyber-attacks are required. However, the quality of the internet is often low in developing countries, which makes it difficult to access the services these systems offer. Additionally, the lack of localization in voice assistants prevents people from using voice-assisted home automation systems in these countries. 
To address these challenges, this research proposes an offline home automation system. Since the internet and cloud services are not required for an offline system, it can perform its essential functions, while ensuring protection against cyber-attacks and can provide quick responses. It offers additional features, such as power usage tracking and the optimization of linked devices.}, } @article {pmid37447111, year = {2023}, author = {Yan, Y and Xin, Z and Bai, X and Zhan, H and Xi, J and Xie, J and Cheng, Y}, title = {Analysis of Growing Season Normalized Difference Vegetation Index Variation and Its Influencing Factors on the Mongolian Plateau Based on Google Earth Engine.}, journal = {Plants (Basel, Switzerland)}, volume = {12}, number = {13}, pages = {}, pmid = {37447111}, issn = {2223-7747}, support = {31870706//National Natural Science Foundation of China/ ; }, abstract = {Frequent dust storms on the Mongolian Plateau have adversely affected the ecological environmental quality of East Asia. Studying the dynamic changes in vegetation coverage is one of the important means of evaluating ecological environmental quality in the region. In this study, we used Landsat remote sensing images from 2000 to 2019 on the Mongolian Plateau to extract yearly Normalized Difference Vegetation Index (NDVI) data during the growing season. We used partial correlation analysis and the Hurst index to analyze the spatiotemporal characteristics of the NDVI before and after the establishment of nature reserves and their influencing factors on the GEE cloud platform. 
The results showed that (1) the proportion of the region with an upwards trend of NDVI increased from 52.21% during 2000-2009 to 67.93% during 2010-2019, indicating a clear improvement in vegetation due to increased precipitation; (2) the increase in precipitation and positive human activities drove the increase in the NDVI in the study region from 2000 to 2019; and (3) the overall trend of the NDVI in the future is expected to be stable with a slight decrease, and restoration potential is greater for water bodies and grasslands. Therefore, it is imperative to strengthen positive human activities to safeguard vegetation. These findings furnish scientific evidence for environmental management and the development of ecological engineering initiatives on the Mongolian Plateau.}, } @article {pmid37443425, year = {2024}, author = {Hotchkiss, JT and Ridderman, E and Bufkin, W}, title = {Development of a model and method for hospice quality assessment from natural language processing (NLP) analysis of online caregiver reviews.}, journal = {Palliative & supportive care}, volume = {22}, number = {1}, pages = {19-30}, doi = {10.1017/S1478951523001001}, pmid = {37443425}, issn = {1478-9523}, mesh = {Humans ; United States ; *Hospice Care/psychology ; *Hospices/methods ; Caregivers/psychology ; Retrospective Studies ; Natural Language Processing ; }, abstract = {OBJECTIVES: With a fraction of hospices having their Consumer Assessment of Healthcare Providers and Systems (CAHPS®) scores on Hospice Compare, a significant reservoir of hospice quality data remains in online caregiver reviews. The purpose of this study was to develop a method and model of hospice quality assessment from caregiver reviews using Watson's carative model.

METHODS: Retrospective mixed methods of pilot qualitative thematic analysis and sentiment analysis using NLP of Google and Yelp caregiver reviews between 2013 and 2023. We employed stratified sampling, weighted according to hospice size, to emulate the daily census of enrollees across the United States. Sentiment analysis was performed (n = 3393) using Google NLP.

RESULTS: Two themes with the highest prevalence had moderately positive sentiments (S): Caring staff (+.47) and Care quality, comfort and cleanliness (+.41). Other positive sentiment scores with high prevalence were Gratitude and thanks (+.81), "Treating the patient with respect" (+.54), and "Emotional, spiritual, bereavement support" (+.60). Lowest sentiment scores were "Insurance, administrative or billing" (-.37), "Lack of staffing" (-.32), and "Communication with the family" (-.01).

SIGNIFICANCE OF RESULTS: In the developed quality model, caregivers recommended hospices with caring staff, providing quality care, responsive to requests, and offering family support, including bereavement care. All ten Watson's carative factors and all eight CAHPS measures were presented in the discovered review themes of the quality model. Close-ended CAHPS scores and open-ended online reviews have substantial conceptual overlap and complementary insights. Future hospice quality research should explore caregiver expectations and compare review themes by profit status.}, } @article {pmid37441434, year = {2023}, author = {Gemborn Nilsson, M and Tufvesson, P and Heskebeck, F and Johansson, M}, title = {An open-source human-in-the-loop BCI research framework: method and design.}, journal = {Frontiers in human neuroscience}, volume = {17}, number = {}, pages = {1129362}, pmid = {37441434}, issn = {1662-5161}, abstract = {Brain-computer interfaces (BCIs) translate brain activity into digital commands for interaction with the physical world. The technology has great potential in several applied areas, ranging from medical applications to entertainment industry, and creates new conditions for basic research in cognitive neuroscience. The BCIs of today, however, offer only crude online classification of the user's current state of mind, and more sophisticated decoding of mental states depends on time-consuming offline data analysis. The present paper addresses this limitation directly by leveraging a set of improvements to the analytical pipeline to pave the way for the next generation of online BCIs. Specifically, we introduce an open-source research framework that features a modular and customizable hardware-independent design. This framework facilitates human-in-the-loop (HIL) model training and retraining, real-time stimulus control, and enables transfer learning and cloud computing for the online classification of electroencephalography (EEG) data. 
Stimuli for the subject and diagnostics for the researcher are shown on separate displays using web browser technologies. Messages are sent using the Lab Streaming Layer standard and websockets. Real-time signal processing and classification, as well as training of machine learning models, is facilitated by the open-source Python package Timeflux. The framework runs on Linux, MacOS, and Windows. While online analysis is the main target of the BCI-HIL framework, offline analysis of the EEG data can be performed with Python, MATLAB, and Julia through packages like MNE, EEGLAB, or FieldTrip. The paper describes and discusses desirable properties of a human-in-the-loop BCI research platform. The BCI-HIL framework is released under MIT license with examples at: bci.lu.se/bci-hil (or at: github.com/bci-hil/bci-hil).}, } @article {pmid37437364, year = {2023}, author = {Baek, H and Yun, WJ and Park, S and Kim, J}, title = {Stereoscopic scalable quantum convolutional neural networks.}, journal = {Neural networks : the official journal of the International Neural Network Society}, volume = {165}, number = {}, pages = {860-867}, doi = {10.1016/j.neunet.2023.06.027}, pmid = {37437364}, issn = {1879-2782}, mesh = {*Computing Methodologies ; *Quantum Theory ; Neural Networks, Computer ; Algorithms ; Cloud Computing ; }, abstract = {As the noisy intermediate-scale quantum (NISQ) era has begun, a quantum neural network (QNN) is definitely a promising solution to many problems that classical neural networks cannot solve. In addition, a quantum convolutional neural network (QCNN) is now receiving a lot of attention because it can process high dimensional inputs comparing to QNN. However, due to the nature of quantum computing, it is difficult to scale up the QCNN to extract a sufficient number of features due to barren plateaus. This is especially challenging in classification operations with high-dimensional data input. 
Motivated by this, a novel stereoscopic 3D scalable QCNN (sQCNN-3D) is proposed for point cloud data processing in classification applications. Furthermore, reverse fidelity training (RF-Train) is additionally considered on top of sQCNN-3D for diversifying features with a limited number of qubits using the fidelity of quantum computing. Our data-intensive performance evaluation verifies that the proposed algorithm achieves desired performance.}, } @article {pmid37434236, year = {2023}, author = {Glauser, R and Holm, J and Bender, M and Bürkle, T}, title = {How can social robot use cases in healthcare be pushed - with an interoperable programming interface.}, journal = {BMC medical informatics and decision making}, volume = {23}, number = {1}, pages = {118}, pmid = {37434236}, issn = {1472-6947}, mesh = {Humans ; *Robotics ; Social Interaction ; Health Facilities ; Speech ; Delivery of Health Care ; }, abstract = {INTRODUCTION: Research into current robot middleware has revealed that most of them are either too complicated or outdated. These facts have motivated the development of a new middleware to meet the requirements of usability by non-experts. The proposed middleware is based on Android and is intended to be placed over existing robot SDKs and middleware. It runs on the android tablet of the Cruzr robot. Various toolings have been developed, such as a web component to control the robot via a web interface, which facilitates its use.

METHODS: The middleware was developed using Android Java and runs on the Cruzr tablet as an app. It features a WebSocket server that interfaces with the robot and allows control via Python or other WebSocket-compatible languages. The speech interface utilizes Google Cloud Voice text-to-speech and speech-to-text services. The interface was implemented in Python, allowing for easy integration with existing robotics development workflows, and a web interface was developed for direct control of the robot via the web.

RESULTS: The new robot middleware was created and deployed on a Cruzr robot, relying on the WebSocket API and featuring a Python implementation. It supports various robot functions, such as text-to-speech, speech-to-text, navigation, displaying content and scanning bar codes. The system's architecture allows for porting the interface to other robots and platforms, showcasing its adaptability. It has been demonstrated that the middleware can be run on a Pepper robot, although not all functions have been implemented yet. The middleware was utilized to implement healthcare use cases and received good feedback.

CONCLUSION: Cloud and local speech services were discussed in regard to the middleware's needs, to run without having to change any code on other robots. An outlook on how the programming interface can further be simplified by using natural text to code generators is given. For other researchers using the aforementioned platforms (Cruzr, Pepper), the new middleware can be utilized for testing human-robot interaction. It can be used in a teaching setting, as well as be adapted to other robots using the same interface and philosophy regarding simple methods.}, } @article {pmid37433723, year = {2023}, author = {Vinjerui, KH and Sarheim Anthun, K and Asheim, A and Carlsen, F and Mjølstad, BP and Nilsen, SM and Pape, K and Bjorngaard, JH}, title = {General practitioners ending their practice and impact on patients' health, healthcare use and mortality: a protocol for national registry cohort studies in Norway, 2008 to 2021.}, journal = {BMJ open}, volume = {13}, number = {7}, pages = {e072220}, pmid = {37433723}, issn = {2044-6055}, mesh = {Humans ; *General Practitioners ; *General Practice ; Norway ; Cohort Studies ; Registries ; }, abstract = {INTRODUCTION: Continuous general practitioner (GP) and patient relations associate with positive health outcomes. Termination of GP practice is unavoidable, while consequences of final breaks in relations are less explored. We will study how an ended GP relation affects patient's healthcare utilisation and mortality compared with patients with a continuous GP relation.

METHODS AND ANALYSIS: We link national registries data on individual GP affiliation, sociodemographic characteristics, healthcare use and mortality. From 2008 to 2021, we identify patients whose GP stopped practicing and will compare acute and elective, primary and specialist healthcare use and mortality, with patients whose GP did not stop practicing. We match GP-patient pairs on age and sex (both), immigrant status and education (patients), and number of patients and practice period (GPs). We analyse the outcomes before and after an ended GP-patient relation, using Poisson regression with high-dimensional fixed effects.

ETHICS AND DISSEMINATION: This study protocol is part of the approved project Improved Decisions with Causal Inference in Health Services Research, 2016/2159/REK Midt (the Regional Committees for Medical and Health Research Ethics) and does not require consent. HUNT Cloud provides secure data storage and computing. We will report using the STROBE guideline for observational case-control studies and publish in peer-reviewed journals, accessible in NTNU Open and present at scientific conferences. To reach a broader audience, we will summarise articles in the project's web page, regular and social media, and disseminate to relevant stakeholders.}, } @article {pmid37430789, year = {2023}, author = {Peniak, P and Bubeníková, E and Kanáliková, A}, title = {Validation of High-Availability Model for Edge Devices and IIoT.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {10}, pages = {}, pmid = {37430789}, issn = {1424-8220}, abstract = {Competitiveness in industry requires smooth, efficient, and high-quality operation. For some industrial applications or process control and monitoring applications, it is necessary to achieve high availability and reliability because, for example, the failure of availability in industrial production can have serious consequences for the operation and profitability of the company, as well as for the safety of employees and the surrounding environment. At present, many new technologies that use data obtained from various sensors for evaluation or decision-making require the minimization of data processing latency to meet the needs of real-time applications. Cloud/Fog and Edge computing technologies have been proposed to overcome latency issues and to increase computing power. However, industrial applications also require the high availability and reliability of devices and systems. 
The potential malfunction of Edge devices can cause a failure of applications, and the unavailability of Edge computing results can have a significant impact on manufacturing processes. Therefore, our article deals with the creation and validation of an enhanced Edge device model, which in contrast to the current solutions, is aimed not only at the integration of various sensors within manufacturing solutions, but also brings the required redundancy to enable the high availability of Edge devices. In the model, we use Edge computing, which performs the recording of sensed data from various types of sensors, synchronizes them, and makes them available for decision making by applications in the Cloud. We focus on creating a suitable Edge device model that works with the redundancy, by using either mirroring or duplexing via a secondary Edge device. This enables high Edge device availability and rapid system recovery in the event of a failure of the primary Edge device. The created model of high availability is based on the mirroring and duplexing of the Edge devices, which support two protocols: OPC UA and MQTT. The models were implemented in the Node-Red software, tested, and subsequently validated and compared to confirm the required recovery time and 100% redundancy of the Edge device. In the contrast to the currently available Edge solutions, our proposed extended model based on Edge mirroring is able to address most of the critical cases, where fast recovery is required, and no adjustments are needed for critical applications. 
The maturity level of Edge high availability can be further extended by applying Edge duplexing for process control.}, } @article {pmid37430776, year = {2023}, author = {Tahir, A and Bai, S and Shen, M}, title = {A Wearable Multi-Modal Digital Upper Limb Assessment System for Automatic Musculoskeletal Risk Evaluation.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {10}, pages = {}, pmid = {37430776}, issn = {1424-8220}, mesh = {Humans ; Upper Extremity ; Algorithms ; Computers, Handheld ; Databases, Factual ; *Musculoskeletal Diseases/diagnosis ; *Wearable Electronic Devices ; }, abstract = {Continuous ergonomic risk assessment of the human body is critical to avoid various musculoskeletal disorders (MSDs) for people involved in physical jobs. This paper presents a digital upper limb assessment (DULA) system that automatically performs rapid upper limb assessment (RULA) in real-time for the timely intervention and prevention of MSDs. While existing approaches require human resources for computing the RULA score, which is highly subjective and untimely, the proposed DULA achieves automatic and objective assessment of musculoskeletal risks using a wireless sensor band embedded with multi-modal sensors. The system continuously tracks and records upper limb movements and muscle activation levels and automatically generates musculoskeletal risk levels. Moreover, it stores the data in a cloud database for in-depth analysis by a healthcare expert. Limb movements and muscle fatigue levels can also be visually seen using any tablet/computer in real-time. 
In the paper, algorithms of robust limb motion detection are developed, and an explanation of the system is provided along with the presentation of preliminary results, which validate the effectiveness of the new technology.}, } @article {pmid37430596, year = {2023}, author = {Xu, Q and Zhang, G and Wang, J}, title = {Research on Cloud-Edge-End Collaborative Computing Offloading Strategy in the Internet of Vehicles Based on the M-TSA Algorithm.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {10}, pages = {}, pmid = {37430596}, issn = {1424-8220}, support = {202101AS070016//the "Yunnan Xingdian Talents Support Plan" project of Yunnan and Key Projects of Yunnan Basic Research Plan/ ; 202101AS070016//the Yunnan Province Basic Research Program Key Funding Project/ ; }, abstract = {In the Internet of Vehicles scenario, the in-vehicle terminal cannot meet the requirements of computing tasks in terms of delay and energy consumption; the introduction of cloud computing and MEC is an effective way to solve the above problem. The in-vehicle terminal requires a high task processing delay, and due to the high delay of cloud computing to upload computing tasks to the cloud, the MEC server has limited computing resources, which will increase the task processing delay when there are more tasks. To solve the above problems, a vehicle computing network based on cloud-edge-end collaborative computing is proposed, in which cloud servers, edge servers, service vehicles, and task vehicles themselves can provide computing services. A model of the cloud-edge-end collaborative computing system for the Internet of Vehicles is constructed, and a computational offloading strategy problem is given. Then, a computational offloading strategy based on the M-TSA algorithm and combined with task prioritization and computational offloading node prediction is proposed. 
Finally, comparative experiments are conducted under task instances simulating real road vehicle conditions to demonstrate the superiority of our network, where our offloading strategy significantly improves the utility of task offloading and reduces offloading delay and energy consumption.}, } @article {pmid37430552, year = {2023}, author = {Mostafa, N and Kotb, Y and Al-Arnaout, Z and Alabed, S and Shdefat, AY}, title = {Replicating File Segments between Multi-Cloud Nodes in a Smart City: A Machine Learning Approach.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {10}, pages = {}, pmid = {37430552}, issn = {1424-8220}, abstract = {The design and management of smart cities and the IoT is a multidimensional problem. One of those dimensions is cloud and edge computing management. Due to the complexity of the problem, resource sharing is one of the vital and major components that when enhanced, the performance of the whole system is enhanced. Research in data access and storage in multi-clouds and edge servers can broadly be classified to data centers and computational centers. The main aim of data centers is to provide services for accessing, sharing and modifying large databases. On the other hand, the aim of computational centers is to provide services for sharing resources. Present and future distributed applications need to deal with very large multi-petabyte datasets and increasing numbers of associated users and resources. The emergence of IoT-based, multi-cloud systems as a potential solution for large computational and data management problems has initiated significant research activity in the area. Due to the considerable increase in data production and data sharing within scientific communities, the need for improvements in data access and data availability cannot be overlooked. It can be argued that the current approaches of large dataset management do not solve all problems associated with big data and large datasets. 
The heterogeneity and veracity of big data require careful management. One of the issues for managing big data in a multi-cloud system is the scalability and expendability of the system under consideration. Data replication ensures server load balancing, data availability and improved data access time. The proposed model minimises the cost of data services through minimising a cost function that takes storage cost, host access cost and communication cost into consideration. The relative weights between different components is learned through history and it is different from a cloud to another. The model ensures that data are replicated in a way that increases availability while at the same time decreasing the overall cost of data storage and access time. Using the proposed model avoids the overheads of the traditional full replication techniques. The proposed model is mathematically proven to be sound and valid.}, } @article {pmid37430518, year = {2023}, author = {Duong, PN and Lee, H}, title = {Pipelined Key Switching Accelerator Architecture for CKKS-Based Fully Homomorphic Encryption.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {10}, pages = {}, pmid = {37430518}, issn = {1424-8220}, abstract = {The increasing ubiquity of big data and cloud-based computing has led to increased concerns regarding the privacy and security of user data. In response, fully homomorphic encryption (FHE) was developed to address this issue by enabling arbitrary computation on encrypted data without decryption. However, the high computational costs of homomorphic evaluations restrict the practical application of FHE schemes. To tackle these computational and memory challenges, a variety of optimization approaches and acceleration efforts are actively being pursued. This paper introduces the KeySwitch module, a highly efficient and extensively pipelined hardware architecture designed to accelerate the costly key switching operation in homomorphic computations. 
Built on top of an area-efficient number-theoretic transform design, the KeySwitch module exploited the inherent parallelism of key switching operation and incorporated three main optimizations: fine-grained pipelining, on-chip resource usage, and high-throughput implementation. An evaluation on the Xilinx U250 FPGA platform demonstrated a 1.6× improvement in data throughput compared to previous work with more efficient hardware resource utilization. This work contributes to the development of advanced hardware accelerators for privacy-preserving computations and promoting the adoption of FHE in practical applications with enhanced efficiency.}, } @article {pmid37424120, year = {2023}, author = {Balla, Y and Tirunagari, S and Windridge, D}, title = {Pediatrics in Artificial Intelligence Era: A Systematic Review on Challenges, Opportunities, and Explainability.}, journal = {Indian pediatrics}, volume = {60}, number = {7}, pages = {561-569}, pmid = {37424120}, issn = {0974-7559}, mesh = {*Artificial Intelligence ; *Pediatrics ; *Clinical Decision-Making ; Humans ; Child, Preschool ; Child ; Deep Learning ; }, abstract = {BACKGROUND: The emergence of artificial intelligence (AI) tools such as ChatGPT and Bard is disrupting a broad swathe of fields, including medicine. In pediatric medicine, AI is also increasingly being used across multiple subspecialties. However, the practical application of AI still faces a number of key challenges. Consequently, there is a requirement for a concise overview of the roles of AI across the multiple domains of pediatric medicine, which the current study seeks to address.

AIM: To systematically assess the challenges, opportunities, and explainability of AI in pediatric medicine.

METHODOLOGY: A systematic search was carried out on peer-reviewed databases, PubMed Central, Europe PubMed Central, and grey literature using search terms related to machine learning (ML) and AI for the years 2016 to 2022 in the English language. A total of 210 articles were retrieved that were screened with PRISMA for abstract, year, language, context, and proximal relevance to research aims. A thematic analysis was carried out to extract findings from the included studies.

RESULTS: Twenty articles were selected for data abstraction and analysis, with three consistent themes emerging from these articles. In particular, eleven articles address the current state-of-the-art application of AI in diagnosing and predicting health conditions such as behavioral and mental health, cancer, syndromic and metabolic diseases. Five articles highlight the specific challenges of AI deployment in pediatric medicine: data security, handling, authentication, and validation. Four articles set out future opportunities for AI to be adapted: the incorporation of Big Data, cloud computing, precision medicine, and clinical decision support systems. These studies collectively critically evaluate the potential of AI in overcoming current barriers to adoption.

CONCLUSION: AI is proving disruptive within pediatric medicine and is presently associated with challenges, opportunities, and the need for explainability. AI should be viewed as a tool to enhance and support clinical decision-making rather than a substitute for human judgement and expertise. Future research should consequently focus on obtaining comprehensive data to ensure the generalizability of research findings.}, } @article {pmid37420777, year = {2023}, author = {Mari, D and Camuffo, E and Milani, S}, title = {CACTUS: Content-Aware Compression and Transmission Using Semantics for Automotive LiDAR Data.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {12}, pages = {}, pmid = {37420777}, issn = {1424-8220}, support = {PE0000001//Italian National Recovery and Resilience Plan (NRRP) of NextGenerationEU, partnership on "Telecommunications of the Future" (PE0000001 - program "RESTART")./ ; SID 2018 project SartreMR//University of Padua/ ; Project SYCURI//University of Padova/ ; }, mesh = {*Semantics ; Awareness ; *Data Compression ; Physical Phenomena ; }, abstract = {Many recent cloud or edge computing strategies for automotive applications require transmitting huge amounts of Light Detection and Ranging (LiDAR) data from terminals to centralized processing units. As a matter of fact, the development of effective Point Cloud (PC) compression strategies that preserve semantic information, which is critical for scene understanding, proves to be crucial. Segmentation and compression have always been treated as two independent tasks; however, since not all the semantic classes are equally important for the end task, this information can be used to guide data transmission. In this paper, we propose Content-Aware Compression and Transmission Using Semantics (CACTUS), which is a coding framework that exploits semantic information to optimize the data transmission, partitioning the original point set into separate data streams. 
Experimental results show that differently from traditional strategies, the independent coding of semantically consistent point sets preserves class information. Additionally, whenever semantic information needs to be transmitted to the receiver, using the CACTUS strategy leads to gains in terms of compression efficiency, and more in general, it improves the speed and flexibility of the baseline codec used to compress the data.}, } @article {pmid37420742, year = {2023}, author = {Prauzek, M and Kucova, T and Konecny, J and Adamikova, M and Gaiova, K and Mikus, M and Pospisil, P and Andriukaitis, D and Zilys, M and Martinkauppi, B and Koziorek, J}, title = {IoT Sensor Challenges for Geothermal Energy Installations Monitoring: A Survey.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {12}, pages = {}, pmid = {37420742}, issn = {1424-8220}, support = {SP2023/009//Student Grant System, VSB-TU Ostrava/ ; No. 856670//European Union's Horizon 2020 research and innovation programme/ ; }, mesh = {*Geothermal Energy ; Cloud Computing ; Information Technology ; *Internet of Things ; Technology ; }, abstract = {Geothermal energy installations are becoming increasingly common in new city developments and renovations. With a broad range of technological applications and improvements in this field, the demand for suitable monitoring technologies and control processes for geothermal energy installations is also growing. This article identifies opportunities for the future development and deployment of IoT sensors applied to geothermal energy installations. The first part of the survey describes the technologies and applications of various sensor types. Sensors that monitor temperature, flow rate and other mechanical parameters are presented with a technological background and their potential applications. 
The second part of the article surveys Internet-of-Things (IoT), communication technology and cloud solutions applicable to geothermal energy monitoring, with a focus on IoT node designs, data transmission technologies and cloud services. Energy harvesting technologies and edge computing methods are also reviewed. The survey concludes with a discussion of research challenges and an outline of new areas of application for monitoring geothermal installations and innovating technologies to produce IoT sensor solutions.}, } @article {pmid37409083, year = {2023}, author = {Sun, X and Li, S}, title = {Multi-sensor network tracking research utilizing searchable encryption algorithm in the cloud computing environment.}, journal = {PeerJ. Computer science}, volume = {9}, number = {}, pages = {e1433}, pmid = {37409083}, issn = {2376-5992}, abstract = {Presently, the focus of target detection is shifting towards the integration of information acquired from multiple sensors. When faced with a vast amount of data from various sensors, ensuring data security during transmission and storage in the cloud becomes a primary concern. Data files can be encrypted and stored in the cloud. When using data, the required data files can be returned through ciphertext retrieval, and then searchable encryption technology can be developed. However, the existing searchable encryption algorithms mainly ignore the data explosion problem in a cloud computing environment. The issue of authorised access under cloud computing has yet to be solved uniformly, resulting in a waste of computing power by data users when processing more and more data. Furthermore, to save computing resources, ECS (encrypted cloud storage) may only return a fragment of results in response to a search query, lacking a practical and universal verification mechanism. Therefore, this article proposes a lightweight, fine-grained searchable encryption scheme tailored to the cloud edge computing environment. 
We generate ciphertext and search trap gates for terminal devices based on bilinear pairs and introduce access policies to restrict ciphertext search permissions, which improves the efficiency of ciphertext generation and retrieval. This scheme allows for encryption and trapdoor calculation generation on auxiliary terminal devices, with complex calculations carried out on edge devices. The resulting method ensures secure data access, fast search in multi-sensor network tracking, and accelerates computing speed while maintaining data security. Ultimately, experimental comparisons and analyses demonstrate that the proposed method improves data retrieval efficiency by approximately 62%, reduces the storage overhead of the public key, ciphertext index, and verifiable searchable ciphertext by half, and effectively mitigates delays in data transmission and computation processes.}, } @article {pmid37406681, year = {2023}, author = {Li, J and Liu, Z}, title = {Sensor-based cloud computing data system and long distance running fatigue assessment.}, journal = {Preventive medicine}, volume = {173}, number = {}, pages = {107604}, doi = {10.1016/j.ypmed.2023.107604}, pmid = {37406681}, issn = {1096-0260}, mesh = {Humans ; *Cloud Computing ; Reproducibility of Results ; Data Systems ; *Running ; }, abstract = {Wireless sensor networks are widely used in sports training, medical and health care, smart home, environmental monitoring, cloud data and other fields because of their large scale, self-organization, reliability, dynamic, integration and data centralization. Based on this point, this article conducts a comprehensive analysis and research on cloud computing data systems, and designs and implements a dynamic replication strategy. Since different users have different demands for different data at different times, it is necessary to record and analyze recent users' data access, so as to actively adjust the number and location of data blocks. 
Subsequently, a multi-source blockchain transmission method was proposed and implemented, which can significantly reduce the time cost of data migration and improve the overall performance of cloud storage data systems. Finally, the article provides an in-depth analysis of long-distance running fatigue. This study will design a simulated specialized exercise load experiment to reproduce the load characteristics of excellent athletes during mid to long distance running, in order to induce exercise fatigue in the main muscles of different parts of their bodies. At the same time, the amplitude frequency joint analysis of the surface changes of EMG signal in this process is carried out. This article conducts research on sensor based cloud computing data systems and long-distance running fatigue assessment, promoting the development of cloud computing data systems and improving long-distance running fatigue assessment methods.}, } @article {pmid37398279, year = {2023}, author = {Huang, X and Struck, TJ and Davey, SW and Gutenkunst, RN}, title = {dadi-cli: Automated and distributed population genetic model inference from allele frequency spectra.}, journal = {bioRxiv : the preprint server for biology}, volume = {}, number = {}, pages = {}, pmid = {37398279}, support = {R01 GM127348/GM/NIGMS NIH HHS/United States ; }, abstract = {SUMMARY: dadi is a popular software package for inferring models of demographic history and natural selection from population genomic data. But using dadi requires Python scripting and manual parallelization of optimization jobs. We developed dadi-cli to simplify dadi usage and also enable straightforward distributed computing.

dadi-cli is implemented in Python and released under the Apache License 2.0. The source code is available at https://github.com/xin-huang/dadi-cli . dadi-cli can be installed via PyPI and conda, and is also available through Cacao on Jetstream2 https://cacao.jetstream-cloud.org/ .}, } @article {pmid37396881, year = {2023}, author = {Chaudhari, PB and Banga, A}, title = {Writing strategies for improving the access of medical literature.}, journal = {World journal of experimental medicine}, volume = {13}, number = {3}, pages = {50-58}, doi = {10.5493/wjem.v13.i3.50}, pmid = {37396881}, issn = {2220-315X}, abstract = {When conducting a literature review, medical authors typically search for relevant keywords in bibliographic databases or on search engines like Google. After selecting the most pertinent article based on the title's relevance and the abstract's content, they download or purchase the article and cite it in their manuscript. Three major elements influence whether an article will be cited in future manuscripts: the keywords, the title, and the abstract. This indicates that these elements are the "key dissemination tools" for research papers. If these three elements are not determined judiciously by authors, it may adversely affect the manuscript's retrievability, readability, and citation index, which can negatively impact both the author and the journal. In this article, we share our informed perspective on writing strategies to enhance the searchability and citation of medical articles. These strategies are adopted from the principles of search engine optimization, but they do not aim to cheat or manipulate the search engine. Instead, they adopt a reader-centric content writing methodology that targets well-researched keywords to the readers who are searching for them. Reputable journals, such as Nature and the British Medical Journal, emphasize "online searchability" in their author guidelines. 
We hope that this article will encourage medical authors to approach manuscript drafting from the perspective of "looking inside-out." In other words, they should not only draft manuscripts around what they want to convey to fellow researchers but also integrate what the readers want to discover. It is a call-to-action to better understand and engage search engine algorithms, so they yield information in a desired and self-learning manner because the "Cloud" is the new stakeholder.}, } @article {pmid37396052, year = {2023}, author = {Qureshi, R and Irfan, M and Gondal, TM and Khan, S and Wu, J and Hadi, MU and Heymach, J and Le, X and Yan, H and Alam, T}, title = {AI in drug discovery and its clinical relevance.}, journal = {Heliyon}, volume = {9}, number = {7}, pages = {e17575}, pmid = {37396052}, issn = {2405-8440}, abstract = {The COVID-19 pandemic has emphasized the need for novel drug discovery process. However, the journey from conceptualizing a drug to its eventual implementation in clinical settings is a long, complex, and expensive process, with many potential points of failure. Over the past decade, a vast growth in medical information has coincided with advances in computational hardware (cloud computing, GPUs, and TPUs) and the rise of deep learning. Medical data generated from large molecular screening profiles, personal health or pathology records, and public health organizations could benefit from analysis by Artificial Intelligence (AI) approaches to speed up and prevent failures in the drug discovery pipeline. We present applications of AI at various stages of drug discovery pipelines, including the inherently computational approaches of de novo design and prediction of a drug's likely properties. Open-source databases and AI-based software tools that facilitate drug design are discussed along with their associated problems of molecule representation, data collection, complexity, labeling, and disparities among labels. 
How contemporary AI methods, such as graph neural networks, reinforcement learning, and generative models, along with structure-based methods, (i.e., molecular dynamics simulations and molecular docking) can contribute to drug discovery applications and analysis of drug responses is also explored. Finally, recent developments and investments in AI-based start-up companies for biotechnology, drug design and their current progress, hopes and promotions are discussed in this article.}, } @article {pmid37383277, year = {2022}, author = {Parks, DF and Voitiuk, K and Geng, J and Elliott, MAT and Keefe, MG and Jung, EA and Robbins, A and Baudin, PV and Ly, VT and Hawthorne, N and Yong, D and Sanso, SE and Rezaee, N and Sevetson, JL and Seiler, ST and Currie, R and Pollen, AA and Hengen, KB and Nowakowski, TJ and Mostajo-Radji, MA and Salama, SR and Teodorescu, M and Haussler, D}, title = {IoT cloud laboratory: Internet of Things architecture for cellular biology.}, journal = {Internet of things (Amsterdam, Netherlands)}, volume = {20}, number = {}, pages = {}, pmid = {37383277}, issn = {2542-6605}, support = {R01 MH120295/MH/NIMH NIH HHS/United States ; RM1 HG011543/HG/NHGRI NIH HHS/United States ; T32 HG008345/HG/NHGRI NIH HHS/United States ; }, abstract = {The Internet of Things (IoT) provides a simple framework to control online devices easily. IoT is now a commonplace tool used by technology companies but is rarely used in biology experiments. IoT can benefit cloud biology research through alarm notifications, automation, and the real-time monitoring of experiments. We developed an IoT architecture to control biological devices and implemented it in lab experiments. Lab devices for electrophysiology, microscopy, and microfluidics were created from the ground up to be part of a unified IoT architecture. The system allows each device to be monitored and controlled from an online web tool. 
We present our IoT architecture so other labs can replicate it for their own experiments.}, } @article {pmid37370966, year = {2023}, author = {Nancy, AA and Ravindran, D and Vincent, DR and Srinivasan, K and Chang, CY}, title = {Fog-Based Smart Cardiovascular Disease Prediction System Powered by Modified Gated Recurrent Unit.}, journal = {Diagnostics (Basel, Switzerland)}, volume = {13}, number = {12}, pages = {}, pmid = {37370966}, issn = {2075-4418}, support = {Intelligent Recognition Industry Service Research Center from the Featured Areas Research Center Program within the framework of the Higher Education Sprout Project//Ministry of Education, Taiwan/ ; MOST 109-2221-E-224-048-MY2//Ministry of Science and Technology,Taiwan/ ; }, abstract = {The ongoing fast-paced technology trend has brought forth ceaseless transformation. In this regard, cloud computing has long proven to be the paramount deliverer of services such as computing power, software, networking, storage, and databases on a pay-per-use basis. The cloud is a big proponent of the internet of things (IoT), furnishing the computation and storage requisite to address internet-of-things applications. With the proliferating IoT devices triggering a continual data upsurge, the cloud-IoT interaction encounters latency, bandwidth, and connectivity restraints. The inclusion of the decentralized and distributed fog computing layer amidst the cloud and IoT layer extends the cloud's processing, storage, and networking services close to end users. This hierarchical edge-fog-cloud model distributes computation and intelligence, yielding optimal solutions while tackling constraints like massive data volume, latency, delay, and security vulnerability. The healthcare domain, warranting time-critical functionalities, can reap benefits from the cloud-fog-IoT interplay. This research paper propounded a fog-assisted smart healthcare system to diagnose heart or cardiovascular disease. 
It combined a fuzzy inference system (FIS) with the recurrent neural network model's variant of the gated recurrent unit (GRU) for pre-processing and predictive analytics tasks. The proposed system showcases substantially improved performance results, with classification accuracy at 99.125%. With major processing of healthcare data analytics happening at the fog layer, it is observed that the proposed work reveals optimized results concerning delays in terms of latency, response time, and jitter, compared to the cloud. Deep learning models are adept at handling sophisticated tasks, particularly predictive analytics. Time-critical healthcare applications reap benefits from deep learning's exclusive potential to furnish near-perfect results, coupled with the merits of the decentralized fog model, as revealed by the experimental results.}, } @article {pmid37369699, year = {2023}, author = {Mateo-Garcia, G and Veitch-Michaelis, J and Purcell, C and Longepe, N and Reid, S and Anlind, A and Bruhn, F and Parr, J and Mathieu, PP}, title = {In-orbit demonstration of a re-trainable machine learning payload for processing optical imagery.}, journal = {Scientific reports}, volume = {13}, number = {1}, pages = {10391}, pmid = {37369699}, issn = {2045-2322}, support = {PID2019-109026RB-I00, ERDF//Ministerio de Ciencia e Innovación/ ; }, abstract = {Cognitive cloud computing in space (3CS) describes a new frontier of space innovation powered by Artificial Intelligence, enabling an explosion of new applications in observing our planet and enabling deep space exploration. In this framework, machine learning (ML) payloads-isolated software capable of extracting high level information from onboard sensors-are key to accomplish this vision. In this work we demonstrate, in a satellite deployed in orbit, a ML payload called 'WorldFloods' that is able to send compressed flood maps from sensed images. 
In particular, we perform a set of experiments to: (1) compare different segmentation models on different processing variables critical for onboard deployment, (2) show that we can produce, onboard, vectorised polygons delineating the detected flood water from a full Sentinel-2 tile, (3) retrain the model with few images of the onboard sensor downlinked to Earth and (4) demonstrate that this new model can be uplinked to the satellite and run on new images acquired by its camera. Overall our work demonstrates that ML-based models deployed in orbit can be updated if new information is available, paving the way for agile integration of onboard and onground processing and "on the fly" continuous learning.}, } @article {pmid37362704, year = {2023}, author = {Mahajan, HB and Junnarkar, AA}, title = {Smart healthcare system using integrated and lightweight ECC with private blockchain for multimedia medical data processing.}, journal = {Multimedia tools and applications}, volume = {}, number = {}, pages = {1-24}, pmid = {37362704}, issn = {1380-7501}, abstract = {Cloud-based Healthcare 4.0 systems have research challenges with secure medical data processing, especially biomedical image processing with privacy protection. Medical records are generally text/numerical or multimedia. Multimedia data includes X-ray scans, Computed Tomography (CT) scans, Magnetic Resonance Imaging (MRI) scans, etc. Transferring biomedical multimedia data to medical authorities raises various security concerns. This paper proposes a one-of-a-kind blockchain-based secure biomedical image processing system that maintains anonymity. The integrated Healthcare 4.0 assisted multimedia image processing architecture includes an edge layer, fog computing layer, cloud storage layer, and blockchain layer. The edge layer collects and sends periodic medical information from the patient to the higher layer. 
The multimedia data from the edge layer is securely preserved in blockchain-assisted cloud storage through fog nodes using lightweight cryptography. Medical users then safely search such data for medical treatment or monitoring. Lightweight cryptographic procedures are proposed by employing Elliptic Curve Cryptography (ECC) with Elliptic Curve Diffie-Hellman (ECDH) and Elliptic Curve Digital Signature (ECDS) algorithm to secure biomedical image processing while maintaining privacy (ECDSA). The proposed technique is experimented with using publicly available chest X-ray and CT images. The experimental results revealed that the proposed model shows higher computational efficiency (encryption and decryption time), Peak to Signal Noise Ratio (PSNR), and Mean Square Error (MSE).}, } @article {pmid37361469, year = {2023}, author = {Ansari, M and Alam, M}, title = {An Intelligent IoT-Cloud-Based Air Pollution Forecasting Model Using Univariate Time-Series Analysis.}, journal = {Arabian journal for science and engineering}, volume = {}, number = {}, pages = {1-28}, pmid = {37361469}, issn = {2193-567X}, abstract = {Air pollution is a significant environmental issue affecting public health and ecosystems worldwide, resulting from various sources such as industrial activities, vehicle emissions, and fossil fuel burning. Air pollution contributes to climate change and can cause several health problems, such as respiratory illnesses, cardiovascular disease, and cancer. A potential solution to this problem has been proposed by using different artificial intelligence (AI) and time-series models. These models are implemented in the cloud environment to forecast the Air Quality Index (AQI) utilizing Internet of things (IoT) devices. The recent influx of IoT-enabled time-series air pollution data poses challenges for traditional models. Various approaches have been explored to forecast AQI in the cloud environment using IoT devices. 
The primary objective of this study is to assess the efficacy of an IoT-Cloud-based model for forecasting the AQI under different meteorological conditions. To achieve this, we proposed a novel BO-HyTS approach that combines seasonal autoregressive integrated moving average (SARIMA) and long short-term memory (LSTM) and fine-tuned it by using Bayesian optimization to predict air pollution levels. The proposed BO-HyTS model can capture both linear and nonlinear characteristics of the time-series data, thus augmenting the accuracy of the forecasting process. Additionally, several AQI forecasting models, including classical time-series, machine learning, and deep learning, are employed to forecast air quality from time-series data. Five statistical evaluation metrics are incorporated to evaluate the effectiveness of models. While comparing the various algorithms among themselves becomes difficult, a non-parametric statistical significance test (Friedman test) is applied to assess the performance of the different machine learning, time-series, and deep learning models. The findings reveal that the proposed BO-HyTS model produced significantly better results than their competitor's, providing the most accurate and efficient forecasting model, with an MSE of 632.200, RMSE of 25.14, Med AE of 19.11, Max Error of 51.52, and MAE of 20.49. The results of this study provide insights into the future patterns of AQI in various Indian states and set a standard for these states as governments develop their healthcare policies accordingly. 
The proposed BO-HyTS model has the potential to inform policy decisions and enable governments and organizations to protect better and manage the environment beforehand.}, } @article {pmid37361138, year = {2023}, author = {Li, X and Pan, L and Liu, S}, title = {A DRL-based online VM scheduler for cost optimization in cloud brokers.}, journal = {World wide web}, volume = {}, number = {}, pages = {1-27}, pmid = {37361138}, issn = {1573-1413}, abstract = {The virtual machine (VM) scheduling problem in cloud brokers that support cloud bursting is fraught with uncertainty due to the on-demand nature of Infrastructure as a Service (IaaS) VMs. Until a VM request is received, the scheduler does not know in advance when it will arrive or what configurations it demands. Even when a VM request is received, the scheduler does not know when the VM's lifecycle expires. Existing studies begin to use deep reinforcement learning (DRL) to solve such scheduling problems. However, they do not address how to guarantee the QoS of user requests. In this paper, we investigate a cost optimization problem for online VM scheduling in cloud brokers for cloud bursting to minimize the cost spent on public clouds while satisfying specified QoS restrictions. We propose DeepBS, a DRL-based online VM scheduler in a cloud broker which learns from experience to adaptively improve scheduling strategies in environments with non-smooth and uncertain user requests. 
We evaluate the performance of DeepBS under two request arrival patterns which are respectively based on Google and Alibaba cluster traces, and the experiments show that DeepBS has a significant advantage over other benchmark algorithms in terms of cost optimization.}, } @article {pmid37361100, year = {2023}, author = {Pandey, NK and Kumar, K and Saini, G and Mishra, AK}, title = {Security issues and challenges in cloud of things-based applications for industrial automation.}, journal = {Annals of operations research}, volume = {}, number = {}, pages = {1-20}, pmid = {37361100}, issn = {0254-5330}, abstract = {Due to the COVID-19 outbreak, industries have gained a thrust on contactless processing for computing technologies and industrial automation. Cloud of Things (CoT) is one of the emerging computing technologies for such applications. CoT combines the most emerging cloud computing and the Internet of Things. The development in industrial automation made them highly interdependent because the cloud computing works like a backbone in IoT technology. This supports the data storage, analytics, processing, commercial application development, deployment, and security compliances. Now amalgamation of cloud technologies with IoT is making utilities more useful, smart, service-oriented, and secure application for sustainable development of industrial processes. As the pandemic has increased access to computing utilities remotely, cyber-attacks have been increased exponentially. This paper reviews the CoT's contribution to industrial automation and the various security features provided by different tools and applications used for the circular economy. The in-depth analysis of security threats, availability of different features corresponding the security issues in traditional and non-traditional CoT platforms used in industrial automation have been analysed. 
The security issues and challenges faced by IIoT and AIoT in industrial automation have also been addressed.}, } @article {pmid37360775, year = {2023}, author = {Lucia, C and Zhiwei, G and Michele, N}, title = {Biometrics for Industry 4.0: a survey of recent applications.}, journal = {Journal of ambient intelligence and humanized computing}, volume = {}, number = {}, pages = {1-23}, pmid = {37360775}, issn = {1868-5137}, abstract = {The Fourth Industrial Revolution, also known as Industry 4.0, represents the rise of digital industrial technology that is propagating at an exponential rate compared to the previous three revolutions. Interoperability is a basis of production, where there is a continuous exchange of information between machines and production units that act autonomously and intelligently. Workers play a central role in making autonomous decisions and using advanced technological tools. It may involve using measures that distinguish individuals, and their behaviours and reactions. Increasing the level of security, allowing only authorized personnel access to designated areas, and promoting worker welfare can have a positive impact on the entire assembly line. Thus, capturing biometric information, with or without individuals' knowledge, could allow identity verification and monitoring of their emotional and cognitive states during the daily actions of work life. From the study of the literature, we outline three macro categories in which the principles of Industry 4.0 are merged and the functionalities of biometric systems are exploited: security, health monitoring, and quality work life analysis. In this review, we present an overview of all biometric features used in the context of Industry 4.0 with a focus on their advantages, limitations, and practical use. 
Attention is also paid to future research directions for which new answers are being explored.}, } @article {pmid37360142, year = {2023}, author = {Hemdan, EE and El-Shafai, W and Sayed, A}, title = {Integrating Digital Twins with IoT-Based Blockchain: Concept, Architecture, Challenges, and Future Scope.}, journal = {Wireless personal communications}, volume = {}, number = {}, pages = {1-24}, pmid = {37360142}, issn = {0929-6212}, abstract = {In recent years, there have been concentrations on the Digital Twin from researchers and companies due to its advancement in IT, communication systems, Cloud Computing, Internet-of-Things (IoT), and Blockchain. The main concept of the DT is to provide a comprehensive tangible, and operational explanation of any element, asset, or system. However, it is an extremely dynamic taxonomy developing in complication during the life cycle that produces an enormous quantity of the engendered data and information from them. Likewise, with the development of the Blockchain, the digital twins have the potential to redefine and could be a key strategy to support the IoT-based digital twin's applications for transferring data and value onto the Internet with full transparency besides promising accessibility, trusted traceability, and immutability of transactions. Therefore, the integration of digital twins with the IoT and blockchain technologies has the potential to revolutionize various industries by providing enhanced security, transparency, and data integrity. Thus, this work presents a survey on the innovative theme of digital twins with the integration of Blockchain for various applications. Also, provides challenges and future research directions on this subject. In addition, in this paper, we propose a concept and architecture for integrating digital twins with IoT-based blockchain archives, which allows for real-time monitoring and control of physical assets and processes in a secure and decentralized manner. 
We also discuss the challenges and limitations of this integration, including issues related to data privacy, scalability, and interoperability. Finally, we provide insights into the future scope of this technology and discuss potential research directions for further improving the integration of digital twins with IoT-based blockchain archives. Overall, this paper provides a comprehensive overview of the potential benefits and challenges of integrating digital twins with IoT-based blockchain and lays the foundation for future research in this area.}, } @article {pmid37360131, year = {2023}, author = {Gupta, A and Singh, A}, title = {Prediction Framework on Early Urine Infection in IoT-Fog Environment Using XGBoost Ensemble Model.}, journal = {Wireless personal communications}, volume = {}, number = {}, pages = {1-19}, pmid = {37360131}, issn = {0929-6212}, abstract = {Urine infections are one of the most prevalent concerns for the healthcare industry that may impair the functioning of the kidney and other renal organs. As a result, early diagnosis and treatment of such infections are essential to avert any future complications. Conspicuously, in the current work, an intelligent system for the early prediction of urine infections has been presented. The proposed framework uses IoT-based sensors for data collection, followed by data encoding and infectious risk factor computation using the XGBoost algorithm over the fog computing platform. Finally, the analysis results along with the health-related information of users are stored in the cloud repository for future analysis. For performance validation, extensive experiments have been carried out, and results are calculated based on real-time patient data. 
The statistical findings of accuracy (91.45%), specificity (95.96%), sensitivity (84.79%), precision (95.49%), and f-score(90.12%) reveal the significantly improved performance of the proposed strategy over other baseline techniques.}, } @article {pmid37358302, year = {2023}, author = {Castronova, AM and Nassar, A and Knoben, W and Fienen, MN and Arnal, L and Clark, M}, title = {Community Cloud Computing Infrastructure to Support Equitable Water Research and Education.}, journal = {Ground water}, volume = {61}, number = {5}, pages = {612-616}, doi = {10.1111/gwat.13337}, pmid = {37358302}, issn = {1745-6584}, support = {1849458//Division of Earth Sciences/ ; }, mesh = {*Cloud Computing ; *Groundwater ; Software ; }, } @article {pmid37352938, year = {2023}, author = {Wang, D}, title = {Internet of things sports information collection and sports action simulation based on cloud computing data platform.}, journal = {Preventive medicine}, volume = {173}, number = {}, pages = {107579}, doi = {10.1016/j.ypmed.2023.107579}, pmid = {37352938}, issn = {1096-0260}, mesh = {Humans ; *Cloud Computing ; *Internet of Things ; Computer Simulation ; Computers ; Models, Theoretical ; Internet ; }, abstract = {In recent years, cloud computing technology has shown exponential growth, and the upgrading of hardware and the improvement of computing performance have brought significant changes to the Internet of Things industry. With the changes of the times and the emergence of many new demands, data platforms under cloud computing platforms must make corresponding changes according to the new demands. Among them, the construction of cross regional data centers is particularly important, especially in commercial environments. How to reduce the cost of data centers on cloud computing platforms while ensuring business quality has become a crucial issue. 
Based on the above situation, this article has optimized the bandwidth cost of the data center and solved the problem of big data transmission based on delayed big data windows and multi delay windows. A mathematical model for optimizing bandwidth cost under multi delay windows is proposed. This article also studied sports action simulation, which plays an important role in sports research, film animation, and virtual reality. Simulation actions are usually implemented based on data capture methods. However, these methods typically do not have interactivity with the environment. To enhance the authenticity and interactive ability of simulation action information collection, this article adopts reinforcement learning method for training and design, and applies a system of functions such as collecting human sports data processing. This article applies cloud computing data platforms and sports information collection to sports action simulation, making progress in the development of sports action simulation.}, } @article {pmid37346721, year = {2023}, author = {Zhang, X}, title = {Optimization design of railway logistics center layout based on mobile cloud edge computing.}, journal = {PeerJ. Computer science}, volume = {9}, number = {}, pages = {e1298}, pmid = {37346721}, issn = {2376-5992}, abstract = {With the development of the economy, the importance of railway freight transportation has become essential. The efficiency of a railway logistics center depends on the types, quantities, information exchange, and layout optimization. Edge collaboration technology can consider the advantages of cloud computing's rich computing storage resources and low latency. It can also provide additional computing power and real-time requirements for intelligent railway logistics construction. However, the cloud-side collaboration technology will introduce the wireless communication delay between the mobile terminal and the edge computing server. 
We designed a two-tier unloading strategy algorithm and solved the optimization problem by determining the unloading decision of each task. The cost of every task is calculated in the onboard device calculation, vehicular edge computing (VEC), and cloud computing server calculation. Simulation results show that the proposed method can save about 40% time delay compared to other unloading strategies.}, } @article {pmid37346641, year = {2023}, author = {Abd Rahman, NH and Mohamad Zaki, MH and Hasikin, K and Abd Razak, NA and Ibrahim, AK and Lai, KW}, title = {Predicting medical device failure: a promise to reduce healthcare facilities cost through smart healthcare management.}, journal = {PeerJ. Computer science}, volume = {9}, number = {}, pages = {e1279}, pmid = {37346641}, issn = {2376-5992}, abstract = {BACKGROUND: The advancement of biomedical research generates myriad healthcare-relevant data, including medical records and medical device maintenance information. The COVID-19 pandemic significantly affects the global mortality rate, creating an enormous demand for medical devices. As information technology has advanced, the concept of intelligent healthcare has steadily gained prominence. Smart healthcare utilises a new generation of information technologies, such as the Internet of Things (IoT), big data, cloud computing, and artificial intelligence, to completely transform the traditional medical system. With the intention of presenting the concept of smart healthcare, a predictive model is proposed to predict medical device failure for intelligent management of healthcare services.

METHODS: Present healthcare device management can be improved by proposing a predictive machine learning model that prognosticates the tendency of medical device failures toward smart healthcare. The predictive model is developed based on 8,294 critical medical devices from 44 different types of equipment extracted from 15 healthcare facilities in Malaysia. The model classifies the device into three classes; (i) class 1, where the device is unlikely to fail within the first 3 years of purchase, (ii) class 2, where the device is likely to fail within 3 years from purchase date, and (iii) class 3 where the device is likely to fail more than 3 years after purchase. The goal is to establish a precise maintenance schedule and reduce maintenance and resource costs based on the time to the first failure event. A machine learning and deep learning technique were compared, and the best robust model for smart healthcare was proposed.

RESULTS: This study compares five algorithms in machine learning and three optimizers in deep learning techniques. The best optimized predictive model is based on ensemble classifier and SGDM optimizer, respectively. An ensemble classifier model produces 77.90%, 87.60%, and 75.39% for accuracy, specificity, and precision compared to 70.30%, 83.71%, and 67.15% for deep learning models. The ensemble classifier model improves to 79.50%, 88.36%, and 77.43% for accuracy, specificity, and precision after significant features are identified. The result concludes although machine learning has better accuracy than deep learning, more training time is required, which is 11.49 min instead of 1 min 5 s when deep learning is applied. The model accuracy shall be improved by introducing unstructured data from maintenance notes and is considered the author's future work because dealing with text data is time-consuming. The proposed model has proven to improve the devices' maintenance strategy with a Malaysian Ringgit (MYR) cost reduction of approximately MYR 326,330.88 per year. Therefore, the maintenance cost would drastically decrease if this smart predictive model is included in the healthcare management system.}, } @article {pmid37346547, year = {2023}, author = {Zhang, Y}, title = {A novel fast pedestrian recognition algorithm based on point cloud compression and boundary extraction.}, journal = {PeerJ. Computer science}, volume = {9}, number = {}, pages = {e1426}, pmid = {37346547}, issn = {2376-5992}, abstract = {REASON: Pedestrian recognition has great practical value and is a vital step toward applying path planning and intelligent obstacle avoidance in autonomous driving. In recent years, laser radar has played an essential role in pedestrian detection and recognition in unmanned driving. More accurate high spatial dimension and high-resolution data could be obtained by building a three-dimensional point cloud. 
However, the point cloud data collected by laser radar is often massive and contains a lot of redundancy, which is not conducive to transmission and storage. So, the processing speed grows slow when the original point cloud data is used for recognition. On the other hand, the compression processing of many laser radar point clouds could save computing power and speed up the recognition processing.

METHODOLOGY: The article utilizes the fusion point cloud data from laser radar to investigate the fast pedestrian recognition algorithm. The focus is to compress the collected point cloud data based on the boundary and feature value extraction and then use the point cloud pedestrian recognition algorithm based on image mapping to detect pedestrians. This article proposes a point cloud data compression method based on feature point extraction and reduced voxel grid.

RESULTS: The Karlsruhe Institute of Technology and Toyota Technological Institute data set is used to investigate the proposed algorithm experimentally. The outcomes indicate that the peak signal-to-noise ratio of the compression algorithm is improved by 6.02%. The recognition accuracy is improved by 16.93%, 17.2%, and 16.12%, corresponding to simple, medium, and difficult scenes, respectively, when compared with the point cloud pedestrian recognition method based on image mapping, which uses the random sampling method to compress the point cloud data.

CONCLUSION: The proposed method could achieve data compression better and ensure that many feature points are retained in the compressed Point Cloud Data (PCD). Thus, the compressed PCD achieves pedestrian recognition through an image-based mapping recognition algorithm.}, } @article {pmid37346511, year = {2023}, author = {Zhou, J and Liu, B and Gao, J}, title = {A task scheduling algorithm with deadline constraints for distributed clouds in smart cities.}, journal = {PeerJ. Computer science}, volume = {9}, number = {}, pages = {e1346}, pmid = {37346511}, issn = {2376-5992}, abstract = {Computing technologies and 5G are helpful for the development of smart cities. Cloud computing has become an essential smart city technology. With artificial intelligence technologies, it can be used to integrate data from various devices, such as sensors and cameras, over the network in a smart city for management of the infrastructure and processing of Internet of Things (IoT) data. Cloud computing platforms provide services to users. Task scheduling in the cloud environment is an important technology to shorten computing time and reduce user cost, and thus has many important applications. Recently, a hierarchical distributed cloud service network model for the smart city has been proposed where distributed (micro) clouds, and core clouds are considered to achieve a better network architecture. Task scheduling in the model has attracted many researchers. In this article, we study a task scheduling problem with deadline constraints in the distributed cloud model and aim to reduce the communication network's data load and provide low-latency services from the cloud server in the local area, hence promoting the efficiency of cloud computing services for local users. To solve the task scheduling problem efficiently, we present an efficient local search algorithm to solve the problem. In the algorithm, a greedy search strategy is proposed to improve the current solutions iteratively. 
Moreover, randomized methods are used in selecting tasks and virtual machines for reassigning tasks. We carried out extensive computational experiments to evaluate the performance of our algorithm and compared experimental results with Swarm-based approaches, such as GA and PSO. The comparative results show that the proposed local search algorithm performs better than the comparative algorithms on the task scheduling problem.}, } @article {pmid37342652, year = {2023}, author = {Nguyen, T and Bian, X and Roberson, D and Khanna, R and Chen, Q and Yan, C and Beck, R and Worman, Z and Meerzaman, D}, title = {Multi-omics Pathways Workflow (MOPAW): An Automated Multi-omics Workflow on the Cancer Genomics Cloud.}, journal = {Cancer informatics}, volume = {22}, number = {}, pages = {11769351231180992}, pmid = {37342652}, issn = {1176-9351}, abstract = {INTRODUCTION: In the era of big data, gene-set pathway analyses derived from multi-omics are exceptionally powerful. When preparing and analyzing high-dimensional multi-omics data, the installation process and programing skills required to use existing tools can be challenging. This is especially the case for those who are not familiar with coding. In addition, implementation with high performance computing solutions is required to run these tools efficiently.

METHODS: We introduce an automatic multi-omics pathway workflow, a point and click graphical user interface to Multivariate Single Sample Gene Set Analysis (MOGSA), hosted on the Cancer Genomics Cloud by Seven Bridges Genomics. This workflow leverages the combination of different tools to perform data preparation for each given data types, dimensionality reduction, and MOGSA pathway analysis. The Omics data includes copy number alteration, transcriptomics data, proteomics and phosphoproteomics data. We have also provided an additional workflow to help with downloading data from The Cancer Genome Atlas and Clinical Proteomic Tumor Analysis Consortium and preprocessing these data to be used for this multi-omics pathway workflow.

RESULTS: The main outputs of this workflow are the distinct pathways for subgroups of interest provided by users, which are displayed in heatmaps if identified. In addition to this, graphs and tables are provided to users for reviewing.

CONCLUSION: Multi-omics Pathway Workflow requires no coding experience. Users can bring their own data or download and preprocess public datasets from The Cancer Genome Atlas and Clinical Proteomic Tumor Analysis Consortium using our additional workflow based on the samples of interest. Distinct overactivated or deactivated pathways for groups of interest can be found. This useful information is important in effective therapeutic targeting.}, } @article {pmid37338245, year = {2024}, author = {Hotchkiss, J and Ridderman, E and Buftin, W}, title = {Overall US Hospice Quality According to Decedent Caregivers-Natural Language Processing and Sentiment Analysis of 3389 Online Caregiver Reviews.}, journal = {The American journal of hospice & palliative care}, volume = {41}, number = {5}, pages = {527-544}, doi = {10.1177/10499091231185593}, pmid = {37338245}, issn = {1938-2715}, mesh = {Aged ; Humans ; United States ; *Hospice Care/psychology ; *Hospices ; Caregivers/psychology ; Sentiment Analysis ; Natural Language Processing ; Medicare ; Pain ; }, abstract = {Objectives: With an untapped quality resource in online hospice reviews, study aims were exploring hospice caregiver experiences and assessing their expectations of the hospice Medicare benefit. Methods: Topical and sentiment analysis was conducted using natural language processing (NLP) of Google and Yelp caregiver reviews (n = 3393) between 2013-2023 using Google NLP. Stratified sampling weighted by hospice size to approximate the daily census of US hospice enrollees. Results: Overall caregiver sentiment of hospice care was neutral (S = .14). Therapeutic, achievable expectations and misperceptions, unachievable expectations were, respectively, the most and least prevalent domains. Four topics with the highest prevalence, all had moderately positive sentiments: caring staff, staff professionalism and knowledge; emotional, spiritual, bereavement support; and responsive, timely or helpful. 
Lowest sentiment scores were lack of staffing; promises made, but not kept, pain, symptoms and medications; sped-up death, hastened, or sedated; and money, staff motivations. Significance of Results: Caregivers' overall rating of hospice was neutral, largely due to moderate sentiment on achievable expectations in two-thirds of reviews mixed with unachievable expectations in one-sixth of reviews. Hospice caregivers were most likely to recommend hospices with caring staff, providing quality care, responsive to requests, and offering family support. Lack of staff, inadequate pain-symptom management were the two biggest barriers to hospice quality. All eight CAHPS measures were found in the discovered review topics. Close-ended CAHPS scores and open-ended online reviews have complementary insights. Future research should explore associations between CAHPS and review insights.}, } @article {pmid37334641, year = {2023}, author = {Schnabel, B and Gebert, J and Schneider, R and Helwig, P}, title = {Towards the simulation of bone-implant systems with a stratified material model.}, journal = {Technology and health care : official journal of the European Society for Engineering and Medicine}, volume = {31}, number = {4}, pages = {1555-1566}, doi = {10.3233/THC-237001}, pmid = {37334641}, issn = {1878-7401}, mesh = {Humans ; Computer Simulation ; *Femur ; Finite Element Analysis ; *Image Processing, Computer-Assisted ; Reproducibility of Results ; }, abstract = {BACKGROUND: The clinical performance of medical devices is becoming increasingly important for the requirements of modern development processes and the associated regulations. However, the evidence for this performance can often only be obtained very late in the development process via clinical trials or studies.

OBJECTIVE: The purpose of the presented work is to show that the simulation of bone-implant systems has advanced in various aspects, including cloud-based execution, Virtual Clinical Trials, and material modeling towards a point where widespread utilization in healthcare for procedure planning and enhancing practices seems feasible. But this will only hold true if the virtual cohort data built from clinical Computed Tomography data are collected and analysed with care.

METHODS: An overview of the principal steps necessary to perform Finite Element Method based structural mechanical simulations of bone-implant systems based on clinical imaging data is presented. Since these data form the baseline for virtual cohort construction, we present an enhancement method to make them more accurate and reliable.

RESULTS: The findings of our work comprise the initial step towards a virtual cohort for the evaluation of proximal femur implants. In addition, results of our proposed enhancement methodology for clinical Computed Tomography data that demonstrate the necessity for the usage of multiple image reconstructions are presented.

CONCLUSION: Simulation methodologies and pipelines nowadays are mature and have turnaround times that allow for day-to-day use. However, small changes in the imaging and the preprocessing of data can have a significant impact on the obtained results. Consequently, first steps towards virtual clinical trials, like collecting bone samples, are done, but the reliability of the input data remains subject to further research and development.}, } @article {pmid37328705, year = {2023}, author = {Yang, M and Bo, Z and Xu, T and Xu, B and Wang, D and Zheng, H}, title = {Uni-GBSA: an open-source and web-based automatic workflow to perform MM/GB(PB)SA calculations for virtual screening.}, journal = {Briefings in bioinformatics}, volume = {24}, number = {4}, pages = {}, doi = {10.1093/bib/bbad218}, pmid = {37328705}, issn = {1477-4054}, mesh = {Workflow ; Entropy ; *Molecular Dynamics Simulation ; *Drug Discovery ; Ligands ; Internet ; Protein Binding ; }, abstract = {Binding free energy calculation of a ligand to a protein receptor is a fundamental objective in drug discovery. Molecular mechanics/Generalized-Born (Poisson-Boltzmann) surface area (MM/GB(PB)SA) is one of the most popular methods for binding free energy calculations. It is more accurate than most scoring functions and more computationally efficient than alchemical free energy methods. Several open-source tools for performing MM/GB(PB)SA calculations have been developed, but they have limitations and high entry barriers to users. Here, we introduce Uni-GBSA, a user-friendly automatic workflow to perform MM/GB(PB)SA calculations, which can perform topology preparation, structure optimization, binding free energy calculation and parameter scanning for MM/GB(PB)SA calculations. It also offers a batch mode that evaluates thousands of molecules against one protein target in parallel for efficient application in virtual screening. 
The default parameters are selected after systematic testing on the PDBBind-2011 refined dataset. In our case studies, Uni-GBSA produced a satisfactory correlation with the experimental binding affinities and outperformed AutoDock Vina in molecular enrichment. Uni-GBSA is available as an open-source package at https://github.com/dptech-corp/Uni-GBSA. It can also be accessed for virtual screening from the Hermite web platform at https://hermite.dp.tech. A free Uni-GBSA web server of a lab version is available at https://labs.dp.tech/projects/uni-gbsa/. This increases user-friendliness because the web server frees users from package installations and provides users with validated workflows for input data and parameter settings, cloud computing resources for efficient job completions, a user-friendly interface and professional support and maintenance.}, } @article {pmid37317615, year = {2023}, author = {Rathke, BH and Yu, H and Huang, H}, title = {What Remains Now That the Fear Has Passed? Developmental Trajectory Analysis of COVID-19 Pandemic for Co-occurrences of Twitter, Google Trends, and Public Health Data.}, journal = {Disaster medicine and public health preparedness}, volume = {17}, number = {}, pages = {e471}, doi = {10.1017/dmp.2023.101}, pmid = {37317615}, issn = {1938-744X}, mesh = {Humans ; United States/epidemiology ; *COVID-19/epidemiology/psychology ; Pandemics ; Public Health ; *Social Media ; Search Engine ; Fear ; }, abstract = {OBJECTIVE: The rapid onset of coronavirus disease 2019 (COVID-19) created a complex virtual collective consciousness. Misinformation and polarization were hallmarks of the pandemic in the United States, highlighting the importance of studying public opinion online. Humans express their thoughts and feelings more openly than ever before on social media; co-occurrence of multiple data sources have become valuable for monitoring and understanding public sentimental preparedness and response to an event within our society.

METHODS: In this study, Twitter and Google Trends data were used as the co-occurrence data for the understanding of the dynamics of sentiment and interest during the COVID-19 pandemic in the United States from January 2020 to September 2021. Developmental trajectory analysis of Twitter sentiment was conducted using corpus linguistic techniques and word cloud mapping to reveal 8 positive and negative sentiments and emotions. Machine learning algorithms were used to implement the opinion mining how Twitter sentiment was related to Google Trends interest with historical COVID-19 public health data.

RESULTS: The sentiment analysis went beyond polarity to detect specific feelings and emotions during the pandemic.

CONCLUSIONS: The discoveries on the behaviors of emotions at each stage of the pandemic were presented from the emotion detection when associated with the historical COVID-19 data and Google Trends data.}, } @article {pmid37315445, year = {2023}, author = {Guzman, NA and Guzman, DE and Blanc, T}, title = {Advancements in portable instruments based on affinity-capture-migration and affinity-capture-separation for use in clinical testing and life science applications.}, journal = {Journal of chromatography. A}, volume = {1704}, number = {}, pages = {464109}, doi = {10.1016/j.chroma.2023.464109}, pmid = {37315445}, issn = {1873-3778}, mesh = {Humans ; *Pandemics ; *COVID-19/diagnosis ; Laboratories ; Smartphone ; Immunoassay/methods ; COVID-19 Testing ; }, abstract = {The shift from testing at centralized diagnostic laboratories to remote locations is being driven by the development of point-of-care (POC) instruments and represents a transformative moment in medicine. POC instruments address the need for rapid results that can inform faster therapeutic decisions and interventions. These instruments are especially valuable in the field, such as in an ambulance, or in remote and rural locations. The development of telehealth, enabled by advancements in digital technologies like smartphones and cloud computing, is also aiding in this evolution, allowing medical professionals to provide care remotely, potentially reducing healthcare costs and improving patient longevity. One notable POC device is the lateral flow immunoassay (LFIA), which played a major role in addressing the COVID-19 pandemic due to its ease of use, rapid analysis time, and low cost. However, LFIA tests exhibit relatively low analytical sensitivity and provide semi-quantitative information, indicating either a positive, negative, or inconclusive result, which can be attributed to its one-dimensional format. 
Immunoaffinity capillary electrophoresis (IACE), on the other hand, offers a two-dimensional format that includes an affinity-capture step of one or more matrix constituents followed by release and electrophoretic separation. The method provides greater analytical sensitivity, and quantitative information, thereby reducing the rate of false positives, false negatives, and inconclusive results. Combining LFIA and IACE technologies can thus provide an effective and economical solution for screening, confirming results, and monitoring patient progress, representing a key strategy in advancing diagnostics in healthcare.}, } @article {pmid37313273, year = {2023}, author = {Alshammari, A and Fayez Alanazi, M}, title = {Use of Technology in Enhancing Learning Among Nurses in Saudi Arabia; a Systematic Review.}, journal = {Journal of multidisciplinary healthcare}, volume = {16}, number = {}, pages = {1587-1599}, pmid = {37313273}, issn = {1178-2390}, abstract = {The landscape of teaching and learning, particularly in the realm of technology-supported education, is being transformed by the ongoing presence of portable digital assistant devices and other technological tools. Such technologies have become an integral aspect of learning these days. The use of Virtual Reality, Augmented Reality, cloud computing, and social media through platforms such as Twitter, Dropbox, Google Apps, and YouTube has become the norm in modern education and has greatly enhanced the quality of higher nursing education. Therefore, this study aims to synthesize evidence on the effectiveness of technology in nursing education in Saudi Arabia. The study used a systematic review methodology to extract relevant studies from databases and reference lists of related literature reviews. Two independent reviewers screened the title, abstract, and full texts based on predefined eligibility criteria. The review identified four themes from the data retrieved from 15 published articles. 
The themes include attitude towards e-learning, challenges and quality related to e-learning, social media and smart phones usage, virtual reality and simulation experience. Mixed attitudes have been identified among the participants of the selected studies. Various challenges linked with e-learning, social media usage, smart phones, and simulation have been identified inclusive of technical issues, lack of awareness, lack of training, etc. The findings have also stated that awareness level should be increased related to e-learning for better outcomes in Saudi Arabia. The findings suggest that technology has the potential to improve learning outcomes for nurses, including those involved in research. Therefore, it is crucial to ensure that both educators and students receive adequate training on how to effectively use the upcoming technology in Saudi Arabia.}, } @article {pmid37312944, year = {2023}, author = {Farooq, MS and Riaz, S and Tehseen, R and Farooq, U and Saleem, K}, title = {Role of Internet of things in diabetes healthcare: Network infrastructure, taxonomy, challenges, and security model.}, journal = {Digital health}, volume = {9}, number = {}, pages = {20552076231179056}, pmid = {37312944}, issn = {2055-2076}, abstract = {The Internet of things (IoT) is an emerging technology that enables ubiquitous devices to connect with the Internet. IoT technology has revolutionized the medical and healthcare industry by interconnecting smart devices and sensors. IoT-based devices and biosensors are ideal to detect diabetes disease by collecting the accurate value of glucose continuously. Diabetes is one of the well-known and major chronic diseases that has a worldwide social impact on community life. Blood glucose monitoring is a challenging task, and there is a need to propose a proper architecture of the noninvasive glucose sensing and monitoring mechanism, which could make diabetic people aware of self-management techniques. 
This survey presents a rigorous discussion of diabetes types and presents detection techniques based on IoT technology. In this research, an IoT-based healthcare network infrastructure has been proposed for monitoring diabetes disease based on big data analytics, cloud computing, and machine learning. The proposed infrastructure could handle the symptoms of diabetes, collect data, analyze it, and then transmit the results to the server for the next action. Besides, we present an inclusive survey on IoT-based diabetes monitoring applications, services, and proposed solutions. Furthermore, based on IoT technology, the diabetes disease management taxonomy has also been presented. Finally, we present the attacks taxonomy, discuss challenges, and propose a lightweight security model in order to secure the patient's health data.}, } @article {pmid37310789, year = {2023}, author = {Campi, D and Mounet, N and Gibertini, M and Pizzi, G and Marzari, N}, title = {Expansion of the Materials Cloud 2D Database.}, journal = {ACS nano}, volume = {17}, number = {12}, pages = {11268-11278}, pmid = {37310789}, issn = {1936-086X}, abstract = {Two-dimensional (2D) materials are among the most promising candidates for beyond-silicon electronic, optoelectronic, and quantum computing applications. Recently, their recognized importance sparked a push to discover and characterize novel 2D materials. Within a few years, the number of experimentally exfoliated or synthesized 2D materials went from a few to more than a hundred, with the number of theoretically predicted compounds reaching a few thousand. In 2018 we first contributed to this effort with the identification of 1825 compounds that are either easily (1036) or potentially (789) exfoliable from experimentally known 3D compounds. 
Here, we report on a major expansion of this 2D portfolio thanks to the extension of the screening protocol to an additional experimental database (MPDS) as well as the updated versions of the two databases (ICSD and COD) used in our previous work. This expansion leads to the discovery of an additional 1252 monolayers, bringing the total to 3077 compounds and, notably, almost doubling the number of easily exfoliable materials to 2004. We optimize the structural properties of all these monolayers and explore their electronic structure with a particular emphasis on those rare large-bandgap 2D materials that could be precious in isolating 2D field-effect-transistor channels. Finally, for each material containing up to 6 atoms per unit cell, we identify the best candidates to form commensurate heterostructures, balancing requirements on supercell size and minimal strain.}, } @article {pmid37304830, year = {2023}, author = {Song, Z and Ma, H and Sun, S and Xin, Y and Zhang, R}, title = {Rainbow: reliable personally identifiable information retrieval across multi-cloud.}, journal = {Cybersecurity}, volume = {6}, number = {1}, pages = {19}, pmid = {37304830}, issn = {2523-3246}, abstract = {Personally identifiable information (PII) refers to any information that links to an individual. Sharing PII is extremely useful in public affairs yet hard to implement due to the worries about privacy violations. Building a PII retrieval service over multi-cloud, which is a modern strategy to make services stable where multiple servers are deployed, seems to be a promising solution. However, three major technical challenges remain to be solved. The first is the privacy and access control of PII. In fact, each entry in PII can be shared to different users with different access rights. Hence, flexible and fine-grained access control is needed. 
Second, a reliable user revocation mechanism is required to ensure that users can be revoked efficiently, even if few cloud servers are compromised or collapse, to avoid data leakage. Third, verifying the correctness of received PII and locating a misbehaved server when wrong data are returned is crucial to guarantee user's privacy, but challenging to realize. In this paper, we propose Rainbow, a secure and practical PII retrieval scheme to solve the above issues. In particular, we design an important cryptographic tool, called Reliable Outsourced Attribute Based Encryption (ROABE) which provides data privacy, flexible and fine-grained access control, reliable immediate user revocation and verification for multiple servers simultaneously, to support Rainbow. Moreover, we present how to build Rainbow with ROABE and several necessary cloud techniques in real world. To evaluate the performance, we deploy Rainbow on multiple mainstream clouds, namely, AWS, GCP and Microsoft Azure, and experiment in browsers on mobile phones and computers. Both theoretical analysis and experimental results indicate that Rainbow is secure and practical.}, } @article {pmid37303980, year = {2023}, author = {Rodrigues, VF and da Rosa Righi, R and da Costa, CA and Zeiser, FA and Eskofier, B and Maier, A and Kim, D}, title = {Digital health in smart cities: Rethinking the remote health monitoring architecture on combining edge, fog, and cloud.}, journal = {Health and technology}, volume = {13}, number = {3}, pages = {449-472}, pmid = {37303980}, issn = {2190-7188}, abstract = {PURPOSE: Smart cities that support the execution of health services are more and more in evidence today. Here, it is mainstream to use IoT-based vital sign data to serve a multi-tier architecture. The state-of-the-art proposes the combination of edge, fog, and cloud computing to support critical health applications efficiently. 
However, to the best of our knowledge, initiatives typically present the architectures, not bringing adaptation and execution optimizations to address health demands fully.

METHODS: This article introduces the VitalSense model, which provides a hierarchical multi-tier remote health monitoring architecture in smart cities by combining edge, fog, and cloud computing.

RESULTS: Although using a traditional composition, our contributions appear in handling each infrastructure level. We explore adaptive data compression and homomorphic encryption at the edge, a multi-tier notification mechanism, low latency health traceability with data sharding, a Serverless execution engine to support multiple fog layers, and an offloading mechanism based on service and person computing priorities.

CONCLUSIONS: This article details the rationale behind these topics, describing VitalSense use cases for disruptive healthcare services and preliminary insights regarding prototype evaluation.}, } @article {pmid37300076, year = {2023}, author = {Aqeel, I and Khormi, IM and Khan, SB and Shuaib, M and Almusharraf, A and Alam, S and Alkhaldi, NA}, title = {Load Balancing Using Artificial Intelligence for Cloud-Enabled Internet of Everything in Healthcare Domain.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {11}, pages = {}, pmid = {37300076}, issn = {1424-8220}, mesh = {Animals ; Horses ; *Artificial Intelligence ; *Algorithms ; Intelligence ; Awareness ; Internet ; }, abstract = {The emergence of the Internet of Things (IoT) and its subsequent evolution into the Internet of Everything (IoE) is a result of the rapid growth of information and communication technologies (ICT). However, implementing these technologies comes with certain obstacles, such as the limited availability of energy resources and processing power. Consequently, there is a need for energy-efficient and intelligent load-balancing models, particularly in healthcare, where real-time applications generate large volumes of data. This paper proposes a novel, energy-aware artificial intelligence (AI)-based load balancing model that employs the Chaotic Horse Ride Optimization Algorithm (CHROA) and big data analytics (BDA) for cloud-enabled IoT environments. The CHROA technique enhances the optimization capacity of the Horse Ride Optimization Algorithm (HROA) using chaotic principles. The proposed CHROA model balances the load, optimizes available energy resources using AI techniques, and is evaluated using various metrics. Experimental results show that the CHROA model outperforms existing models. 
For instance, while the Artificial Bee Colony (ABC), Gravitational Search Algorithm (GSA), and Whale Defense Algorithm with Firefly Algorithm (WD-FA) techniques attain average throughputs of 58.247 Kbps, 59.957 Kbps, and 60.819 Kbps, respectively, the CHROA model achieves an average throughput of 70.122 Kbps. The proposed CHROA-based model presents an innovative approach to intelligent load balancing and energy optimization in cloud-enabled IoT environments. The results highlight its potential to address critical challenges and contribute to developing efficient and sustainable IoT/IoE solutions.}, } @article {pmid37299993, year = {2023}, author = {Iqbal, F and Altaf, A and Waris, Z and Aray, DG and Flores, MAL and Díez, IT and Ashraf, I}, title = {Blockchain-Modeled Edge-Computing-Based Smart Home Monitoring System with Energy Usage Prediction.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {11}, pages = {}, pmid = {37299993}, issn = {1424-8220}, support = {N/A//European University of Atlantics/ ; }, mesh = {*Blockchain ; *Internet of Things ; Machine Learning ; Memory, Long-Term ; Microwaves ; }, abstract = {Internet of Things (IoT) has made significant strides in energy management systems recently. Due to the continually increasing cost of energy, supply-demand disparities, and rising carbon footprints, the need for smart homes for monitoring, managing, and conserving energy has increased. In IoT-based systems, device data are delivered to the network edge before being stored in the fog or cloud for further transactions. This raises worries about the data's security, privacy, and veracity. It is vital to monitor who accesses and updates this information to protect IoT end-users linked to IoT devices. Smart meters are installed in smart homes and are susceptible to numerous cyber attacks. Access to IoT devices and related data must be secured to prevent misuse and protect IoT users' privacy. 
The purpose of this research was to design a blockchain-based edge computing method for securing the smart home system, in conjunction with machine learning techniques, in order to construct a secure smart home system with energy usage prediction and user profiling. The research proposes a blockchain-based smart home system that can continuously monitor IoT-enabled smart home appliances such as smart microwaves, dishwashers, furnaces, and refrigerators, among others. An approach based on machine learning was utilized to train the auto-regressive integrated moving average (ARIMA) model for energy usage prediction, which is provided in the user's wallet, to estimate energy consumption and maintain user profiles. The model was tested using the moving average statistical model, the ARIMA model, and the deep-learning-based long short-term memory (LSTM) model on a dataset of smart-home-based energy usage under changing weather conditions. The findings of the analysis reveal that the LSTM model accurately forecasts the energy usage of smart homes.}, } @article {pmid37299938, year = {2023}, author = {Yavari, A and Korala, H and Georgakopoulos, D and Kua, J and Bagha, H}, title = {Sazgar IoT: A Device-Centric IoT Framework and Approximation Technique for Efficient and Scalable IoT Data Processing.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {11}, pages = {}, pmid = {37299938}, issn = {1424-8220}, mesh = {Humans ; *COVID-19/diagnosis ; *Internet of Things ; Data Analysis ; Research Design ; }, abstract = {The Internet of Things (IoT) plays a fundamental role in monitoring applications; however, existing approaches relying on cloud and edge-based IoT data analysis encounter issues such as network delays and high costs, which can adversely impact time-sensitive applications. To address these challenges, this paper proposes an IoT framework called Sazgar IoT. 
Unlike existing solutions, Sazgar IoT leverages only IoT devices and IoT data analysis approximation techniques to meet the time-bounds of time-sensitive IoT applications. In this framework, the computing resources onboard the IoT devices are utilised to process the data analysis tasks of each time-sensitive IoT application. This eliminates the network delays associated with transferring large volumes of high-velocity IoT data to cloud or edge computers. To ensure that each task meets its application-specific time-bound and accuracy requirements, we employ approximation techniques for the data analysis tasks of time-sensitive IoT applications. These techniques take into account the available computing resources and optimise the processing accordingly. To evaluate the effectiveness of Sazgar IoT, experimental validation has been conducted. The results demonstrate that the framework successfully meets the time-bound and accuracy requirements of the COVID-19 citizen compliance monitoring application by effectively utilising the available IoT devices. 
The experimental validation further confirms that Sazgar IoT is an efficient and scalable solution for IoT data processing, addressing existing network delay issues for time-sensitive applications and significantly reducing the cost related to cloud and edge computing devices procurement, deployment, and maintenance.}, } @article {pmid37299817, year = {2023}, author = {Zhang, X and Cheng, Z and Du, L and Du, Y}, title = {Progressive Classifier Mechanism for Bridge Expansion Joint Health Status Monitoring System Based on Acoustic Sensors.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {11}, pages = {}, pmid = {37299817}, issn = {1424-8220}, mesh = {*Acoustics ; *Algorithms ; Cloud Computing ; Computer Simulation ; Health Status ; }, abstract = {The application of IoT (Internet of Things) technology to the health monitoring of expansion joints is of great importance in enhancing the efficiency of bridge expansion joint maintenance. In this study, a low-power, high-efficiency, end-to-cloud coordinated monitoring system analyzes acoustic signals to identify faults in bridge expansion joints. To address the issue of scarce authentic data related to bridge expansion joint failures, an expansion joint damage simulation data collection platform is established for well-annotated datasets. Based on this, a progressive two-level classifier mechanism is proposed, combining template matching based on AMPD (Automatic Peak Detection) and deep learning algorithms based on VMD (Variational Mode Decomposition), denoising, and utilizing edge and cloud computing power efficiently. The simulation-based datasets were used to test the two-level algorithm, with the first-level edge-end template matching algorithm achieving fault detection rates of 93.3% and the second-level cloud-based deep learning algorithm achieving classification accuracy of 98.4%. 
The proposed system in this paper has demonstrated efficient performance in monitoring the health of expansion joints, according to the aforementioned results.}, } @article {pmid37299800, year = {2023}, author = {Hou, KM and Diao, X and Shi, H and Ding, H and Zhou, H and de Vaulx, C}, title = {Trends and Challenges in AIoT/IIoT/IoT Implementation.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {11}, pages = {}, pmid = {37299800}, issn = {1424-8220}, mesh = {Humans ; *Ecosystem ; Reproducibility of Results ; *Agriculture ; Autonomous Vehicles ; Data Science ; }, abstract = {For the next coming years, metaverse, digital twin and autonomous vehicle applications are the leading technologies for many complex applications hitherto inaccessible such as health and life sciences, smart home, smart agriculture, smart city, smart car and logistics, Industry 4.0, entertainment (video game) and social media applications, due to recent tremendous developments in process modeling, supercomputing, cloud data analytics (deep learning, etc.), communication network and AIoT/IIoT/IoT technologies. AIoT/IIoT/IoT is a crucial research field because it provides the essential data to fuel metaverse, digital twin, real-time Industry 4.0 and autonomous vehicle applications. However, the science of AIoT is inherently multidisciplinary, and therefore, it is difficult for readers to understand its evolution and impacts. Our main contribution in this article is to analyze and highlight the trends and challenges of the AIoT technology ecosystem including core hardware (MCU, MEMS/NEMS sensors and wireless access medium), core software (operating system and protocol communication stack) and middleware (deep learning on a microcontroller: TinyML). Two low-powered AI technologies emerge: TinyML and neuromorphic computing, but only one AIoT/IIoT/IoT device implementation using TinyML dedicated to strawberry disease detection as a case study. 
So far, despite the very rapid progress of AIoT/IIoT/IoT technologies, several challenges remain to be overcome such as safety, security, latency, interoperability and reliability of sensor data, which are essential characteristics to meet the requirements of metaverse, digital twin, autonomous vehicle and Industry 4.0 applications.}, } @article {pmid37299731, year = {2023}, author = {AlQahtani, SA}, title = {An Evaluation of e-Health Service Performance through the Integration of 5G IoT, Fog, and Cloud Computing.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {11}, pages = {}, pmid = {37299731}, issn = {1424-8220}, mesh = {*Telemedicine/instrumentation/methods ; Cloud Computing ; Internet of Things ; Neural Networks, Computer ; Computer Simulation ; }, abstract = {In recent years, Internet of Things (IoT) advancements have led to the development of vastly improved remote healthcare services. Scalability, high bandwidth, low latency, and low power consumption are all essential features of the applications that make these services possible. An upcoming healthcare system and wireless sensor network that can fulfil these needs is based on fifth-generation network slicing. For better resource management, organizations can implement network slicing, which partitions the physical network into distinct logical slices according to quality of service (QoS) needs. Based on the findings of this research, an IoT-fog-cloud architecture is proposed for use in e-Health services. The framework is made up of three different but interconnected systems: a cloud radio access network, a fog computing system, and a cloud computing system. A queuing network serves as a model for the proposed system. The model's constituent parts are then subjected to analysis. To assess the system's performance, we run a numerical example simulation using Java modelling tools and then analyze the results to identify the key performance parameters. 
The analytical formulas that were derived ensure the precision of the results. Finally, the results show that the proposed model improves eHealth services' quality of service in an efficient way by selecting the right slice compared to the traditional systems.}, } @article {pmid37274449, year = {2023}, author = {Verma, P and Gupta, A and Kumar, M and Gill, SS}, title = {FCMCPS-COVID: AI propelled fog-cloud inspired scalable medical cyber-physical system, specific to coronavirus disease.}, journal = {Internet of things (Amsterdam, Netherlands)}, volume = {23}, number = {}, pages = {100828}, pmid = {37274449}, issn = {2542-6605}, abstract = {Medical cyber-physical systems (MCPS) firmly integrate a network of medical objects. These systems are highly efficacious and have been progressively used in the Healthcare 4.0 to achieve continuous high-quality services. Healthcare 4.0 encompasses numerous emerging technologies and their applications have been realized in the monitoring of a variety of virus outbreaks. As a growing healthcare trend, coronavirus disease (COVID-19) can be cured and its spread can be prevented using MCPS. This virus spreads from human to human and can have devastating consequences. Moreover, with the alarmingly rising death rate and new cases across the world, there is an urgent need for continuous identification and screening of infected patients to mitigate their spread. Motivated by the facts, we propose a framework for early detection, prevention, and control of the COVID-19 outbreak by using novel Industry 5.0 technologies. The proposed framework uses a dimensionality reduction technique in the fog layer, allowing high-quality data to be used for classification purposes. The fog layer also uses the ensemble learning-based data classification technique for the detection of COVID-19 patients based on the symptomatic dataset. In addition, in the cloud layer, social network analysis (SNA) has been performed to control the spread of COVID-19. 
The experimental results reveal that compared with state-of-the-art methods, the proposed framework achieves better results in terms of accuracy (82.28 %), specificity (91.42 %), sensitivity (90 %) and stability with effective response time. Furthermore, the utilization of CVI-based alert generation at the fog layer improves the novelty aspects of the proposed system.}, } @article {pmid37274420, year = {2023}, author = {Rezazadeh, B and Asghari, P and Rahmani, AM}, title = {Computer-aided methods for combating Covid-19 in prevention, detection, and service provision approaches.}, journal = {Neural computing & applications}, volume = {35}, number = {20}, pages = {14739-14778}, pmid = {37274420}, issn = {0941-0643}, abstract = {The infectious disease Covid-19 has been causing severe social, economic, and human suffering across the globe since 2019. The countries have utilized different strategies in the last few years to combat Covid-19 based on their capabilities, technological infrastructure, and investments. A massive epidemic like this cannot be controlled without an intelligent and automatic health care system. The first reaction to the disease outbreak was lockdown, and researchers focused more on developing methods to diagnose the disease and recognize its behavior. However, as the new lifestyle becomes more normalized, research has shifted to utilizing computer-aided methods to monitor, track, detect, and treat individuals and provide services to citizens. Thus, the Internet of things, based on fog-cloud computing, using artificial intelligence approaches such as machine learning, and deep learning are practical concepts. This article aims to survey computer-based approaches to combat Covid-19 based on prevention, detection, and service provision. 
Technically and statistically, this article analyzes current methods, categorizes them, presents a technical taxonomy, and explores future and open issues.}, } @article {pmid37266260, year = {2023}, author = {Healthcare Engineering, JO}, title = {Retracted: Medical Equipment Comprehensive Management System Based on Cloud Computing and Internet of Things.}, journal = {Journal of healthcare engineering}, volume = {2023}, number = {}, pages = {9805036}, pmid = {37266260}, issn = {2040-2309}, abstract = {[This retracts the article DOI: 10.1155/2021/6685456.].}, } @article {pmid37266231, year = {2023}, author = {Healthcare Engineering, JO}, title = {Retracted: Digital Forensic Investigation of Healthcare Data in Cloud Computing Environment.}, journal = {Journal of healthcare engineering}, volume = {2023}, number = {}, pages = {9782643}, pmid = {37266231}, issn = {2040-2309}, abstract = {[This retracts the article DOI: 10.1155/2022/9709101.].}, } @article {pmid37259021, year = {2023}, author = {O'Connell, KA and Yosufzai, ZB and Campbell, RA and Lobb, CJ and Engelken, HT and Gorrell, LM and Carlson, TB and Catana, JJ and Mikdadi, D and Bonazzi, VR and Klenk, JA}, title = {Accelerating genomic workflows using NVIDIA Parabricks.}, journal = {BMC bioinformatics}, volume = {24}, number = {1}, pages = {221}, pmid = {37259021}, issn = {1471-2105}, mesh = {Workflow ; *Computer Graphics ; *Software ; Genomics ; }, abstract = {BACKGROUND: As genome sequencing becomes better integrated into scientific research, government policy, and personalized medicine, the primary challenge for researchers is shifting from generating raw data to analyzing these vast datasets. Although much work has been done to reduce compute times using various configurations of traditional CPU computing infrastructures, Graphics Processing Units (GPUs) offer opportunities to accelerate genomic workflows by orders of magnitude. 
Here we benchmark one GPU-accelerated software suite called NVIDIA Parabricks on Amazon Web Services (AWS), Google Cloud Platform (GCP), and an NVIDIA DGX cluster. We benchmarked six variant calling pipelines, including two germline callers (HaplotypeCaller and DeepVariant) and four somatic callers (Mutect2, Muse, LoFreq, SomaticSniper).

RESULTS: We achieved up to 65 × acceleration with germline variant callers, bringing HaplotypeCaller runtimes down from 36 h to 33 min on AWS, 35 min on GCP, and 24 min on the NVIDIA DGX. Somatic callers exhibited more variation between the number of GPUs and computing platforms. On cloud platforms, GPU-accelerated germline callers resulted in cost savings compared with CPU runs, whereas some somatic callers were more expensive than CPU runs because their GPU acceleration was not sufficient to overcome the increased GPU cost.

CONCLUSIONS: Germline variant callers scaled well with the number of GPUs across platforms, whereas somatic variant callers exhibited more variation in the number of GPUs with the fastest runtimes, suggesting that, at least with the version of Parabricks used here, these workflows are less GPU optimized and require benchmarking on the platform of choice before being deployed at production scales. Our study demonstrates that GPUs can be used to greatly accelerate genomic workflows, thus bringing closer to grasp urgent societal advances in the areas of biosurveillance and personalized medicine.}, } @article {pmid37258867, year = {2023}, author = {Callaghan, M}, title = {Cloud Computing for Metagenomics: Building a Personalized Computational Platform for Pipeline Analyses.}, journal = {Methods in molecular biology (Clifton, N.J.)}, volume = {2649}, number = {}, pages = {261-279}, pmid = {37258867}, issn = {1940-6029}, mesh = {*Software ; *Metagenomics ; Cloud Computing ; Computers ; Web Browser ; Computational Biology/methods ; }, abstract = {Cloud Computing services such as Microsoft Azure, Amazon Web Services, and Google Cloud provide a range of tools and services that enable scientists to rapidly prototype, build, and deploy platforms for their computational experiments.This chapter describes a protocol to deploy and configure an Ubuntu Linux Virtual Machine in the Microsoft Azure cloud, which includes Minconda Python, a Jupyter Lab server, and the QIIME toolkit configured for access through a web browser to facilitate a typical metagenomics analysis pipeline.}, } @article {pmid37252914, year = {2023}, author = {Arefian, Z and Khayyambashi, MR and Movahhedinia, N}, title = {Delay reduction in MTC using SDN based offloading in Fog computing.}, journal = {PloS one}, volume = {18}, number = {5}, pages = {e0286483}, pmid = {37252914}, issn = {1932-6203}, mesh = {*Software ; *Algorithms ; Cloud Computing ; Communication ; Weather ; }, abstract = {Fog computing (FC) 
brings a Cloud close to users and improves the quality of service and delay services. In this article, the convergence of FC and Software-Defined-Networking (SDN) has been proposed to implement complicated mechanisms of resource management. SDN has suited the practical standard for FC systems. The priority and differential flow space allocation have been applied to arrange this framework for the heterogeneous request in Machine-Type-Communications. The delay-sensitive flows are assigned to a configuration of priority queues on each Fog. Due to limited resources in the Fog, a promising solution is offloading flows to other Fogs through a decision-based SDN controller. The flow-based Fog nodes have been modeled according to the queueing theory, where polling priority algorithms have been applied to service the flows and to reduce the starvation problem in a multi-queueing model. It is observed that the percentage of delay-sensitive processed flows, the network consumption, and the average service time in the proposed mechanism are improved by about 80%, 65%, and 60%, respectively, compared to traditional Cloud computing. Therefore, the delay reductions based on the types of flows and task offloading is proposed.}, } @article {pmid37252813, year = {2023}, author = {Samarakoon, H and Ferguson, JM and Gamaarachchi, H and Deveson, IW}, title = {Accelerated nanopore basecalling with SLOW5 data format.}, journal = {Bioinformatics (Oxford, England)}, volume = {39}, number = {6}, pages = {}, pmid = {37252813}, issn = {1367-4811}, mesh = {*Software ; Sequence Analysis, DNA/methods ; *Nanopores ; Genome ; Genomics ; High-Throughput Nucleotide Sequencing ; }, abstract = {MOTIVATION: Nanopore sequencing is emerging as a key pillar in the genomic technology landscape but computational constraints limiting its scalability remain to be overcome. 
The translation of raw current signal data into DNA or RNA sequence reads, known as 'basecalling', is a major friction in any nanopore sequencing workflow. Here, we exploit the advantages of the recently developed signal data format 'SLOW5' to streamline and accelerate nanopore basecalling on high-performance computing (HPC) and cloud environments.

RESULTS: SLOW5 permits highly efficient sequential data access, eliminating a potential analysis bottleneck. To take advantage of this, we introduce Buttery-eel, an open-source wrapper for Oxford Nanopore's Guppy basecaller that enables SLOW5 data access, resulting in performance improvements that are essential for scalable, affordable basecalling.

Buttery-eel is available at https://github.com/Psy-Fer/buttery-eel.}, } @article {pmid37252270, year = {2023}, author = {Torres-Gaona, G and Aledo-Serrano, Á and García-Morales, I and Toledano, R and Valls, J and Cosculluela, B and Munsó, L and Raurich, X and Trejo, A and Blanquez, D and Gil-Nagel, A}, title = {Artificial intelligence system, based on mjn-SERAS algorithm, for the early detection of seizures in patients with refractory focal epilepsy: A cross-sectional pilot study.}, journal = {Epilepsy & behavior reports}, volume = {22}, number = {}, pages = {100600}, pmid = {37252270}, issn = {2589-9864}, abstract = {Around one-third of epilepsy patients develop drug-resistant seizures; early detection of seizures could help improve safety, reduce patient anxiety, increase independence, and enable acute treatment. In recent years, the use of artificial intelligence techniques and machine learning algorithms in different diseases, including epilepsy, has increased significantly. The main objective of this study is to determine whether the mjn-SERAS artificial intelligence algorithm developed by MJN Neuroserveis, can detect seizures early using patient-specific data to create a personalized mathematical model based on EEG training, defined as the programmed recognition of oncoming seizures before they are primarily initiated, usually within a period of a few minutes, in patients diagnosed of epilepsy. Retrospective, cross-sectional, observational, multicenter study to determine the sensitivity and specificity of the artificial intelligence algorithm. We searched the database of the Epilepsy Units of three Spanish medical centers and selected 50 patients evaluated between January 2017 and February 2021, diagnosed with refractory focal epilepsy who underwent video-EEG monitoring recordings between 3 and 5 days, a minimum of 3 seizures per patient, lasting more than 5 s and the interval between each seizure was greater than 1 h. 
Exclusion criteria included age <18 years, intracranial EEG monitoring, and severe psychiatric, neurological, or systemic disorders. The algorithm identified pre-ictal and interictal patterns from EEG data using our learning algorithm and was compared to a senior epileptologist's evaluation as a gold standard. Individual mathematical models of each patient were trained using this feature dataset. A total of 1963 h of 49 video-EEG recordings were reviewed, with an average of 39.26 h per patient. The video-EEG monitoring recorded 309 seizures as subsequently analyzed by the epileptologists. The mjn-SERAS algorithm was trained on 119 seizures and split testing was performed on 188 seizures. The statistical analysis includes the data from each model and reports 10 false negatives (no detection of episodes recorded by video-EEG) and 22 false positives (alert detected without clinical correlation or abnormal EEG signal within 30 min). Specifically, the automated mjn-SERAS AI algorithm achieved a sensitivity of 94.7% (95 %; CI 94.67-94.73), and an F-Score representing specificity of 92.2% (95 %; CI 92.17-92.23) compared to the reference performance represented by a mean (harmonic mean or average) and a positive predictive value of 91%, with a false positive rate of 0.55 per 24 h in the patient-independent model. This patient-specific AI algorithm for early seizure detection shows promising results in terms of sensitivity and false positive rate. 
Although the algorithm requires high computational requirements on specialized cloud servers for training and computing, its computational load in real-time is low, allowing its implementation on embedded devices for online seizure detection.}, } @article {pmid37251849, year = {2023}, author = {Al-Sharafi, MA and Iranmanesh, M and Al-Emran, M and Alzahrani, AI and Herzallah, F and Jamil, N}, title = {Determinants of cloud computing integration and its impact on sustainable performance in SMEs: An empirical investigation using the SEM-ANN approach.}, journal = {Heliyon}, volume = {9}, number = {5}, pages = {e16299}, pmid = {37251849}, issn = {2405-8440}, abstract = {Although extant literature has thoroughly investigated the incorporation of cloud computing services, examining their influence on sustainable performance, particularly at the organizational level, is insufficient. Consequently, the present research aims to assess the factors that impact the integration of cloud computing within small and medium-sized enterprises (SMEs) and its subsequent effects on environmental, financial, and social performance. The data were collected from 415 SMEs and were analyzed using a hybrid SEM-ANN approach. PLS-SEM results indicate that relative advantage, complexity, compatibility, top management support, cost reduction, and government support significantly affect cloud computing integration. This study also empirically demonstrated that SMEs could improve their financial, environmental, and social performance by integrating cloud computing services. ANN results show that complexity, with a normalized importance (NI) of 89.14%, is ranked the first among other factors affecting cloud computing integration in SMEs. This is followed by cost reduction (NI = 82.67%), government support (NI = 73.37%), compatibility (NI = 70.02%), top management support (NI = 52.43%), and relative advantage (NI = 48.72%). 
Theoretically, this study goes beyond examining the determinants affecting cloud computing integration by examining their impact on SMEs' environmental, financial, and social performance in a comprehensive manner. The study also provides several practical implications for policymakers, SME managers, and cloud computing service providers.}, } @article {pmid37250444, year = {2023}, author = {Luscombe, DJ and Gatis, N and Anderson, K and Carless, D and Brazier, RE}, title = {Rapid, repeatable landscape-scale mapping of tree, hedgerow, and woodland habitats (THaW), using airborne LiDAR and spaceborne SAR data.}, journal = {Ecology and evolution}, volume = {13}, number = {5}, pages = {e10103}, pmid = {37250444}, issn = {2045-7758}, abstract = {In the UK, tree, hedgerow, and woodland (THaW) habitats are key havens for biodiversity and support many related ecosystem services. The UK is entering a period of agricultural policy realignment with respect to natural capital and climate change, meaning that now is a critical time to evaluate the distribution, resilience, and dynamics of THaW habitats. The fine-grained nature of habitats like hedgerows necessitates mapping of these features at relatively fine spatial resolution-and freely available public archives of airborne laser scanning (LiDAR) data at <2 m spatial resolution offer a means of doing so within UK settings. The high cost of LiDAR prohibits use for regular monitoring of THaW change, but space-borne sensors such as Sentinel-1 Synthetic Aperture Radar (SAR at ca. 10 m resolution) can potentially meet this need once baseline distributions are established. We address two aims in this manuscript-(1) to rapidly quantify THaW across UK landscapes using LiDAR data and (2) to monitor canopy change intra- and inter-annually using SAR data. We show that workflows applied to airborne LiDAR data can deliver THaW baselines at 2 m resolution, with positional accuracy of >90%. 
It was also possible to combine LiDAR mapping data and Sentinel-1 SAR data to rapidly track canopy change through time (i.e., every 3 months) using cloud-based processing via Google Earth Engine. The resultant toolkit is also provided as an open-access web app. The results highlight that whilst nearly 90% of the tallest trees (above 15 m) are captured within the National Forest Inventory (NFI) database only 50% of THaW with a canopy height range of 3-15 m are recorded. Current estimates of tree distribution neglect these finer-grained features (i.e., smaller or less contiguous THaW canopies), which we argue will account for a significant proportion of landscape THaW cover.}, } @article {pmid37238577, year = {2023}, author = {Tang, Y and Jin, M and Meng, H and Yang, L and Zheng, C}, title = {Attribute-Based Verifiable Conditional Proxy Re-Encryption Scheme.}, journal = {Entropy (Basel, Switzerland)}, volume = {25}, number = {5}, pages = {}, pmid = {37238577}, issn = {1099-4300}, support = {ICNS202006//the Shaanxi Key Laboratory of Information Communication Network and Security/ ; LNCT2022-A11//Henan Key Laboratory of Network Cryptography Technology/ ; }, abstract = {There are mostly semi-honest agents in cloud computing, so agents may perform unreliable calculations during the actual execution process. In this paper, an attribute-based verifiable conditional proxy re-encryption (AB-VCPRE) scheme using a homomorphic signature is proposed to solve the problem that the current attribute-based conditional proxy re-encryption (AB-CPRE) algorithm cannot detect the illegal behavior of the agent. The scheme implements robustness, that is the re-encryption ciphertext, can be verified by the verification server, showing that the received ciphertext is correctly converted by the agent from the original ciphertext, thus, meaning that illegal activities of agents can be effectively detected. 
In addition, the article demonstrates the reliability of the constructed AB-VCPRE scheme validation in the standard model, and proves that the scheme satisfies CPA security in the selective security model based on the learning with errors (LWE) assumption.}, } @article {pmid37238475, year = {2023}, author = {Zhao, M and Wang, H and Yao, B}, title = {Graphic Groups, Graph Homomorphisms, and Graphic Group Lattices in Asymmetric Topology Cryptography.}, journal = {Entropy (Basel, Switzerland)}, volume = {25}, number = {5}, pages = {}, pmid = {37238475}, issn = {1099-4300}, support = {22JR5RA876//The Science and Technology Program of Gansu Province/ ; 61163054//National Natural Science Foundation of China/ ; 61363060//National Natural Science Foundation of China/ ; 61662066//National Natural Science Foundation of China/ ; }, abstract = {Using asymmetric topology cryptography to encrypt networks on the basis of topology coding is a new topic of cryptography, which consists of two major elements, i.e., topological structures and mathematical constraints. The topological signature of asymmetric topology cryptography is stored in the computer by matrices that can produce number-based strings for application. By means of algebra, we introduce every-zero mixed graphic groups, graphic lattices, and various graph-type homomorphisms and graphic lattices based on mixed graphic groups into cloud computing technology. 
The whole network encryption will be realized by various graphic groups.}, } @article {pmid37236950, year = {2023}, author = {Zhao, N and Zhao, YH and Zou, HF and Bai, XH and Zhen, Z}, title = {Spatial and temporal trends and drivers of fractional vegetation cover in Heilongjiang Province, China during 1990-2020.}, journal = {Ying yong sheng tai xue bao = The journal of applied ecology}, volume = {34}, number = {5}, pages = {1320-1330}, doi = {10.13287/j.1001-9332.202305.021}, pmid = {37236950}, issn = {1001-9332}, mesh = {Humans ; *Ecosystem ; Seasons ; China ; *Models, Theoretical ; Human Activities ; }, abstract = {Fractional vegetation cover (FVC) is a quantitative indicator for vegetation growth conditions and ecosystem change. Clarifying the spatial and temporal trends and driving factors of FVC is an important research content of global and regional ecological environment. Based on Google Earth Engine (GEE) cloud computing platform, we estimated FVC in Heilongjiang Province from 1990 to 2020 using the pixel dichotomous model. We analyzed the temporal and spatial trends and drivers of FVC using Mann-Kendall mutation test, Sen's slope analysis with Mann-Kendall significance test, correlation analysis, and structural equation model. The results showed that the estimated FVC based on the pixel dichotomous model had high accuracy (R[2]>0.7, root mean square error <0.1, relative root mean square error <14%). From 1990 to 2020, the annual average FVC in Heilongjiang was 0.79, with a fluctuating upward trend (0.72-0.85) and an average annual growth rate of 0.4%. The annual average FVC at the municipal administrative districts level also showed different levels of increase of FVC. The area with extremely high FVC dominated the Heilongjiang Province with a gradual increase proportion. The area with increasing trend of FVC accounted for 67.4% of the total area, whereas the area with decreasing trend only accounted for 26.2%, and the rest remained unchanged. 
The correlation of human activity factor on annual average FVC was higher than that of growing season monthly average meteorological factor. The human activity factor was the main driver for FVC change in Heilongjiang Province, followed by land use type. The total effect of monthly average meteorological factor during the growing season on FVC change was negative. The results would serve as technical support for long-term FVC monitoring and driving force analysis in Heilongjiang Province, and provide a reference for ecological environment restoration and protection, as well as the formulation of related land use policy.}, } @article {pmid37233602, year = {2023}, author = {Schranghamer, TF and Pannone, A and Ravichandran, H and Stepanoff, SP and Trainor, N and Redwing, JM and Wolfe, DE and Das, S}, title = {Radiation Resilient Two-Dimensional Electronics.}, journal = {ACS applied materials & interfaces}, volume = {15}, number = {22}, pages = {26946-26959}, doi = {10.1021/acsami.3c02406}, pmid = {37233602}, issn = {1944-8252}, abstract = {Limitations in cloud-based computing have prompted a paradigm shift toward all-in-one "edge" devices capable of independent data sensing, computing, and storage. Advanced defense and space applications stand to benefit immensely from this due to their need for continual operation in areas where maintaining remote oversight is difficult. However, the extreme environments relevant to these applications necessitate rigorous testing of technologies, with a common requirement being hardness to ionizing radiation. Two-dimensional (2D) molybdenum disulfide (MoS2) has been noted to enable the sensing, storage, and logic capabilities necessary for all-in-one edge devices. Despite this, the investigation of ionizing radiation effects in MoS2-based devices remains incomplete. 
In particular, studies on gamma radiation effects in MoS2 have been largely limited to standalone films, with few device investigations; to the best of our knowledge, no explorations have been made into gamma radiation effects on the sensing and memory capabilities of MoS2-based devices. In this work, we have used a statistical approach to study high-dose (1 Mrad) gamma radiation effects on photosensitive and programmable memtransistors fabricated from large-area monolayer MoS2. Memtransistors were divided into separate groups to ensure accurate extraction of device characteristics pertaining to baseline performance, sensing, and memory before and after irradiation. All-MoS2 logic gates were also assessed to determine the gamma irradiation impact on logic implementation. Our findings show that the multiple functionalities of MoS2 memtransistors are not severely impacted by gamma irradiation even without dedicated shielding/mitigation techniques. We believe that these results serve as a foundation for more application-oriented studies going forward.}, } @article {pmid37220560, year = {2023}, author = {Behbehani, D and Komninos, N and Al-Begain, K and Rajarajan, M}, title = {Cloud Enterprise Dynamic Risk Assessment (CEDRA): a dynamic risk assessment using dynamic Bayesian networks for cloud environment.}, journal = {Journal of cloud computing (Heidelberg, Germany)}, volume = {12}, number = {1}, pages = {79}, pmid = {37220560}, issn = {2192-113X}, abstract = {Cloud computing adoption has been increasing rapidly amid COVID-19 as organisations accelerate the implementation of their digital strategies. Most models adopt traditional dynamic risk assessment, which does not adequately quantify or monetise risks to enable business-appropriate decision-making. In view of this challenge, a new model is proposed in this paper for assignment of monetary losses terms to the consequences nodes, thereby enabling experts to understand better the financial risks of any consequence. 
The proposed model is named Cloud Enterprise Dynamic Risk Assessment (CEDRA) model that uses CVSS, threat intelligence feeds and information about exploitation availability in the wild using dynamic Bayesian networks to predict vulnerability exploitations and financial losses. A case study of a scenario based on the Capital One breach attack was conducted to demonstrate experimentally the applicability of the model proposed in this paper. The methods presented in this study has improved vulnerability and financial losses prediction.}, } @article {pmid37198391, year = {2023}, author = {Halder, B and Ahmadianfar, I and Heddam, S and Mussa, ZH and Goliatt, L and Tan, ML and Sa'adi, Z and Al-Khafaji, Z and Al-Ansari, N and Jawad, AH and Yaseen, ZM}, title = {Machine learning-based country-level annual air pollutants exploration using Sentinel-5P and Google Earth Engine.}, journal = {Scientific reports}, volume = {13}, number = {1}, pages = {7968}, pmid = {37198391}, issn = {2045-2322}, abstract = {Climatic condition is triggering human health emergencies and earth's surface changes. Anthropogenic activities, such as built-up expansion, transportation development, industrial works, and some extreme phases, are the main reason for climate change and global warming. Air pollutants are increased gradually due to anthropogenic activities and triggering the earth's health. Nitrogen Dioxide (NO2), Carbon Monoxide (CO), and Aerosol Optical Depth (AOD) are truthfully important for air quality measurement because those air pollutants are more harmful to the environment and human's health. Earth observational Sentinel-5P is applied for monitoring the air pollutant and chemical conditions in the atmosphere from 2018 to 2021. The cloud computing-based Google Earth Engine (GEE) platform is applied for monitoring those air pollutants and chemical components in the atmosphere. The NO2 variation indicates high during the time because of the anthropogenic activities. 
Carbon Monoxide (CO) is also located high between two 1-month different maps. The 2020 and 2021 results indicate AQI change is high where 2018 and 2019 indicates low AQI throughout the year. The Kolkata have seven AQI monitoring station where high nitrogen dioxide recorded 102 (2018), 48 (2019), 26 (2020) and 98 (2021), where Delhi AQI stations recorded 99 (2018), 49 (2019), 37 (2020), and 107 (2021). Delhi, Kolkata, Mumbai, Pune, and Chennai recorded huge fluctuations of air pollutants during the study periods, where ~ 50-60% NO2 was recorded as high in the recent time. The AOD was noticed high in Uttar Pradesh in 2020. These results indicate that air pollutant investigation is much necessary for future planning and management otherwise; our planet earth is mostly affected by the anthropogenic and climatic conditions where maybe life does not exist.}, } @article {pmid37192819, year = {2023}, author = {Ahalt, S and Avillach, P and Boyles, R and Bradford, K and Cox, S and Davis-Dusenbery, B and Grossman, RL and Krishnamurthy, A and Manning, A and Paten, B and Philippakis, A and Borecki, I and Chen, SH and Kaltman, J and Ladwa, S and Schwartz, C and Thomson, A and Davis, S and Leaf, A and Lyons, J and Sheets, E and Bis, JC and Conomos, M and Culotti, A and Desain, T and Digiovanna, J and Domazet, M and Gogarten, S and Gutierrez-Sacristan, A and Harris, T and Heavner, B and Jain, D and O'Connor, B and Osborn, K and Pillion, D and Pleiness, J and Rice, K and Rupp, G and Serret-Larmande, A and Smith, A and Stedman, JP and Stilp, A and Barsanti, T and Cheadle, J and Erdmann, C and Farlow, B and Gartland-Gray, A and Hayes, J and Hiles, H and Kerr, P and Lenhardt, C and Madden, T and Mieczkowska, JO and Miller, A and Patton, P and Rathbun, M and Suber, S and Asare, J}, title = {Building a collaborative cloud platform to accelerate heart, lung, blood, and sleep research.}, journal = {Journal of the American Medical Informatics Association : JAMIA}, volume = {30}, number = 
{7}, pages = {1293-1300}, pmid = {37192819}, issn = {1527-974X}, support = {OT3 HL142478/HL/NHLBI NIH HHS/United States ; R01 HL120393/HL/NHLBI NIH HHS/United States ; U01 HL120393/HL/NHLBI NIH HHS/United States ; OT3 HL142481/HL/NHLBI NIH HHS/United States ; OT3 HL147154/HL/NHLBI NIH HHS/United States ; HHSN268201800001C/HL/NHLBI NIH HHS/United States ; OT3 HL142480/HL/NHLBI NIH HHS/United States ; OT3 HL142479/HL/NHLBI NIH HHS/United States ; HHSN268201000001I/HL/NHLBI NIH HHS/United States ; }, mesh = {Humans ; *Cloud Computing ; Ecosystem ; Reproducibility of Results ; *COVID-19 ; Lung ; Software ; }, abstract = {Research increasingly relies on interrogating large-scale data resources. The NIH National Heart, Lung, and Blood Institute developed the NHLBI BioData CatalystⓇ (BDC), a community-driven ecosystem where researchers, including bench and clinical scientists, statisticians, and algorithm developers, find, access, share, store, and compute on large-scale datasets. This ecosystem provides secure, cloud-based workspaces, user authentication and authorization, search, tools and workflows, applications, and new innovative features to address community needs, including exploratory data analysis, genomic and imaging tools, tools for reproducibility, and improved interoperability with other NIH data science platforms. BDC offers straightforward access to large-scale datasets and computational resources that support precision medicine for heart, lung, blood, and sleep conditions, leveraging separately developed and managed platforms to maximize flexibility based on researcher needs, expertise, and backgrounds. Through the NHLBI BioData Catalyst Fellows Program, BDC facilitates scientific discoveries and technological advances. 
BDC also facilitated accelerated research on the coronavirus disease-2019 (COVID-19) pandemic.}, } @article {pmid37190404, year = {2023}, author = {Li, J and Fan, Y and Bian, X and Yuan, Q}, title = {Online/Offline MA-CP-ABE with Cryptographic Reverse Firewalls for IoT.}, journal = {Entropy (Basel, Switzerland)}, volume = {25}, number = {4}, pages = {}, pmid = {37190404}, issn = {1099-4300}, support = {61872204//National Natural Science Foundation of China/ ; LH2020F050//Natural Science Foundation of Heilongjiang Province of China/ ; 2021-KYYWF-0016//Fundamental Research Funds for Heilongjiang Universities of China/ ; 135309453//Science Research project of Basic scientific research business expenses in Heilongjiang Provincial colleges and universities of China/ ; }, abstract = {Devices in the Internet of Things (IoT) usually use cloud storage and cloud computing to save storage and computing cost. Therefore, the efficient realization of one-to-many communication of data on the premise of ensuring the security of cloud storage data is a challenge. Ciphertext-Policy Attribute-Based Encryption (CP-ABE) can not only protect the security of data in the cloud and achieve one-to-many communication but also achieve fine-grained access control for data. However, the single-authority CP-ABE faces the crisis of single point of failure. In order to improve security, the Multi-Authority CP-ABE (MA-CP-ABE) is adopted. Although there are provably-secure MA-CP-ABE schemes, Edward Snowden's research shows that provably-secure cryptographic schemes are vulnerable to backdoor attacks, resulting in secret disclosure, and thus threatening security. In addition, ABE requires huge computational overhead in key generation, encryption and decryption, which increase with the increase in the number of attributes and the complexity of the access structure, and there are a large number of resource-constrained devices in the IoT. 
To mitigate this issue, we construct the Online/Offline MA-CP-ABE with Cryptographic Reverse Firewalls (OO-MA-CP-ABE-CRFs) scheme. This scheme not only uses Cryptographic Reverse Firewall (CRF) to resist backdoor attacks but also uses online/offline key generation, online/offline encryption and outsourcing encryption technology to optimize the efficiency of the MA-CP-ABE scheme with reverse firewall, reducing the storage and computing cost of users. Finally, the security of the OO-MA-CP-ABE-CRFs scheme is proved, and the experimental results indicate that the scheme is efficient and practical.}, } @article {pmid37190351, year = {2023}, author = {Panchikkil, S and Manikandan, VM and Zhang, Y and Wang, S}, title = {A Multi-Directional Pixel-Swapping Approach (MPSA) for Entropy-Retained Reversible Data Hiding in Encrypted Images.}, journal = {Entropy (Basel, Switzerland)}, volume = {25}, number = {4}, pages = {}, pmid = {37190351}, issn = {1099-4300}, support = {MC_PC_17171/MRC_/Medical Research Council/United Kingdom ; }, abstract = {Reversible data hiding (RDH), a promising data-hiding technique, is widely examined in domains such as medical image transmission, satellite image transmission, crime investigation, cloud computing, etc. None of the existing RDH schemes addresses a solution from a real-time aspect. A good compromise between the information embedding rate and computational time makes the scheme suitable for real-time applications. As a solution, we propose a novel RDH scheme that recovers the original image by retaining its quality and extracting the hidden data. Here, the cover image gets encrypted using a stream cipher and is partitioned into non-overlapping blocks. Secret information is inserted into the encrypted blocks of the cover image via a controlled local pixel-swapping approach to achieve a comparatively good payload. The new scheme MPSA allows the data hider to hide two bits in every encrypted block. 
The existing reversible data-hiding schemes modify the encrypted image pixels leading to a compromise in image security. However, the proposed work complements the support of encrypted image security by maintaining the same entropy of the encrypted image in spite of hiding the data. Experimental results illustrate the competency of the proposed work accounting for various parameters, including embedding rate and computational time.}, } @article {pmid37187785, year = {2023}, author = {Cabrero-Holgueras, J and Pastrana, S}, title = {Towards realistic privacy-preserving deep learning over encrypted medical data.}, journal = {Frontiers in cardiovascular medicine}, volume = {10}, number = {}, pages = {1117360}, pmid = {37187785}, issn = {2297-055X}, abstract = {Cardiovascular disease supposes a substantial fraction of healthcare systems. The invisible nature of these pathologies demands solutions that enable remote monitoring and tracking. Deep Learning (DL) has arisen as a solution in many fields, and in healthcare, multiple successful applications exist for image enhancement and health outside hospitals. However, the computational requirements and the need for large-scale datasets limit DL. Thus, we often offload computation onto server infrastructure, and various Machine-Learning-as-a-Service (MLaaS) platforms emerged from this need. These enable the conduction of heavy computations in a cloud infrastructure, usually equipped with high-performance computing servers. Unfortunately, the technical barriers persist in healthcare ecosystems since sending sensitive data (e.g., medical records or personally identifiable information) to third-party servers involves privacy and security concerns with legal and ethical implications. In the scope of Deep Learning for Healthcare to improve cardiovascular health, Homomorphic Encryption (HE) is a promising tool to enable secure, private, and legal health outside hospitals. 
Homomorphic Encryption allows for privacy-preserving computations over encrypted data, thus preserving the privacy of the processed information. Efficient HE requires structural optimizations to perform the complex computation of the internal layers. One such optimization is Packed Homomorphic Encryption (PHE), which encodes multiple elements on a single ciphertext, allowing for efficient Single Instruction over Multiple Data (SIMD) operations. However, using PHE in DL circuits is not straightforward, and it demands new algorithms and data encoding, which existing literature has not adequately addressed. To fill this gap, in this work, we elaborate on novel algorithms to adapt the linear algebra operations of DL layers to PHE. Concretely, we focus on Convolutional Neural Networks. We provide detailed descriptions and insights into the different algorithms and efficient inter-layer data format conversion mechanisms. We formally analyze the complexity of the algorithms in terms of performance metrics and provide guidelines and recommendations for adapting architectures that deal with private data. Furthermore, we confirm the theoretical analysis with practical experimentation. 
Among other conclusions, we prove that our new algorithms speed up the processing of convolutional layers compared to the existing proposals.}, } @article {pmid37181330, year = {2023}, author = {Dahlquist, JM and Nelson, SC and Fullerton, SM}, title = {Cloud-based biomedical data storage and analysis for genomic research: Landscape analysis of data governance in emerging NIH-supported platforms.}, journal = {HGG advances}, volume = {4}, number = {3}, pages = {100196}, pmid = {37181330}, issn = {2666-2477}, support = {R21 HG011501/HG/NHGRI NIH HHS/United States ; }, mesh = {Humans ; *Cloud Computing ; *Population Health ; Genomics/methods ; Genome ; Information Storage and Retrieval ; }, abstract = {The storage, sharing, and analysis of genomic data poses technical and logistical challenges that have precipitated the development of cloud-based computing platforms designed to facilitate collaboration and maximize the scientific utility of data. To understand cloud platforms' policies and procedures and the implications for different stakeholder groups, in summer 2021, we reviewed publicly available documents (N = 94) sourced from platform websites, scientific literature, and lay media for five NIH-funded cloud platforms (the All of Us Research Hub, NHGRI AnVIL, NHLBI BioData Catalyst, NCI Genomic Data Commons, and the Kids First Data Resource Center) and a pre-existing data sharing mechanism, dbGaP. Platform policies were compared across seven categories of data governance: data submission, data ingestion, user authentication and authorization, data security, data access, auditing, and sanctions. Our analysis finds similarities across the platforms, including reliance on a formal data ingestion process, multiple tiers of data access with varying user authentication and/or authorization requirements, platform and user data security measures, and auditing for inappropriate data use. 
Platforms differ in how data tiers are organized, as well as the specifics of user authentication and authorization across access tiers. Our analysis maps elements of data governance across emerging NIH-funded cloud platforms and as such provides a key resource for stakeholders seeking to understand and utilize data access and analysis options across platforms and to surface aspects of governance that may require harmonization to achieve the desired interoperability.}, } @article {pmid37179470, year = {2023}, author = {Balla, Y and Tirunagari, S and Windridge, D}, title = {Machine Learning in Pediatrics: Evaluating Challenges, Opportunities, and Explainability.}, journal = {Indian pediatrics}, volume = {}, number = {}, pages = {}, pmid = {37179470}, issn = {0974-7559}, abstract = {BACKGROUND: The emergence of Artificial Intelligence (AI) tools such as ChatGPT and Bard is disrupting a broad swathe of fields, including medicine. In pediatric medicine, AI is also increasingly being used across multiple subspecialties. However, the practical application of AI still faces a number of key challenges. Consequently, there is a requirement for a concise overview of the roles of AI across the multiple domains of pediatric medicine that the current study seeks to address.

AIM: To systematically assess the challenges, opportunities, and explainability of AI in pediatric medicine.

METHODOLOGY: A systematic search was carried out on peer-reviewed databases, PubMed Central, Europe PubMed Central, and grey literature using search terms related to machine learning (ML) and AI for the years 2016 to 2022 in the English language. A total of 210 articles were retrieved that were screened with PRISMA for abstract, year, language, context, and proximal relevance to research aims. A thematic analysis was carried out to extract findings from the included studies.

RESULTS: Twenty articles were selected for data abstraction and analysis, with three consistent themes emerging from these articles. In particular, eleven articles address the current state-of-the-art application of AI in diagnosing and predicting health conditions such as behavioral and mental health, cancer, syndromic and metabolic diseases. Five articles highlight the specific challenges of AI deployment in pediatric medicines: data security, handling, authentication, and validation. Four articles set out future opportunities for AI to be adapted: the incorporation of Big Data, cloud computing, precision medicine, and clinical decision support systems. These studies collectively critically evaluate the potential of AI in overcoming current barriers to adoption.

CONCLUSION: AI is proving disruptive within pediatric medicine and is presently associated with challenges, opportunities, and the need for explainability. AI should be viewed as a tool to enhance and support clinical decision-making rather than a substitute for human judgement and expertise. Future research should consequently focus on obtaining comprehensive data to ensure the generalizability of research findings.}, } @article {pmid37177753, year = {2023}, author = {Cicceri, G and Tricomi, G and D'Agati, L and Longo, F and Merlino, G and Puliafito, A}, title = {A Deep Learning-Driven Self-Conscious Distributed Cyber-Physical System for Renewable Energy Communities.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {9}, pages = {}, pmid = {37177753}, issn = {1424-8220}, abstract = {The Internet of Things (IoT) is transforming various domains, including smart energy management, by enabling the integration of complex digital and physical components in distributed cyber-physical systems (DCPSs). The design of DCPSs has so far been focused on performance-related, non-functional requirements. However, with the growing power consumption and computation expenses, sustainability is becoming an important aspect to consider. This has led to the concept of energy-aware DCPSs, which integrate conventional non-functional requirements with additional attributes for sustainability, such as energy consumption. This research activity aimed to investigate and develop energy-aware architectural models and edge/cloud computing technologies to design next-generation, AI-enabled (and, specifically, deep-learning-enhanced), self-conscious IoT-extended DCPSs. 
Our key contributions include energy-aware edge-to-cloud architectural models and technologies, the orchestration of a (possibly federated) edge-to-cloud infrastructure, abstractions and unified models for distributed heterogeneous virtualized resources, innovative machine learning algorithms for the dynamic reallocation and reconfiguration of energy resources, and the management of energy communities. The proposed solution was validated through case studies on optimizing renewable energy communities (RECs), or energy-aware DCPSs, which are particularly challenging due to their unique requirements and constraints; in more detail, in this work, we aim to define the optimal implementation of an energy-aware DCPS. Moreover, smart grids play a crucial role in developing energy-aware DCPSs, providing a flexible and efficient power system integrating renewable energy sources, microgrids, and other distributed energy resources. The proposed energy-aware DCPSs contribute to the development of smart grids by providing a sustainable, self-consistent, and efficient way to manage energy distribution and consumption. The performance demonstrates our approach's effectiveness for consumption and production (based on RMSE and MAE metrics). Our research supports the transition towards a more sustainable future, where communities adopting REC principles become key players in the energy landscape.}, } @article {pmid37177697, year = {2023}, author = {Mamede, H and Neves, JC and Martins, J and Gonçalves, R and Branco, F}, title = {A Prototype for an Intelligent Water Management System for Household Use.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {9}, pages = {}, pmid = {37177697}, issn = {1424-8220}, support = {LA/P/0063/2020//Fundação para a Ciência e Tecnologia/ ; }, abstract = {Water scarcity is becoming an issue of more significant concern with a major impact on global sustainability. For it, new measures and approaches are urgently needed. 
Digital technologies and tools can play an essential role in improving the effectiveness and efficiency of current water management approaches. Therefore, a solution is proposed and validated, given the limited presence of models or technological architectures in the literature to support intelligent water management systems for domestic use. It is based on a layered architecture, fully designed to meet the needs of households and to do so through the adoption of technologies such as the Internet of Things and cloud computing. By developing a prototype and using it as a use case for testing purposes, we have concluded the positive impact of using such a solution. Considering this is a first contribution to overcome the problem, some issues will be addressed in a future work, namely, data and device security and energy and traffic optimisation issues, among several others.}, } @article {pmid37177672, year = {2023}, author = {Micko, K and Papcun, P and Zolotova, I}, title = {Review of IoT Sensor Systems Used for Monitoring the Road Infrastructure.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {9}, pages = {}, pmid = {37177672}, issn = {1424-8220}, support = {APVV-20-0247//Slovak Research and Development Agency/ ; }, abstract = {An intelligent transportation system is one of the fundamental goals of the smart city concept. The Internet of Things (IoT) concept is a basic instrument to digitalize and automatize the process in the intelligent transportation system. Digitalization via the IoT concept enables the automatic collection of data usable for management in the transportation system. The IoT concept includes a system of sensors, actuators, control units and computational distribution among the edge, fog and cloud layers. The study proposes a taxonomy of sensors used for monitoring tasks based on motion detection and object tracking in intelligent transportation system tasks. 
The sensor's taxonomy helps to categorize the sensors based on working principles, installation or maintenance methods and other categories. The sensor's categorization enables us to compare the effectiveness of each sensor's system. Monitoring tasks are analyzed, categorized, and solved in intelligent transportation systems based on a literature review and focusing on motion detection and object tracking methods. A literature survey of sensor systems used for monitoring tasks in the intelligent transportation system was performed according to sensor and monitoring task categorization. In this review, we analyzed the achieved results to measure, sense, or classify events in intelligent transportation system monitoring tasks. The review conclusions were used to propose an architecture of the universal sensor system for common monitoring tasks based on motion detection and object tracking methods in intelligent transportation tasks. The proposed architecture was built and tested for the first experimental results in the case study scenario. Finally, we propose methods that could significantly improve the results in the following research.}, } @article {pmid37177663, year = {2023}, author = {Esposito, M and Belli, A and Palma, L and Pierleoni, P}, title = {Design and Implementation of a Framework for Smart Home Automation Based on Cellular IoT, MQTT, and Serverless Functions.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {9}, pages = {}, pmid = {37177663}, issn = {1424-8220}, abstract = {Smart objects and home automation tools are becoming increasingly popular, and the number of smart devices that each dedicated application has to manage is increasing accordingly. The emergence of technologies such as serverless computing and dedicated machine-to-machine communication protocols represents a valuable opportunity to facilitate management of smart objects and replicability of new solutions. 
The aim of this paper is to propose a framework for home automation applications that can be applied to control and monitor any appliance or object in a smart home environment. The proposed framework makes use of a dedicated messages-exchange protocol based on MQTT and cloud-deployed serverless functions. Furthermore, a vocal command interface is implemented to let users control the smart object with vocal interactions, greatly increasing the accessibility and intuitiveness of the proposed solution. A smart object, namely a smart kitchen fan extractor system, was developed, prototyped, and tested to illustrate the viability of the proposed solution. The smart object is equipped with a narrowband IoT (NB-IoT) module to send and receive commands to and from the cloud. In order to evaluate the performance of the proposed solution, the suitability of NB-IoT for the transmission of MQTT messages was evaluated. The results show how NB-IoT has an acceptable latency performance despite some minimal packet loss.}, } @article {pmid37177654, year = {2023}, author = {Carvalho, J and Vieira, D and Rodrigues, C and Trinta, F}, title = {LM[2]K Model for Hosting an Application Based on Microservices in Multi-Cloud.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {9}, pages = {}, pmid = {37177654}, issn = {1424-8220}, abstract = {Cloud computing has become a popular delivery model service, offering several advantages. However, there are still challenges that need to be addressed when applying the cloud model to specific scenarios. Two of such challenges involve deploying and executing applications across multiple providers, each comprising several services with similar functionalities and different capabilities. Therefore, dealing with application distributions across various providers can be a complex task for a software architect due to the differing characteristics of the application components. 
Some works have proposed solutions to address the challenges discussed here, but most of them focus on service providers. To facilitate the decision-making process of software architects, we previously presented PacificClouds, an architecture for managing the deployment and execution of applications based on microservices and distributed in a multi-cloud environment. Therefore, in this work, we focus on the challenges of selecting multiple clouds for PacificClouds and choosing providers that best meet the microservices and software architect requirements. We propose a selection model and three approaches to address various scenarios. We evaluate the performance of the approaches and conduct a comparative analysis of them. The results demonstrate their feasibility regarding performance.}, } @article {pmid37177653, year = {2023}, author = {Tošić, A and Vičič, J and Burnard, M and Mrissa, M}, title = {A Blockchain Protocol for Real-Time Application Migration on the Edge.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {9}, pages = {}, pmid = {37177653}, issn = {1424-8220}, support = {J2-2504//Slovenian Research Agency/ ; 739574//European Commission/ ; 857188//European Commission/ ; }, abstract = {The Internet of Things (IoT) is experiencing widespread adoption across industry sectors ranging from supply chain management to smart cities, buildings, and health monitoring. However, most software architectures for the IoT deployment rely on centralized cloud computing infrastructures to provide storage and computing power, as cloud providers have high economic incentives to organize their infrastructure into clusters. Despite these incentives, there has been a recent shift from centralized to decentralized architectures that harness the potential of edge devices, reduce network latency, and lower infrastructure costs to support IoT applications. 
This shift has resulted in new edge computing architectures, but many still rely on centralized solutions for managing applications. A truly decentralized approach would offer interesting properties required for IoT use cases. In this paper, we introduce a decentralized architecture tailored for large-scale deployments of peer-to-peer IoT sensor networks and capable of run-time application migration. We propose a leader election consensus protocol for permissioned distributed networks that only requires one series of messages in order to commit to a change. The solution combines a blockchain consensus protocol using Verifiable Delay Functions (VDF) to achieve decentralized randomness, fault tolerance, transparency, and no single point of failure. We validate our solution by testing and analyzing the performance of our reference implementation. Our results show that nodes are able to reach consensus consistently, and the VDF proofs can be used as an entropy pool for decentralized randomness. We show that our system can perform autonomous real-time application migrations. Finally, we conclude that the implementation is scalable by testing it on 100 consensus nodes running 200 applications.}, } @article {pmid37177615, year = {2023}, author = {Vergara, J and Botero, J and Fletscher, L}, title = {A Comprehensive Survey on Resource Allocation Strategies in Fog/Cloud Environments.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {9}, pages = {}, pmid = {37177615}, issn = {1424-8220}, abstract = {The growing number of connected objects has allowed the development of new applications in different areas. In addition, the technologies that support these applications, such as cloud and fog computing, face challenges in providing the necessary resources to process information for different applications due to the highly dynamic nature of these networks and the many heterogeneous devices involved. 
This article reviews the existing literature on one of these challenges: resource allocation in the fog-cloud continuum, including approaches that consider different strategies and network characteristics. We also discuss the factors influencing resource allocation decisions, such as energy consumption, latency, monetary cost, or network usage. Finally, we identify the open research challenges and highlight potential future directions. This survey article aims to serve as a valuable reference for researchers and practitioners interested in the field of edge computing and resource allocation.}, } @article {pmid37177587, year = {2023}, author = {Ma, H and Zhou, D and Li, P and Wang, X}, title = {EVOAC-HP: An Efficient and Verifiable Outsourced Access Control Scheme with Hidden Policy.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {9}, pages = {}, pmid = {37177587}, issn = {1424-8220}, support = {61932010//the National Natural Science Foundation of China/ ; GPKLPSNS-2022-KF-05//Guangdong Provincial Key Laboratory of Power System Network Security/ ; }, abstract = {As medical data become increasingly important in healthcare, it is crucial to have proper access control mechanisms, ensuring that sensitive data are only accessible to authorized users while maintaining privacy and security. Ciphertext-Policy Attribute-Based Encryption (CP-ABE) is an attractive access control solution that can offer effective, fine-grained and secure medical data sharing, but it has two major drawbacks: Firstly, decryption is computationally expensive for resource-limited data users, especially when the access policy has many attributes, limiting its use in large-scale data-sharing scenarios. Secondly, existing schemes are based on data users' attributes, which can potentially reveal sensitive information about the users, especially in healthcare data sharing, where strong privacy and security are essential. 
To address these issues, we designed an improved CP-ABE scheme that provides efficient and verifiable outsourced access control with fully hidden policy named EVOAC-HP. In this paper, we utilize the attribute bloom filter to achieve policy hiding without revealing user privacy. For the purpose of alleviating the decryption burden for data users, we also adopt the technique of outsourced decryption to outsource the heavy computation overhead to the cloud service provider (CSP) with strong computing and storage capabilities, while the transformed ciphertext results can be verified by the data user. Finally, with rigorous security and reliable performance analysis, we demonstrate that EVOAC-HP is both practical and effective with robust privacy protection.}, } @article {pmid37177514, year = {2023}, author = {Dias, J and Simões, P and Soares, N and Costa, CM and Petry, MR and Veiga, G and Rocha, LF}, title = {Comparison of 3D Sensors for Automating Bolt-Tightening Operations in the Automotive Industry.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {9}, pages = {}, pmid = {37177514}, issn = {1424-8220}, support = {NORTE-01-0247-FEDER-072550//This work has received funding from the ERDF - European Regional Development Fund, through the North Portugal Regional Operational - NORTE 2020 programme under the Portugal 2020 Partnership Agreement within project EuroBot./ ; }, abstract = {Machine vision systems are widely used in assembly lines for providing sensing abilities to robots to allow them to handle dynamic environments. This paper presents a comparison of 3D sensors for evaluating which one is best suited for usage in a machine vision system for robotic fastening operations within an automotive assembly line. The perception system is necessary for taking into account the position uncertainty that arises from the vehicles being transported in an aerial conveyor. 
Three sensors with different working principles were compared, namely laser triangulation (SICK TriSpector1030), structured light with sequential stripe patterns (Photoneo PhoXi S) and structured light with infrared speckle pattern (Asus Xtion Pro Live). The accuracy of the sensors was measured by computing the root mean square error (RMSE) of the point cloud registrations between their scans and two types of reference point clouds, namely, CAD files and 3D sensor scans. Overall, the RMSE was lower when using sensor scans, with the SICK TriSpector1030 achieving the best results (0.25 mm ± 0.03 mm), the Photoneo PhoXi S having the intermediate performance (0.49 mm ± 0.14 mm) and the Asus Xtion Pro Live obtaining the highest RMSE (1.01 mm ± 0.11 mm). Considering the use case requirements, the final machine vision system relied on the SICK TriSpector1030 sensor and was integrated with a collaborative robot, which was successfully deployed in a vehicle assembly line, achieving 94% success in 53,400 screwing operations.}, } @article {pmid37177511, year = {2023}, author = {Li, X and Yi, Z and Li, R and Wang, XA and Li, H and Yang, X}, title = {SM2-Based Offline/Online Efficient Data Integrity Verification Scheme for Multiple Application Scenarios.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {9}, pages = {}, pmid = {37177511}, issn = {1424-8220}, support = {[62172436]//National Natural Science Foundation of China/ ; [62102452]//National Natural Science Foundation of China/ ; }, abstract = {With the rapid development of cloud storage and cloud computing technology, users tend to store data in the cloud for more convenient services. In order to ensure the integrity of cloud data, scholars have proposed cloud data integrity verification schemes to protect users' data security. 
The storage environment of the Internet of Things, in terms of big data and medical big data, demonstrates a stronger demand for data integrity verification schemes, but at the same time, the comprehensive function of data integrity verification schemes is required to be higher. Existing data integrity verification schemes are mostly applied in the cloud storage environment but cannot successfully be applied to the environment of the Internet of Things in the context of big data storage and medical big data storage. To solve this problem when combined with the characteristics and requirements of Internet of Things data storage and medical data storage, we designed an SM2-based offline/online efficient data integrity verification scheme. The resulting scheme uses the SM4 block cryptography algorithm to protect the privacy of the data content and uses a dynamic hash table to realize the dynamic updating of data. Based on the SM2 signature algorithm, the scheme can also realize offline tag generation and batch audits, reducing the computational burden of users. In security proof and efficiency analysis, the scheme has proven to be safe and efficient and can be used in a variety of application scenarios.}, } @article {pmid37177424, year = {2023}, author = {Hadjkouider, AM and Kerrache, CA and Korichi, A and Sahraoui, Y and Calafate, CT}, title = {Stackelberg Game Approach for Service Selection in UAV Networks.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {9}, pages = {}, pmid = {37177424}, issn = {1424-8220}, abstract = {Nowadays, mobile devices are expected to perform a growing number of tasks, whose complexity is also increasing significantly. However, despite great technological improvements in the last decade, such devices still have limitations in terms of processing power and battery lifetime. 
In this context, mobile edge computing (MEC) emerges as a possible solution to address such limitations, being able to provide on-demand services to the customer, and bringing closer several services published in the cloud with a reduced cost and fewer security concerns. On the other hand, Unmanned Aerial Vehicle (UAV) networking emerged as a paradigm offering flexible services, new ephemeral applications such as safety and disaster management, mobile crowd-sensing, and fast delivery, to name a few. However, to efficiently use these services, discovery and selection strategies must be taken into account. In this context, discovering the services made available by a UAV-MEC network, and selecting the best services among those available in a timely and efficient manner, can become a challenging task. To face these issues, game theory methods have been proposed in the literature that perfectly suit the case of UAV-MEC services by modeling this challenge as a Stackelberg game, and using existing approaches to find the solution for such a game aiming at an efficient services' discovery and service selection. Hence, the goal of this paper is to propose Stackelberg-game-based solutions for service discovery and selection in the context of UAV-based mobile edge computing. Simulations results conducted using the NS-3 simulator highlight the efficiency of our proposed game in terms of price and QoS metrics.}, } @article {pmid37177402, year = {2023}, author = {Dang, VA and Vu Khanh, Q and Nguyen, VH and Nguyen, T and Nguyen, DC}, title = {Intelligent Healthcare: Integration of Emerging Technologies and Internet of Things for Humanity.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {9}, pages = {}, pmid = {37177402}, issn = {1424-8220}, mesh = {Humans ; *Internet of Things ; Internet ; Gold ; Intelligence ; Delivery of Health Care ; }, abstract = {Health is gold, and good health is a matter of survival for humanity. 
The development of the healthcare industry aligns with the development of humans throughout history. Nowadays, along with the strong growth of science and technology, the medical domain in general and the healthcare industry have achieved many breakthroughs, such as remote medical examination and treatment applications, pandemic prediction, and remote patient health monitoring. The advent of 5th generation communication networks in the early 2020s led to the Internet of Things concept. Moreover, the 6th generation communication networks (so-called 6G) expected to launch in 2030 will be the next revolution of the IoT era, and will include autonomous IoT systems and form a series of endogenous intelligent applications that serve humanity. One of the domains that receives the most attention is smart healthcare. In this study, we conduct a comprehensive survey of IoT-based technologies and solutions in the medical field. Then, we propose an all-in-one computing architecture for real-time IoHT applications and present possible solutions to achieving the proposed architecture. Finally, we discuss challenges, open issues, and future research directions. We hope that the results of this study will serve as essential guidelines for further research in the human healthcare domain.}, } @article {pmid37172382, year = {2023}, author = {Feraud, M and O'Brien, JW and Samanipour, S and Dewapriya, P and van Herwerden, D and Kaserzon, S and Wood, I and Rauert, C and Thomas, KV}, title = {InSpectra - A platform for identifying emerging chemical threats.}, journal = {Journal of hazardous materials}, volume = {455}, number = {}, pages = {131486}, doi = {10.1016/j.jhazmat.2023.131486}, pmid = {37172382}, issn = {1873-3336}, abstract = {Non-target analysis (NTA) employing high-resolution mass spectrometry (HRMS) coupled with liquid chromatography is increasingly being used to identify chemicals of biological relevance. 
HRMS datasets are large and complex making the identification of potentially relevant chemicals extremely challenging. As they are recorded in vendor-specific formats, interpreting them is often reliant on vendor-specific software that may not accommodate advancements in data processing. Here we present InSpectra, a vendor independent automated platform for the systematic detection of newly identified emerging chemical threats. InSpectra is web-based, open-source/access and modular providing highly flexible and extensible NTA and suspect screening workflows. As a cloud-based platform, InSpectra exploits parallel computing and big data archiving capabilities with a focus for sharing and community curation of HRMS data. InSpectra offers a reproducible and transparent approach for the identification, tracking and prioritisation of emerging chemical threats.}, } @article {pmid37172351, year = {2023}, author = {Zerouali, B and Santos, CAG and do Nascimento, TVM and Silva, RMD}, title = {A cloud-integrated GIS for forest cover loss and land use change monitoring using statistical methods and geospatial technology over northern Algeria.}, journal = {Journal of environmental management}, volume = {341}, number = {}, pages = {118029}, doi = {10.1016/j.jenvman.2023.118029}, pmid = {37172351}, issn = {1095-8630}, mesh = {Humans ; *Geographic Information Systems ; *Conservation of Natural Resources/methods ; Algeria ; Agriculture ; Environmental Monitoring/methods ; Technology ; }, abstract = {Over the last two decades, forest cover has experienced significant impacts from fires and deforestation worldwide due to direct human activities and climate change. This paper assesses trends in forest cover loss and land use and land cover changes in northern Algeria between 2000 and 2020 using datasets extracted from Google Earth Engine (GEE), such as the Hanssen Global Forest Change and MODIS Land Cover Type products (MCD12Q1). 
Classification was performed using the pixel-based supervised machine-learning algorithm called Random Forest (RF). Trends were analyzed using methods such as Mann-Kendall and Sen. The study area comprises 17 basins with high rainfall variability. The results indicated that the forest area decreased by 64.96%, from 3718 to 1266 km[2], during the 2000-2020 period, while the barren area increased by 40%, from 134,777 to 188,748 km[2]. The findings revealed that the Constantinois-Seybousse-Mellegue hydrographic basin was the most affected by deforestation and cover loss, exceeding 50% (with an area of 1018 km[2]), while the Seybouse River basin experienced the highest percentage of cover loss at 40%. Nonparametric tests showed that seven river basins (41%) had significantly increasing trends of forest cover loss. According to the obtained results, the forest loss situation in Algeria, especially in the northeastern part, is very alarming and requires an exceptional and urgent plan to protect forests and the ecological system against wildfires and climate change. The study provides a diagnosis that should encourage better protection and management of forest cover in Algeria.}, } @article {pmid37169026, year = {2022}, author = {Mirzadeh, SI and Arefeen, A and Ardo, J and Fallahzadeh, R and Minor, B and Lee, JA and Hildebrand, JA and Cook, D and Ghasemzadeh, H and Evangelista, LS}, title = {Use of machine learning to predict medication adherence in individuals at risk for atherosclerotic cardiovascular disease.}, journal = {Smart health (Amsterdam, Netherlands)}, volume = {26}, number = {}, pages = {}, pmid = {37169026}, issn = {2352-6483}, support = {R21 AG053162/AG/NIA NIH HHS/United States ; R21 NR015410/NR/NINR NIH HHS/United States ; }, abstract = {BACKGROUND: Medication nonadherence is a critical problem with severe implications in individuals at risk for atherosclerotic cardiovascular disease. 
Many studies have attempted to predict medication adherence in this population, but few, if any, have been effective in prediction, suggesting that essential risk factors remain unidentified.

OBJECTIVE: This study's objective was to (1) establish an accurate prediction model of medication adherence in individuals at risk for atherosclerotic cardiovascular disease and (2) identify significant contributing factors to the predictive accuracy of medication adherence. In particular, we aimed to use only the baseline questionnaire data to assess medication adherence prediction feasibility.

METHODS: A sample of 40 individuals at risk for atherosclerotic cardiovascular disease was recruited for an eight-week feasibility study. After collecting baseline data, we recorded data from a pillbox that sent events to a cloud-based server. Health measures and medication use events were analyzed using machine learning algorithms to identify variables that best predict medication adherence.

RESULTS: Our adherence prediction model, based on only the ten most relevant variables, achieved an average error rate of 12.9%. Medication adherence was closely correlated with being encouraged to play an active role in their treatment, having confidence about what to do in an emergency, knowledge about their medications, and having a special person in their life.

CONCLUSIONS: Our results showed the significance of clinical and psychosocial factors for predicting medication adherence in people at risk for atherosclerotic cardiovascular diseases. Clinicians and researchers can use these factors to stratify individuals to make evidence-based decisions to reduce the risks.}, } @article {pmid37168713, year = {2023}, author = {Shi, S and Jiang, Q and Jin, X and Wang, W and Liu, K and Chen, H and Liu, P and Zhou, W and Yao, S}, title = {A comparative analysis of near-infrared image colorization methods for low-power NVIDIA Jetson embedded systems.}, journal = {Frontiers in neurorobotics}, volume = {17}, number = {}, pages = {1143032}, pmid = {37168713}, issn = {1662-5218}, abstract = {The near-infrared (NIR) image obtained by an NIR camera is a grayscale image that is inconsistent with the human visual spectrum. It can be difficult to perceive the details of a scene from an NIR scene; thus, a method is required to convert them to visible images, providing color and texture information. In addition, a camera produces so much video data that it increases the pressure on the cloud server. Image processing can be done on an edge device, but the computing resources of edge devices are limited, and their power consumption constraints need to be considered. Graphics Processing Unit (GPU)-based NVIDIA Jetson embedded systems offer a considerable advantage over Central Processing Unit (CPU)-based embedded devices in inference speed. For this study, we designed an evaluation system that uses image quality, resource occupancy, and energy consumption metrics to verify the performance of different NIR image colorization methods on low-power NVIDIA Jetson embedded systems for practical applications. The performance of 11 image colorization methods on NIR image datasets was tested on three different configurations of NVIDIA Jetson boards. 
The experimental results indicate that the Pix2Pix method performs best, with a rate of 27 frames per second on the Jetson Xavier NX. This performance is sufficient to meet the requirements of real-time NIR image colorization.}, } @article {pmid37168541, year = {2022}, author = {Ko, S and Zhou, H and Zhou, JJ and Won, JH}, title = {High-Performance Statistical Computing in the Computing Environments of the 2020s.}, journal = {Statistical science : a review journal of the Institute of Mathematical Statistics}, volume = {37}, number = {4}, pages = {494-518}, pmid = {37168541}, issn = {0883-4237}, support = {R25 HD108136/HD/NICHD NIH HHS/United States ; R01 HG006139/HG/NHGRI NIH HHS/United States ; R21 HL150374/HL/NHLBI NIH HHS/United States ; R35 GM141798/GM/NIGMS NIH HHS/United States ; K01 DK106116/DK/NIDDK NIH HHS/United States ; }, abstract = {Technological advances in the past decade, hardware and software alike, have made access to high-performance computing (HPC) easier than ever. We review these advances from a statistical computing perspective. Cloud computing makes access to supercomputers affordable. Deep learning software libraries make programming statistical algorithms easy and enable users to write code once and run it anywhere-from a laptop to a workstation with multiple graphics processing units (GPUs) or a supercomputer in a cloud. Highlighting how these developments benefit statisticians, we review recent optimization algorithms that are useful for high-dimensional models and can harness the power of HPC. Code snippets are provided to demonstrate the ease of programming. We also provide an easy-to-use distributed matrix data structure suitable for HPC. Employing this data structure, we illustrate various statistical applications including large-scale positron emission tomography and ℓ1-regularized Cox regression. Our examples easily scale up to an 8-GPU workstation and a 720-CPU-core cluster in a cloud. 
As a case in point, we analyze the onset of type-2 diabetes from the UK Biobank with 200,000 subjects and about 500,000 single nucleotide polymorphisms using the HPC ℓ1-regularized Cox regression. Fitting this half-million-variate model takes less than 45 minutes and reconfirms known associations. To our knowledge, this is the first demonstration of the feasibility of penalized regression of survival outcomes at this scale.}, } @article {pmid37168441, year = {2023}, author = {Chatterjee, P and Bose, R and Banerjee, S and Roy, S}, title = {Enhancing Data Security of Cloud Based LMS.}, journal = {Wireless personal communications}, volume = {130}, number = {2}, pages = {1123-1139}, pmid = {37168441}, issn = {0929-6212}, abstract = {Around the world, the educational system is evolving. The new trend can be found in traditional classroom systems as well as digitalization systems. Cloud-based Learning Management Systems (LMS) will accelerate the educational industry forward in the next years because they can provide end-user with a versatile, convenient, secure, and cost-effective learning process. The cloud-based LMS approach is the most effective and proper learning model in the worldwide educational sector, particularly if the organization is in a state of depression owing to a global pandemic. It can be utilized over the internet with several users on the same platform. As a result, the initial requirement is important to enable to the LMS model. Despite its many advantages, LMS confronts challenges such as confidentiality, user acceptance, and traffic. In a pandemic like Covid 19, the entire planet depends on a safe LMS platform to establish student and instructor trust. Therefore, with this work, the attempt has been made to explain one LMS model that may provide its users with optimal security, a user-friendly environment, and quick access. 
This paper discusses the use of the cloud attack, and also cryptographic and steganographic security models and techniques to address these issues. There's also information on what kinds of security vulnerabilities or operations on cloud data are feasible, and also how to deal with them using various algorithms.}, } @article {pmid37167613, year = {2023}, author = {Yang, SY and Oh, YH}, title = {Video-Assisted Versus Traditional Problem-Based Learning: A Quasi-Experimental Study Among Pediatric Nursing Students.}, journal = {The journal of nursing research : JNR}, volume = {31}, number = {3}, pages = {e277}, pmid = {37167613}, issn = {1948-965X}, mesh = {Humans ; Child ; *Problem-Based Learning/methods ; *Students, Nursing/psychology ; Learning ; Thinking ; Pediatric Nursing ; }, abstract = {BACKGROUND: The text-assisted problem-based, methods traditionally used to teach nursing students cannot adequately simulate holistic clinical situations and patient symptoms. Although video-assisted, problem-based learning methods combined with text have shown positive results in terms of improving comprehension and cognitive abilities, some studies have shown these methods to be inferior to text-assisted methods in terms of promoting deep critical thinking in medical students.

PURPOSE: This study was designed to assess the benefits in nursing education of video-assisted, problem-based learning using online multimedia technologies compared with text-assisted, problem-based learning using traditional face-to-face classes.

METHODS: A quasi-experimental, nonequivalent control group, preintervention-and-postintervention design was used. The experimental group (n = 31) received video-assisted, problem-based learning materials with multimedia technologies (video scenarios, Google Docs worksheets, Google slides, Zoom cloud meetings, and e-learning management system) and weekly online lectures (100 minutes) for 4 weeks. The control group (n = 35) received text-assisted, problem-based learning materials with traditional face-to-face classes and weekly lectures (100 minutes) for 4 weeks. The study data were analyzed using chi-square, Fisher's exact, and independent t tests as well as analysis of variance.

RESULTS: At posttest, learning motivation (t = 3.25, p = .002), academic self-efficacy (t = 2.41, p = .019), and self-directed learning (t = 3.08, p = .003) were significantly higher in the experimental group than in the control group.

Video-assisted, problem-based learning using multimedia technologies was shown to be effective in increasing learning motivation, academic self-efficacy, and self-directed learning in nursing students. These findings have implications for the development and planning of contactless classes in response to the coronavirus pandemic. Notably, no intergroup differences were found in terms of problem-solving skills. Future studies should include in-depth reviews and assessments of the difficulties faced in producing problem scenarios as well as the methods of instruction.}, } @article {pmid37165050, year = {2023}, author = {Liu, N and Guo, D and Song, Z and Zhong, S and Hu, R}, title = {BIM-based digital platform and risk management system for mountain tunnel construction.}, journal = {Scientific reports}, volume = {13}, number = {1}, pages = {7585}, pmid = {37165050}, issn = {2045-2322}, support = {2019M663648//the China Postdoctoral Science Foundation/ ; 2022JM-190//the Natural Science Basic Research Program of Shaanxi/ ; 52178393//the National Natural Science Foundation of China/ ; 2020TD-005//the Innovation Capability Support Plan of Shaanxi Province - Innovation Team/ ; 20JK0709//the Shaanxi Provincial Department of Education Project/ ; }, abstract = {During the construction of mountain tunnels, there are often various intricate and mutable potential hazards, the management and control of which are crucial to ensuring the safety of such construction. With the rapid advancement of engineering information technologies, including Building Information Model (BIM), the internet, big data, and cloud computing, dynamic management of mountain tunnel construction will inevitably become a prevailing trend. This paper proposes a new digital approach to realize the informatization and visualization of risk management in mountain tunnel construction, by combining monitoring measurement with advanced geological prediction based on BIM technology. 
The proposed approach suggests a BIM-based digital platform architecture for mountain tunnel construction, which is comprised of five layers-basic, model, data, application, and user. The integration of these five layers can realize risk management information during the construction of mountain tunnels. In addition, a set of dynamic risk management systems, including risk monitoring, identification, and assessment, can be established based on the digital platform. The digital platform and dynamic risk management system proposed in this paper have certain advantages in the construction of mountain tunnels, providing a new and significant way for the management of safety risks in such construction projects.}, } @article {pmid37133929, year = {2023}, author = {Li, R and Shen, M and Liu, H and Bai, L and Zhang, L}, title = {Do Infrared Thermometers Hold Promise for an Effective Early Warning System for Emerging Respiratory Infectious Diseases?.}, journal = {JMIR formative research}, volume = {7}, number = {}, pages = {e42548}, pmid = {37133929}, issn = {2561-326X}, support = {INV-006104/GATES/Bill & Melinda Gates Foundation/United States ; }, abstract = {BACKGROUND: Major respiratory infectious diseases, such as influenza, SARS-CoV, and SARS-CoV-2, have caused historic global pandemics with severe disease and economic burdens. Early warning and timely intervention are key to suppress such outbreaks.

OBJECTIVE: We propose a theoretical framework for a community-based early warning system (EWS) that will proactively detect temperature abnormalities in the community based on a collective network of infrared thermometer-enabled smartphone devices.

METHODS: We developed a framework for a community-based EWS and demonstrated its operation with a schematic flowchart. We emphasize the potential feasibility of the EWS and potential obstacles.

RESULTS: Overall, the framework uses advanced artificial intelligence (AI) technology on cloud computing platforms to identify the probability of an outbreak in a timely manner. It hinges on the detection of geospatial temperature abnormalities in the community based on mass data collection, cloud-based computing and analysis, decision-making, and feedback. The EWS may be feasible for implementation considering its public acceptance, technical practicality, and value for money. However, it is important that the proposed framework work in parallel or in combination with other early warning mechanisms due to a relatively long initial model training process.

CONCLUSIONS: The framework, if implemented, may provide an important tool for important decisions for early prevention and control of respiratory diseases for health stakeholders.}, } @article {pmid37128501, year = {2023}, author = {Zhao, F and Peng, C and Xu, D and Liu, Y and Niu, K and Tang, H}, title = {Attribute-based multi-user collaborative searchable encryption in COVID-19.}, journal = {Computer communications}, volume = {205}, number = {}, pages = {118-126}, pmid = {37128501}, issn = {0140-3664}, abstract = {With the outbreak of COVID-19, the government has been forced to collect a large amount of detailed information about patients in order to effectively curb the epidemic of the disease, including private data of patients. Searchable encryption is an essential technology for ciphertext retrieval in cloud computing environments, and many searchable encryption schemes are based on attributes to control user's search permissions to protect their data privacy. The existing attribute-based searchable encryption (ABSE) scheme can only implement the situation where the search permission of one person meets the search policy and does not support users to obtain the search permission through collaboration. In this paper, we proposed a new attribute-based collaborative searchable encryption scheme in multi-user setting (ABCSE-MU), which takes the access tree as the access policy and introduces the translation nodes to implement collaborative search. The cooperation can only be reached on the translation node and the flexibility of search permission is achieved on the premise of data security. ABCSE-MU scheme solves the problem that a single user has insufficient search permissions but still needs to search, making the user's access policy more flexible. We use random blinding to ensure the confidentiality and security of the secret key, further prove that our scheme is secure under the Decisional Bilinear Diffie-Hellman (DBDH) assumption. 
Security analysis further shows that the scheme can ensure the confidentiality of data under chosen-keyword attacks and resist collusion attacks.}, } @article {pmid37128419, year = {2022}, author = {Jalali, A and Huang, SS and Kochendorfer, KM}, title = {Cloud Computing Synthetic Syndromic Surveillance Systems: Opioid Epidemic in Illinois.}, journal = {AMIA ... Annual Symposium proceedings. AMIA Symposium}, volume = {2022}, number = {}, pages = {580-586}, pmid = {37128419}, issn = {1942-597X}, mesh = {Humans ; *Analgesics, Opioid/administration & dosage/adverse effects/poisoning ; *Cloud Computing ; *Drug Overdose/drug therapy/epidemiology ; *Emergency Medical Services ; *Opioid Epidemic/statistics & numerical data ; *Sentinel Surveillance ; Databases, Factual ; Chicago/epidemiology ; Prognosis ; Male ; Female ; Middle Aged ; }, abstract = {With an increasing number of overdose cases yearly, the city of Chicago is facing an opioid epidemic. Many of these overdose cases lead to 911 calls that necessitate timely response from our limited emergency medicine services. This paper demonstrates how data from these calls along with synthetic and geospatial data can help create a syndromic surveillance system to combat this opioid crisis. Chicago EMS data is obtained from the Illinois Department of Public Health with a database structure using the NEMSIS standard. This information is combined with information from the RTI U.S. Household Population database, before being transferred to an Azure Data Lake. Afterwards, the data is integrated with Azure Synapse before being refined in another data lake and filtered with ICD-10 codes. 
Afterwards, we moved the data to ArcGIS Enterprise to apply spatial statistics and geospatial analytics to create our surveillance system.}, } @article {pmid37123982, year = {2023}, author = {Gomis, MKS and Oladinrin, OT and Saini, M and Pathirage, C and Arif, M}, title = {A scientometric analysis of global scientific literature on learning resources in higher education.}, journal = {Heliyon}, volume = {9}, number = {4}, pages = {e15438}, pmid = {37123982}, issn = {2405-8440}, abstract = {There is a significant increase in the literature on learning resources in Higher Education (HE) but very limited evidence of studies that have taken a global overview of the context, range, and emerging trends from the previous research. This study aims to conduct a Scientometric analysis of research articles to accommodate a global overview and research trends under the theme of learning resources in HE. 4489 scientific articles were obtained as the dataset from the Web Of Science database between 1970 and 2022. Network maps and critical data were obtained by conducting co-authorship analysis for authors, organisations and countries and co-occurrence analysis for keywords from the VOSviewer software. The study revealed that the USA had a significant research input, and Salamin, N. from the University of Lausanne was recognised as the most frequently published author. The University of Illinois, USA, has the highest contribution to research articles, and the most popular research hotspots and trends were e-learning, Education, Academic libraries, Learning resources, and Cloud computing. However, the most critical finding from the study is that there needs to be real collaboration within the research theme and suggests ways to improve collaborations to enhance learning resources in HE. This study may be the first to conduct a scientometric analysis of Learning Resources in Higher education. 
This study offers valuable insight to academics, academic institutions, researchers, policymakers and pedagogical statutory bodies to understand the current context of learning resources in HE and recognise further develop research, collaborations and policies by considering critical findings from the study.}, } @article {pmid37122827, year = {2023}, author = {Zhang, Y and Dong, H}, title = {Criminal law regulation of cyber fraud crimes-from the perspective of citizens' personal information protection in the era of edge computing.}, journal = {Journal of cloud computing (Heidelberg, Germany)}, volume = {12}, number = {1}, pages = {64}, pmid = {37122827}, issn = {2192-113X}, abstract = {Currently, cloud computing provides users all over the globe with Information and Communication Technology facilities that are utility-oriented. This technology is trying to drive the development of data center design by designing and building them as networks of cloud machines, enabling users to access and run the application from any part of the globe. Cloud computing provides considerable benefits to organizations by providing rapid and adaptable ICT software and hardware systems, allowing them to concentrate on creating innovative business values for the facilities they provide. The right to privacy of big data has acquired new definitions with the continued advancement of cloud computing, and the techniques available to protect citizens' personal information under administrative law have managed to grow in a multitude. Because of the foregoing, internet fraud is a new type of crime that has emerged over time and is based on network technology. This paper analyzed and studied China's internet fraud governance capabilities, and made a comprehensive evaluation of them using cloud computing technology and the Analytic Hierarchy Process (AHP). 
This paper discussed personal information security and the improvement of criminal responsibility from the perspective of citizens' information security and designed and analyzed cases. In addition, this paper also analyzed and studied the ability of network fraud governance in the era of cloud computing. It also carried out a comprehensive evaluation and used the fuzzy comprehensive evaluation method to carry out the evaluation. A questionnaire survey was used to survey 100 residents in district X of city Z and district Y of the suburban area. Among the 100 people, almost all of them received scam calls or text messages, accounting for 99%, of which 8 were scammed. Among the people, more than 59.00% of the people expressed dissatisfaction with the government's Internet fraud satisfaction survey. Therefore, in the process of combating Internet fraud, the government still needs to step up its efforts.}, } @article {pmid37115834, year = {2023}, author = {Lin, BS and Peng, CW and Lee, IJ and Hsu, HK and Lin, BS}, title = {System Based on Artificial Intelligence Edge Computing for Detecting Bedside Falls and Sleep Posture.}, journal = {IEEE journal of biomedical and health informatics}, volume = {27}, number = {7}, pages = {3549-3558}, doi = {10.1109/JBHI.2023.3271463}, pmid = {37115834}, issn = {2168-2208}, mesh = {Humans ; Aged ; *Artificial Intelligence ; *Neural Networks, Computer ; Algorithms ; Posture ; Sleep ; Cloud Computing ; }, abstract = {Bedside falls and pressure ulcers are crucial issues in geriatric care. Although many bedside monitoring systems have been proposed, they are limited by the computational complexity of their algorithms. Moreover, most of the data collected by the sensors of these systems must be transmitted to a back-end server for calculation. With an increase in the demand for the Internet of Things, problems such as higher cost of bandwidth and overload of server computing are faced when using the aforementioned systems. 
To reduce the server workload, certain computing tasks must be offloaded from cloud servers to edge computing platforms. In this study, a bedside monitoring system based on neuromorphic computing hardware was developed to detect bedside falls and sleeping posture. The artificial intelligence neural network executed on the back-end server was simplified and used on an edge computing platform. An integer 8-bit-precision neural network model was deployed on the edge computing platform to process the thermal image captured by the thermopile array sensing element to conduct sleep posture classification and bed position detection. The bounding box of the bed was then converted into the features for posture classification correction to correct the posture. In an experimental evaluation, the accuracy rate, inferencing speed, and power consumption of the developed system were 94.56%, 5.28 frames per second, and 1.5 W, respectively. All the calculations of the developed system are conducted on an edge computing platform, and the developed system only transmits fall events to the back-end server through Wi-Fi and protects user privacy.}, } @article {pmid37112575, year = {2023}, author = {Gabhane, LR and Kanidarapu, N}, title = {Environmental Risk Assessment Using Neural Network in Liquefied Petroleum Gas Terminal.}, journal = {Toxics}, volume = {11}, number = {4}, pages = {}, pmid = {37112575}, issn = {2305-6304}, abstract = {The accidental release of toxic gases leads to fire, explosion, and acute toxicity, and may result in severe problems for people and the environment. The risk analysis of hazardous chemicals using consequence modelling is essential to improve the process reliability and safety of the liquefied petroleum gas (LPG) terminal. The previous researchers focused on single-mode failure for risk assessment. No study exists on LPG plant multimode risk analysis and threat zone prediction using machine learning. 
This study aims to evaluate the fire and explosion hazard potential of one of Asia's biggest LPG terminals in India. Areal locations of hazardous atmospheres (ALOHA) software simulations are used to generate threat zones for the worst scenarios. The same dataset is used to develop the artificial neural network (ANN) prediction model. The threats of flammable vapour cloud, thermal radiations from fire, and overpressure blast waves are estimated in two different weather conditions. A total of 14 LPG leak scenarios involving a 19 kg capacity cylinder, 21 tons capacity tank truck, 600 tons capacity mounded bullet, and 1350 tons capacity Horton sphere in the terminal are considered. Amongst all scenarios, the catastrophic rupture of the Horton sphere of 1350 MT capacity presented the most significant risk to life safety. Thermal flux of 37.5 kW/ m[2] from flames will damage nearby structures and equipment and spread fire by the domino effect. A novel soft computing technique called a threat and risk analysis-based ANN model has been developed to predict threat zone distances for LPG leaks. Based on the significance of incidents in the LPG terminal, 160 attributes were collected for the ANN modelling. The developed ANN model predicted the threat zone distance with an accuracy of R[2] value being 0.9958, and MSE being 202.9061 in testing. These results are evident in the reliability of the proposed framework for safety distance prediction. 
The LPG plant authorities can adopt this model to assess the safety distance from the hazardous chemical explosion based on the prior forecasted atmosphere conditions from the weather department.}, } @article {pmid37112349, year = {2023}, author = {Čilić, I and Krivić, P and Podnar Žarko, I and Kušek, M}, title = {Performance Evaluation of Container Orchestration Tools in Edge Computing Environments.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {8}, pages = {}, pmid = {37112349}, issn = {1424-8220}, support = {IP-2019-04-1986//Croatian Science Foundation/ ; }, abstract = {Edge computing is a viable approach to improve service delivery and performance parameters by extending the cloud with resources placed closer to a given service environment. Numerous research papers in the literature have already identified the key benefits of this architectural approach. However, most results are based on simulations performed in closed network environments. This paper aims to analyze the existing implementations of processing environments containing edge resources, taking into account the targeted quality of service (QoS) parameters and the utilized orchestration platforms. Based on this analysis, the most popular edge orchestration platforms are evaluated in terms of their workflow that allows the inclusion of remote devices in the processing environment and their ability to adapt the logic of the scheduling algorithms to improve the targeted QoS attributes. The experimental results compare the performance of the platforms and show the current state of their readiness for edge computing in real network and execution environments. These findings suggest that Kubernetes and its distributions have the potential to provide effective scheduling across the resources on the network's edge. 
However, some challenges still have to be addressed to completely adapt these tools for such a dynamic and distributed execution environment as edge computing implies.}, } @article {pmid37112296, year = {2023}, author = {Kim, T and Yoo, SE and Kim, Y}, title = {Edge/Fog Computing Technologies for IoT Infrastructure II.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {8}, pages = {}, pmid = {37112296}, issn = {1424-8220}, support = {2020R1F1A1048179//National Research Foundation of Korea (NRF) grant funded by the Korea government (MSIT)/ ; Research Grant of Jeonju University in 2022//Research Grant of Jeonju University in 2022/ ; }, abstract = {The prevalence of smart devices and cloud computing has led to an explosion in the amount of data generated by IoT devices [...].}, } @article {pmid37112224, year = {2023}, author = {Zhu, M and Gao, S and Tu, G and Chen, D}, title = {Multi-Access Edge Computing (MEC) Based on MIMO: A Survey.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {8}, pages = {}, pmid = {37112224}, issn = {1424-8220}, support = {292021000242//Research Funds for the Central Universities/ ; 2017YFB0403604//National Key Research and Development Program of China/ ; 61571416//National Natural Science Foundation of China/ ; 61072045//National Natural Science Foundation of China/ ; 61032006//National Natural Science Foundation of China/ ; }, abstract = {With the rapid development of wireless communication technology and the emergence of intelligent applications, higher requirements have been put forward for data communication and computing capacity. Multi-access edge computing (MEC) can handle highly demanding applications by users by sinking the services and computing capabilities of the cloud to the edge of the cell. Meanwhile, the multiple input multiple output (MIMO) technology based on large-scale antenna arrays can achieve an order-of-magnitude improvement in system capacity. 
The introduction of MIMO into MEC takes full advantage of the energy and spectral efficiency of MIMO technology, providing a new computing paradigm for time-sensitive applications. In parallel, it can accommodate more users and cope with the inevitable trend of continuous data traffic explosion. In this paper, the state-of-the-art research status in this field is investigated, summarized and analyzed. Specifically, we first summarize a multi-base station cooperative mMIMO-MEC model that can easily be expanded to adapt to different MIMO-MEC application scenarios. Subsequently, we comprehensively analyze the current works, compare them to each other and summarize them, mainly from four aspects: research scenarios, application scenarios, evaluation indicators and research issues, and research algorithms. Finally, some open research challenges are identified and discussed, and these indicate the direction for future research on MIMO-MEC.}, } @article {pmid37112221, year = {2023}, author = {Oladimeji, D and Gupta, K and Kose, NA and Gundogan, K and Ge, L and Liang, F}, title = {Smart Transportation: An Overview of Technologies and Applications.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {8}, pages = {}, pmid = {37112221}, issn = {1424-8220}, abstract = {As technology continues to evolve, our society is becoming enriched with more intelligent devices that help us perform our daily activities more efficiently and effectively. One of the most significant technological advancements of our time is the Internet of Things (IoT), which interconnects various smart devices (such as smart mobiles, intelligent refrigerators, smartwatches, smart fire alarms, smart door locks, and many more) allowing them to communicate with each other and exchange data seamlessly. We now use IoT technology to carry out our daily activities, for example, transportation. 
In particular, the field of smart transportation has intrigued researchers due to its potential to revolutionize the way we move people and goods. IoT provides drivers in a smart city with many benefits, including traffic management, improved logistics, efficient parking systems, and enhanced safety measures. Smart transportation is the integration of all these benefits into applications for transportation systems. However, as a way of further improving the benefits provided by smart transportation, other technologies have been explored, such as machine learning, big data, and distributed ledgers. Some examples of their application are the optimization of routes, parking, street lighting, accident prevention, detection of abnormal traffic conditions, and maintenance of roads. In this paper, we aim to provide a detailed understanding of the developments in the applications mentioned earlier and examine current researches that base their applications on these sectors. We aim to conduct a self-contained review of the different technologies used in smart transportation today and their respective challenges. Our methodology encompassed identifying and screening articles on smart transportation technologies and its applications. To identify articles addressing our topic of review, we searched for articles in the four significant databases: IEEE Xplore, ACM Digital Library, Science Direct, and Springer. Consequently, we examined the communication mechanisms, architectures, and frameworks that enable these smart transportation applications and systems. We also explored the communication protocols enabling smart transportation, including Wi-Fi, Bluetooth, and cellular networks, and how they contribute to seamless data exchange. We delved into the different architectures and frameworks used in smart transportation, including cloud computing, edge computing, and fog computing. 
Lastly, we outlined current challenges in the smart transportation field and suggested potential future research directions. We will examine data privacy and security issues, network scalability, and interoperability between different IoT devices.}, } @article {pmid37100543, year = {2023}, author = {Alberto, IRI and Alberto, NRI and Ghosh, AK and Jain, B and Jayakumar, S and Martinez-Martin, N and McCague, N and Moukheiber, D and Moukheiber, L and Moukheiber, M and Moukheiber, S and Yaghy, A and Zhang, A and Celi, LA}, title = {The impact of commercial health datasets on medical research and health-care algorithms.}, journal = {The Lancet. Digital health}, volume = {5}, number = {5}, pages = {e288-e294}, pmid = {37100543}, issn = {2589-7500}, support = {R01 EB017205/EB/NIBIB NIH HHS/United States ; }, mesh = {Humans ; *Algorithms ; *Biomedical Research ; Privacy ; Reproducibility of Results ; *Datasets as Topic/economics/ethics/trends ; Consumer Health Information/economics/ethics ; }, abstract = {As the health-care industry emerges into a new era of digital health driven by cloud data storage, distributed computing, and machine learning, health-care data have become a premium commodity with value for private and public entities. Current frameworks of health data collection and distribution, whether from industry, academia, or government institutions, are imperfect and do not allow researchers to leverage the full potential of downstream analytical efforts. In this Health Policy paper, we review the current landscape of commercial health data vendors, with special emphasis on the sources of their data, challenges associated with data reproducibility and generalisability, and ethical considerations for data vending. We argue for sustainable approaches to curating open-source health data to enable global populations to be included in the biomedical research community. 
However, to fully implement these approaches, key stakeholders should come together to make health-care datasets increasingly accessible, inclusive, and representative, while balancing the privacy and rights of individuals whose data are being collected.}, } @article {pmid37093388, year = {2023}, author = {Mohinuddin, S and Sengupta, S and Sarkar, B and Saha, UD and Islam, A and Islam, ARMT and Hossain, ZM and Mahammad, S and Ahamed, T and Mondal, R and Zhang, W and Basra, A}, title = {Assessing lake water quality during COVID-19 era using geospatial techniques and artificial neural network model.}, journal = {Environmental science and pollution research international}, volume = {30}, number = {24}, pages = {65848-65864}, pmid = {37093388}, issn = {1614-7499}, mesh = {Humans ; *Water Quality ; Lakes ; *COVID-19 ; Environmental Monitoring/methods ; Communicable Disease Control ; Chlorophyll/analysis ; Neural Networks, Computer ; Phosphorus/analysis ; }, abstract = {The present study evaluates the impact of the COVID-19 lockdown on the water quality of a tropical lake (East Kolkata Wetland or EKW, India) along with seasonal change using Landsat 8 and 9 images of the Google Earth Engine (GEE) cloud computing platform. The research focuses on detecting, monitoring, and predicting water quality in the EKW region using eight parameters-normalized suspended material index (NSMI), suspended particular matter (SPM), total phosphorus (TP), electrical conductivity (EC), chlorophyll-α, floating algae index (FAI), turbidity, Secchi disk depth (SDD), and two water quality indices such as Carlson tropic state index (CTSI) and entropy‑weighted water quality index (EWQI). The results demonstrate that SPM, turbidity, EC, TP, and SDD improved while the FAI and chlorophyll-α increased during the lockdown period due to the stagnation of water as well as a reduction in industrial and anthropogenic pollution. 
Moreover, the prediction of EWQI using an artificial neural network indicates that the overall water quality will improve more if the lockdown period is sustained for another 3 years. The outcomes of the study will help the stakeholders develop effective regulations and strategies for the timely restoration of lake water quality.}, } @article {pmid37090033, year = {2023}, author = {Ji, JL and Demšar, J and Fonteneau, C and Tamayo, Z and Pan, L and Kraljič, A and Matkovič, A and Purg, N and Helmer, M and Warrington, S and Winkler, A and Zerbi, V and Coalson, TS and Glasser, MF and Harms, MP and Sotiropoulos, SN and Murray, JD and Anticevic, A and Repovš, G}, title = {QuNex-An integrative platform for reproducible neuroimaging analytics.}, journal = {Frontiers in neuroinformatics}, volume = {17}, number = {}, pages = {1104508}, pmid = {37090033}, issn = {1662-5196}, support = {R01 MH060974/MH/NIMH NIH HHS/United States ; }, abstract = {INTRODUCTION: Neuroimaging technology has experienced explosive growth and transformed the study of neural mechanisms across health and disease. However, given the diversity of sophisticated tools for handling neuroimaging data, the field faces challenges in method integration, particularly across multiple modalities and species. Specifically, researchers often have to rely on siloed approaches which limit reproducibility, with idiosyncratic data organization and limited software interoperability.

METHODS: To address these challenges, we have developed Quantitative Neuroimaging Environment \& Toolbox (QuNex), a platform for consistent end-to-end processing and analytics. QuNex provides several novel functionalities for neuroimaging analyses, including a ``turnkey'' command for the reproducible deployment of custom workflows, from onboarding raw data to generating analytic features.

RESULTS: The platform enables interoperable integration of multi-modal, community-developed neuroimaging software through an extension framework with a software development kit (SDK) for seamless integration of community tools. Critically, it supports high-throughput, parallel processing in high-performance compute environments, either locally or in the cloud. Notably, QuNex has successfully processed over 10,000 scans across neuroimaging consortia, including multiple clinical datasets. Moreover, QuNex enables integration of human and non-human workflows via a cohesive translational platform.

DISCUSSION: Collectively, this effort stands to significantly impact neuroimaging method integration across acquisition approaches, pipelines, datasets, computational environments, and species. Building on this platform will enable more rapid, scalable, and reproducible impact of neuroimaging technology across health and disease.}, } @article {pmid37089762, year = {2023}, author = {Pongpech, WA}, title = {A Distributed Data Mesh Paradigm for an Event-based Smart Communities Monitoring Product.}, journal = {Procedia computer science}, volume = {220}, number = {}, pages = {584-591}, pmid = {37089762}, issn = {1877-0509}, abstract = {The recent pandemic events in Thailand, Covid-19 in 2018, demonstrated the need for an event-based smart monitoring system. While a distributed multi-level architecture has emerged as an architecture of choice for a larger-scale smart event-based system that requires better latency, security, scalability, and reliability, a recently introduced data mesh paradigm can add a few additional benefits. The paradigm enables each district to become an event-based smart monitoring mesh and handle its analytics and monitoring workload. Districts can form a set of domains in a network of event-based smart community monitoring systems and provide data products for others during a crisis. This paper presents a distributed data mesh paradigm for an event-based smart monitoring product in a given community with predefined domains. The paper presents smart monitoring as a data product between domains. Key considerations for designing an event-based smart monitoring data product are given. The author introduces three possible domains necessary for creating a smart monitoring system in each community. Each domain creates a data product for a given domain and shares data between domains. 
Finally, a three-layer analytics architecture for a smart monitoring product in each domain and a use case is presented.}, } @article {pmid37087660, year = {2023}, author = {Zhao, ZK and Tian, YS and Weng, XX and Li, HW and Sun, WY}, title = {Temporal and spatial variation characteristics of surface water area in the Yellow River Basin from 1986 to 2021.}, journal = {Ying yong sheng tai xue bao = The journal of applied ecology}, volume = {34}, number = {3}, pages = {761-769}, doi = {10.13287/j.1001-9332.202303.021}, pmid = {37087660}, issn = {1001-9332}, mesh = {Humans ; *Water ; *Environmental Monitoring ; Rivers ; Climate Change ; Algorithms ; China ; }, abstract = {The Yellow River Basin is short of water resources. The dynamic monitoring of surface water area is helpful to clarify the distribution and change trend of water resources in this area. It is of great scientific significance to deeply understand the impacts of climate change and human activities on water resources and ensure the ecological security of the basin. Based on the Google Earth Engine (GEE) cloud platform, we analyzed the spatial variations of surface water area in the Yellow River Basin from 1986 to 2021 by using the mixed index algorithm, and revealed the driving factors of surface water area change in the Yellow River Basin. The results showed that the overall recognition accuracy of the water extraction algorithm based on mixing index was 97.5%. Compared with available water data products, the proposed algorithm can guarantee the integrity of the whole water area to a certain extent. The surface water area in the upper, middle, and lower reaches of the Yellow River Basin was 71.7%, 18.4%, and 9.9% of the total surface water area, respectively. From 1986 to 2021, the surface water area of the basin showed an overall upward trend, with a total increase of 3163.6 km[2]. The surface water area of the upper, middle, and downstream regions increased by 72.0%, 22.4%, and 5.6%, respectively. 
The increase of precipitation was the main reason for the increase of water area, with a contribution of 55\%. Vegetation restoration and construction of water conservancy projects had increased the water area of the basin. The intensification of human water extraction activity reduced the water area of the basin.}, } @article {pmid37085488, year = {2023}, author = {Harle, N and Shtanko, O and Movassagh, R}, title = {Observing and braiding topological Majorana modes on programmable quantum simulators.}, journal = {Nature communications}, volume = {14}, number = {1}, pages = {2286}, pmid = {37085488}, issn = {2041-1723}, abstract = {Electrons are indivisible elementary particles, yet paradoxically a collection of them can act as a fraction of a single electron, exhibiting exotic and useful properties. One such collective excitation, known as a topological Majorana mode, is naturally stable against perturbations, such as unwanted local noise, and can thereby robustly store quantum information. As such, Majorana modes serve as the basic primitive of topological quantum computing, providing resilience to errors. However, their demonstration on quantum hardware has remained elusive. Here, we demonstrate a verifiable identification and braiding of topological Majorana modes using a superconducting quantum processor as a quantum simulator. By simulating fermions on a one-dimensional lattice subject to a periodic drive, we confirm the existence of Majorana modes localized at the edges, and distinguish them from other trivial modes. To simulate a basic logical operation of topological quantum computing known as braiding, we propose a non-adiabatic technique, whose implementation reveals correct braiding statistics in our experiments. 
This work could further be used to study topological models of matter using circuit-based simulations, and shows that long-sought quantum phenomena can be realized by anyone in cloud-run quantum simulations, whereby accelerating fundamental discoveries in quantum science and technology.}, } @article {pmid37079367, year = {2023}, author = {Afshar, M and Adelaine, S and Resnik, F and Mundt, MP and Long, J and Leaf, M and Ampian, T and Wills, GJ and Schnapp, B and Chao, M and Brown, R and Joyce, C and Sharma, B and Dligach, D and Burnside, ES and Mahoney, J and Churpek, MM and Patterson, BW and Liao, F}, title = {Deployment of Real-time Natural Language Processing and Deep Learning Clinical Decision Support in the Electronic Health Record: Pipeline Implementation for an Opioid Misuse Screener in Hospitalized Adults.}, journal = {JMIR medical informatics}, volume = {11}, number = {}, pages = {e44977}, pmid = {37079367}, issn = {2291-9694}, support = {UL1 TR002373/TR/NCATS NIH HHS/United States ; }, abstract = {BACKGROUND: The clinical narrative in electronic health records (EHRs) carries valuable information for predictive analytics; however, its free-text form is difficult to mine and analyze for clinical decision support (CDS). Large-scale clinical natural language processing (NLP) pipelines have focused on data warehouse applications for retrospective research efforts. There remains a paucity of evidence for implementing NLP pipelines at the bedside for health care delivery.

OBJECTIVE: We aimed to detail a hospital-wide, operational pipeline to implement a real-time NLP-driven CDS tool and describe a protocol for an implementation framework with a user-centered design of the CDS tool.

METHODS: The pipeline integrated a previously trained open-source convolutional neural network model for screening opioid misuse that leveraged EHR notes mapped to standardized medical vocabularies in the Unified Medical Language System. A sample of 100 adult encounters were reviewed by a physician informaticist for silent testing of the deep learning algorithm before deployment. An end user interview survey was developed to examine the user acceptability of a best practice alert (BPA) to provide the screening results with recommendations. The planned implementation also included a human-centered design with user feedback on the BPA, an implementation framework with cost-effectiveness, and a noninferiority patient outcome analysis plan.

RESULTS: The pipeline was a reproducible workflow with a shared pseudocode for a cloud service to ingest, process, and store clinical notes as Health Level 7 messages from a major EHR vendor in an elastic cloud computing environment. Feature engineering of the notes used an open-source NLP engine, and the features were fed into the deep learning algorithm, with the results returned as a BPA in the EHR. On-site silent testing of the deep learning algorithm demonstrated a sensitivity of 93% (95% CI 66%-99%) and specificity of 92% (95% CI 84%-96%), similar to published validation studies. Before deployment, approvals were received across hospital committees for inpatient operations. Five interviews were conducted; they informed the development of an educational flyer and further modified the BPA to exclude certain patients and allow the refusal of recommendations. The longest delay in pipeline development was because of cybersecurity approvals, especially because of the exchange of protected health information between the Microsoft (Microsoft Corp) and Epic (Epic Systems Corp) cloud vendors. In silent testing, the resultant pipeline provided a BPA to the bedside within minutes of a provider entering a note in the EHR.

CONCLUSIONS: The components of the real-time NLP pipeline were detailed with open-source tools and pseudocode for other health systems to benchmark. The deployment of medical artificial intelligence systems in routine clinical care presents an important yet unfulfilled opportunity, and our protocol aimed to close the gap in the implementation of artificial intelligence-driven CDS.

TRIAL REGISTRATION: ClinicalTrials.gov NCT05745480; https://www.clinicaltrials.gov/ct2/show/NCT05745480.}, } @article {pmid37079353, year = {2023}, author = {Florensa, D and Mateo-Fornes, J and Lopez Sorribes, S and Torres Tuca, A and Solsona, F and Godoy, P}, title = {Exploring Cancer Incidence, Risk Factors, and Mortality in the Lleida Region: Interactive, Open-source R Shiny Application for Cancer Data Analysis.}, journal = {JMIR cancer}, volume = {9}, number = {}, pages = {e44695}, pmid = {37079353}, issn = {2369-1999}, abstract = {BACKGROUND: The cancer incidence rate is essential to public health surveillance. The analysis of this information allows authorities to know the cancer situation in their regions, especially to determine cancer patterns, monitor cancer trends, and help prioritize the allocation of health resource.

OBJECTIVE: This study aimed to present the design and implementation of an R Shiny application to assist cancer registries conduct rapid descriptive and predictive analytics in a user-friendly, intuitive, portable, and scalable way. Moreover, we wanted to describe the design and implementation road map to inspire other population registries to exploit their data sets and develop similar tools and models.

METHODS: The first step was to consolidate the data into the population registry cancer database. These data were cross validated by ASEDAT software, checked later, and reviewed by experts. Next, we developed an online tool to visualize the data and generate reports to assist decision-making under the R Shiny framework. Currently, the application can generate descriptive analytics using population variables, such as age, sex, and cancer type; cancer incidence in region-level geographical heat maps; line plots to visualize temporal trends; and typical risk factor plots. The application also showed descriptive plots about cancer mortality in the Lleida region. This web platform was built as a microservices cloud platform. The web back end consists of an application programming interface and a database, which NodeJS and MongoDB have implemented. All these parts were encapsulated and deployed by Docker and Docker Compose.

RESULTS: The results provide a successful case study in which the tool was applied to the cancer registry of the Lleida region. The study illustrates how researchers and cancer registries can use the application to analyze cancer databases. Furthermore, the results highlight the analytics related to risk factors, second tumors, and cancer mortality. The application shows the incidence and evolution of each cancer during a specific period for gender, age groups, and cancer location, among other functionalities. The risk factors view permitted us to detect that approximately 60\% of cancer patients were diagnosed with excess weight at diagnosis. Regarding mortality, the application showed that lung cancer registered the highest number of deaths for both genders. Breast cancer was the lethal cancer in women. Finally, a customization guide was included as a result of this implementation to deploy the architecture presented.

CONCLUSIONS: This paper aimed to document a successful methodology for exploiting the data in population cancer registries and propose guidelines for other similar records to develop similar tools. We intend to inspire other entities to build an application that can help decision-making and make data more accessible and transparent for the community of users.}, } @article {pmid37073281, year = {2023}, author = {Touckia, JK}, title = {Integrating the digital twin concept into the evaluation of reconfigurable manufacturing systems (RMS): literature review and research trend.}, journal = {The International journal, advanced manufacturing technology}, volume = {126}, number = {3-4}, pages = {875-889}, pmid = {37073281}, issn = {0268-3768}, abstract = {With the rapid advent of new information technologies (Big Data analytics, cyber-physical systems, such as IoT, cloud computing and artificial intelligence), digital twins are being used more and more in smart manufacturing. Despite the fact that their use in industry has attracted the attention of many practitioners and researchers, there is still a need for an integrated and comprehensive digital twin framework for reconfigurable manufacturing systems. To close this research gap, we present evidence from a systematic literature review, including 76 papers from high-quality journals. This paper presents the current research trends on evaluation and the digital twin in reconfigurable manufacturing systems, highlighting application areas and key methodologies and tools. The originality of this paper lies in its proposal of interesting avenues for future research on the integration of the digital twin in the evaluation of RMS. The benefits of digital twins are multiple such as evaluation of current and future capabilities of an RMS during its life cycle, early discovery of system performance deficiencies and production optimization. The idea is to implement a digital twin that links the virtual and physical environments. 
Finally, important issues and emerging trends in the literature are highlighted to encourage researchers and practitioners to develop studies in this area that are strongly related to the Industry 4.0 environment.}, } @article {pmid37066421, year = {2023}, author = {Hitz, BC and Jin-Wook, L and Jolanki, O and Kagda, MS and Graham, K and Sud, P and Gabdank, I and Strattan, JS and Sloan, CA and Dreszer, T and Rowe, LD and Podduturi, NR and Malladi, VS and Chan, ET and Davidson, JM and Ho, M and Miyasato, S and Simison, M and Tanaka, F and Luo, Y and Whaling, I and Hong, EL and Lee, BT and Sandstrom, R and Rynes, E and Nelson, J and Nishida, A and Ingersoll, A and Buckley, M and Frerker, M and Kim, DS and Boley, N and Trout, D and Dobin, A and Rahmanian, S and Wyman, D and Balderrama-Gutierrez, G and Reese, F and Durand, NC and Dudchenko, O and Weisz, D and Rao, SSP and Blackburn, A and Gkountaroulis, D and Sadr, M and Olshansky, M and Eliaz, Y and Nguyen, D and Bochkov, I and Shamim, MS and Mahajan, R and Aiden, E and Gingeras, T and Heath, S and Hirst, M and Kent, WJ and Kundaje, A and Mortazavi, A and Wold, B and Cherry, JM}, title = {The ENCODE Uniform Analysis Pipelines.}, journal = {bioRxiv : the preprint server for biology}, volume = {}, number = {}, pages = {}, pmid = {37066421}, support = {R01 HG009318/HG/NHGRI NIH HHS/United States ; UM1 HG009375/HG/NHGRI NIH HHS/United States ; }, abstract = {The Encyclopedia of DNA elements (ENCODE) project is a collaborative effort to create a comprehensive catalog of functional elements in the human genome. The current database comprises more than 19000 functional genomics experiments across more than 1000 cell lines and tissues using a wide array of experimental techniques to study the chromatin structure, regulatory and transcriptional landscape of the Homo sapiens and Mus musculus genomes. 
All experimental data, metadata, and associated computational analyses created by the ENCODE consortium are submitted to the Data Coordination Center (DCC) for validation, tracking, storage, and distribution to community resources and the scientific community. The ENCODE project has engineered and distributed uniform processing pipelines in order to promote data provenance and reproducibility as well as allow interoperability between genomic resources and other consortia. All data files, reference genome versions, software versions, and parameters used by the pipelines are captured and available via the ENCODE Portal. The pipeline code, developed using Docker and Workflow Description Language (WDL; https://openwdl.org/) is publicly available in GitHub, with images available on Dockerhub (https://hub.docker.com), enabling access to a diverse range of biomedical researchers. ENCODE pipelines maintained and used by the DCC can be installed to run on personal computers, local HPC clusters, or in cloud computing environments via Cromwell. Access to the pipelines and data via the cloud allows small labs the ability to use the data or software without access to institutional compute clusters. 
Standardization of the computational methodologies for analysis and quality control leads to comparable results from different ENCODE collections - a prerequisite for successful integrative analyses.}, } @article {pmid37066386, year = {2023}, author = {Olson, RH and Kalafut, NC and Wang, D}, title = {MANGEM: a web app for Multimodal Analysis of Neuronal Gene expression, Electrophysiology and Morphology.}, journal = {bioRxiv : the preprint server for biology}, volume = {}, number = {}, pages = {}, pmid = {37066386}, support = {R21 NS127432/NS/NINDS NIH HHS/United States ; R21 NS128761/NS/NINDS NIH HHS/United States ; P50 HD105353/HD/NICHD NIH HHS/United States ; R01 AG067025/AG/NIA NIH HHS/United States ; R03 NS123969/NS/NINDS NIH HHS/United States ; RF1 MH128695/MH/NIMH NIH HHS/United States ; }, abstract = {Single-cell techniques have enabled the acquisition of multi-modal data, particularly for neurons, to characterize cellular functions. Patch-seq, for example, combines patch-clamp recording, cell imaging, and single-cell RNA-seq to obtain electrophysiology, morphology, and gene expression data from a single neuron. While these multi-modal data offer potential insights into neuronal functions, they can be heterogeneous and noisy. To address this, machine-learning methods have been used to align cells from different modalities onto a low-dimensional latent space, revealing multi-modal cell clusters. However, the use of those methods can be challenging for biologists and neuroscientists without computational expertise and also requires suitable computing infrastructure for computationally expensive methods. To address these issues, we developed a cloud-based web application, MANGEM (Multimodal Analysis of Neuronal Gene expression, Electrophysiology, and Morphology) at https://ctc.waisman.wisc.edu/mangem. 
MANGEM provides a step-by-step accessible and user-friendly interface to machine-learning alignment methods of neuronal multi-modal data while enabling real-time visualization of characteristics of raw and aligned cells. It can be run asynchronously for large-scale data alignment, provides users with various downstream analyses of aligned cells and visualizes the analytic results such as identifying multi-modal cell clusters of cells and detecting correlated genes with electrophysiological and morphological features. We demonstrated the usage of MANGEM by aligning Patch-seq multimodal data of neuronal cells in the mouse visual cortex.}, } @article {pmid37064531, year = {2023}, author = {Horsley, JJ and Thomas, RH and Chowdhury, FA and Diehl, B and McEvoy, AW and Miserocchi, A and de Tisi, J and Vos, SB and Walker, MC and Winston, GP and Duncan, JS and Wang, Y and Taylor, PN}, title = {Complementary structural and functional abnormalities to localise epileptogenic tissue.}, journal = {ArXiv}, volume = {}, number = {}, pages = {}, pmid = {37064531}, issn = {2331-8422}, support = {/WT_/Wellcome Trust/United Kingdom ; MR/V034758/1/MRC_/Medical Research Council/United Kingdom ; U01 NS090407/NS/NINDS NIH HHS/United States ; }, abstract = {BACKGROUND: When investigating suitability for epilepsy surgery, people with drug-refractory focal epilepsy may have intracranial EEG (iEEG) electrodes implanted to localise seizure onset. Diffusion-weighted magnetic resonance imaging (dMRI) may be acquired to identify key white matter tracts for surgical avoidance. Here, we investigate whether structural connectivity abnormalities, inferred from dMRI, may be used in conjunction with functional iEEG abnormalities to aid localisation of the epileptogenic zone (EZ), improving surgical outcomes in epilepsy.

METHODS: We retrospectively investigated data from 43 patients with epilepsy who had surgery following iEEG. Twenty-five patients (58%) were free from disabling seizures (ILAE 1 or 2) at one year. Interictal iEEG functional, and dMRI structural connectivity abnormalities were quantified by comparison to a normative map and healthy controls. We explored whether the resection of maximal abnormalities related to improved surgical outcomes, in both modalities individually and concurrently. Additionally, we suggest how connectivity abnormalities may inform the placement of iEEG electrodes pre-surgically using a patient case study.

FINDINGS: Seizure freedom was 15 times more likely in patients with resection of maximal connectivity and iEEG abnormalities (p=0.008). Both modalities separately distinguished patient surgical outcome groups and when used simultaneously, a decision tree correctly separated 36 of 43 (84%) patients.

INTERPRETATION: Our results suggest that both connectivity and iEEG abnormalities may localise epileptogenic tissue, and that these two modalities may provide complementary information in pre-surgical evaluations.

FUNDING: This research was funded by UKRI, CDT in Cloud Computing for Big Data, NIH, MRC, Wellcome Trust and Epilepsy Research UK.}, } @article {pmid37063644, year = {2023}, author = {Dong, C and Li, TZ and Xu, K and Wang, Z and Maldonado, F and Sandler, K and Landman, BA and Huo, Y}, title = {Characterizing browser-based medical imaging AI with serverless edge computing: towards addressing clinical data security constraints.}, journal = {Proceedings of SPIE--the International Society for Optical Engineering}, volume = {12469}, number = {}, pages = {}, pmid = {37063644}, issn = {0277-786X}, support = {R01 CA253923/CA/NCI NIH HHS/United States ; R01 EB017230/EB/NIBIB NIH HHS/United States ; U01 CA152662/CA/NCI NIH HHS/United States ; U01 CA196405/CA/NCI NIH HHS/United States ; }, abstract = {Artificial intelligence (AI) has been widely introduced to various medical imaging applications ranging from disease visualization to medical decision support. However, data privacy has become an essential concern in clinical practice of deploying the deep learning algorithms through cloud computing. The sensitivity of patient health information (PHI) commonly limits network transfer, installation of bespoke desktop software, and access to computing resources. Serverless edge-computing shed light on privacy preserved model distribution maintaining both high flexibility (as cloud computing) and security (as local deployment). In this paper, we propose a browser-based, cross-platform, and privacy preserved medical imaging AI deployment system working on consumer-level hardware via serverless edge-computing. Briefly we implement this system by deploying a 3D medical image segmentation model for computed tomography (CT) based lung cancer screening. We further curate tradeoffs in model complexity and data size by characterizing the speed, memory usage, and limitations across various operating systems and browsers. 
Our implementation achieves a deployment with (1) a 3D convolutional neural network (CNN) on CT volumes (256×256×256 resolution), (2) an average runtime of 80 seconds across Firefox v.102.0.1/Chrome v.103.0.5060.114/Microsoft Edge v.103.0.1264.44 and 210 seconds on Safari v.14.1.1, and (3) an average memory usage of 1.5 GB on Microsoft Windows laptops, Linux workstation, and Apple Mac laptops. In conclusion, this work presents a privacy-preserved solution for medical imaging AI applications that minimizes the risk of PHI exposure. We characterize the tools, architectures, and parameters of our framework to facilitate the translation of modern deep learning methods into routine clinical care.}, } @article {pmid37055366, year = {2023}, author = {Zhang, H and Wang, LC and Chaudhuri, S and Pickering, A and Usvyat, L and Larkin, J and Waguespack, P and Kuang, Z and Kooman, JP and Maddux, FW and Kotanko, P}, title = {Real-time prediction of intradialytic hypotension using machine learning and cloud computing infrastructure.}, journal = {Nephrology, dialysis, transplantation : official publication of the European Dialysis and Transplant Association - European Renal Association}, volume = {38}, number = {7}, pages = {1761-1769}, pmid = {37055366}, issn = {1460-2385}, mesh = {Humans ; *Kidney Failure, Chronic/therapy/complications ; Prospective Studies ; Cloud Computing ; *Hypotension/diagnosis/etiology ; Renal Dialysis/adverse effects ; Blood Pressure ; }, abstract = {BACKGROUND: In maintenance hemodialysis patients, intradialytic hypotension (IDH) is a frequent complication that has been associated with poor clinical outcomes. Prediction of IDH may facilitate timely interventions and eventually reduce IDH rates.

METHODS: We developed a machine learning model to predict IDH in in-center hemodialysis patients 15-75 min in advance. IDH was defined as systolic blood pressure (SBP) <90 mmHg. Demographic, clinical, treatment-related and laboratory data were retrieved from electronic health records and merged with intradialytic machine data that were sent in real-time to the cloud. For model development, dialysis sessions were randomly split into training (80%) and testing (20%) sets. The area under the receiver operating characteristic curve (AUROC) was used as a measure of the model's predictive performance.

RESULTS: We utilized data from 693 patients who contributed 42 656 hemodialysis sessions and 355 693 intradialytic SBP measurements. IDH occurred in 16.2% of hemodialysis treatments. Our model predicted IDH 15-75 min in advance with an AUROC of 0.89. Top IDH predictors were the most recent intradialytic SBP and IDH rate, as well as mean nadir SBP of the previous 10 dialysis sessions.

CONCLUSIONS: Real-time prediction of IDH during an ongoing hemodialysis session is feasible and has a clinically actionable predictive performance. If and to what degree this predictive information facilitates the timely deployment of preventive interventions and translates into lower IDH rates and improved patient outcomes warrants prospective studies.}, } @article {pmid37055332, year = {2023}, author = {Servida, F and Fischer, M and Delémont, O and Souvignet, TR}, title = {Ok Google, Start a Fire. IoT devices as witnesses and actors in fire investigations.}, journal = {Forensic science international}, volume = {348}, number = {}, pages = {111674}, doi = {10.1016/j.forsciint.2023.111674}, pmid = {37055332}, issn = {1872-6283}, abstract = {Fire incidents are amongst the most destructive events an investigator might encounter, completely transforming a scene with most of the objects left in ashes or highly damaged. Until now, fire investigations relied heavily on burn patterns and electrical artifacts to find possible starting locations, as well as witness statements and more recently witness imagery. As Internet of Things (IoT) devices, often seen as connected smart devices, become more common, the various sensors embedded within them provide a novel source of traces about the environment and events within. They collect and store information in different locations, often not touched by the event, such as remote servers (cloud) or companion smartphones, widening the investigation field for fire incidents. This work presents two controlled fire incidents in apartments that we furnished, equipped with IoT devices, and subsequently burnt. We studied the traces retrievable from the objects themselves after the incident, the companion smartphone apps, and the cloud and assessed the value of the information they conveyed. 
This research highlighted the pertinence to consider traces from IoT devices in the forensic process of fire investigation.}, } @article {pmid37053173, year = {2023}, author = {Asim Shahid, M and Alam, MM and Mohd Su'ud, M}, title = {Improved accuracy and less fault prediction errors via modified sequential minimal optimization algorithm.}, journal = {PloS one}, volume = {18}, number = {4}, pages = {e0284209}, pmid = {37053173}, issn = {1932-6203}, mesh = {Bayes Theorem ; *Algorithms ; *Machine Learning ; Random Forest ; Support Vector Machine ; }, abstract = {The benefits and opportunities offered by cloud computing are among the fastest-growing technologies in the computer industry. Additionally, it addresses the difficulties and issues that make more users more likely to accept and use the technology. The proposed research comprised of machine learning (ML) algorithms is Naïve Bayes (NB), Library Support Vector Machine (LibSVM), Multinomial Logistic Regression (MLR), Sequential Minimal Optimization (SMO), K Nearest Neighbor (KNN), and Random Forest (RF) to compare the classifier gives better results in accuracy and less fault prediction. In this research, the secondary data results (CPU-Mem Mono) give the highest percentage of accuracy and less fault prediction on the NB classifier in terms of 80/20 (77.01%), 70/30 (76.05%), and 5 folds cross-validation (74.88%), and (CPU-Mem Multi) in terms of 80/20 (89.72%), 70/30 (90.28%), and 5 folds cross-validation (92.83%). Furthermore, on (HDD Mono) the SMO classifier gives the highest percentage of accuracy and less fault prediction fault in terms of 80/20 (87.72%), 70/30 (89.41%), and 5 folds cross-validation (88.38%), and (HDD-Multi) in terms of 80/20 (93.64%), 70/30 (90.91%), and 5 folds cross-validation (88.20%). 
Whereas, primary data results found RF classifier gives the highest percentage of accuracy and less fault prediction in terms of 80/20 (97.14%), 70/30 (96.19%), and 5 folds cross-validation (95.85%) in the primary data results, but the algorithm complexity (0.17 seconds) is not good. In terms of 80/20 (95.71%), 70/30 (95.71%), and 5 folds cross-validation (95.71%), SMO has the second highest accuracy and less fault prediction, but the algorithm complexity is good (0.3 seconds). The difference in accuracy and less fault prediction between RF and SMO is only (.13%), and the difference in time complexity is (14 seconds). We have decided that we will modify SMO. Finally, the Modified Sequential Minimal Optimization (MSMO) Algorithm method has been proposed to get the highest accuracy & less fault prediction errors in terms of 80/20 (96.42%), 70/30 (96.42%), & 5 fold cross validation (96.50%).}, } @article {pmid37050834, year = {2023}, author = {Wang, Z and Yu, X and Xue, P and Qu, Y and Ju, L}, title = {Research on Medical Security System Based on Zero Trust.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {7}, pages = {}, pmid = {37050834}, issn = {1424-8220}, support = {2018YFB0803401//the National Key Research and Development Plan of China/ ; 328202203//the Fundamental Research Funds for the Central Universities/ ; 2019M650606//China Postdoctoral Science Foundation funded project/ ; }, mesh = {Humans ; *Trust ; *Computer Security ; Big Data ; Computer Simulation ; Cloud Computing ; }, abstract = {With the rapid development of Internet of Things technology, cloud computing, and big data, the combination of medical systems and information technology has become increasingly close. However, the emergence of intelligent medical systems has brought a series of network security threats and hidden dangers, including data leakage and remote attacks, which can directly threaten patients' lives. 
To ensure the security of medical information systems and expand the application of zero trust in the medical field, we combined the medical system with the zero-trust security system to propose a zero-trust medical security system. In addition, in its dynamic access control module, based on the RBAC model and the calculation of user behavior risk value and trust, an access control model based on subject behavior evaluation under zero-trust conditions (ABEAC) was designed to improve the security of medical equipment and data. Finally, the feasibility of the system is verified through a simulation experiment.}, } @article {pmid37050740, year = {2023}, author = {Kim, K and Alshenaifi, IM and Ramachandran, S and Kim, J and Zia, T and Almorjan, A}, title = {Cybersecurity and Cyber Forensics for Smart Cities: A Comprehensive Literature Review and Survey.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {7}, pages = {}, pmid = {37050740}, issn = {1424-8220}, support = {SRC-PR2-05//Naif Arab University for Security Sciences/ ; }, abstract = {Smart technologies, such as the Internet of Things (IoT), cloud computing, and artificial intelligence (AI), are being adopted in cities and transforming them into smart cities. In smart cities, various network technologies, such as the Internet and IoT, are combined to exchange real-time information, making the everyday lives of their residents more convenient. However, there is a lack of systematic research on cybersecurity and cyber forensics in smart cities. This paper presents a comprehensive review and survey of cybersecurity and cyber forensics for smart cities. We analysed 154 papers that were published from 2015 to 2022 and proposed a new framework based on a decade of related research papers. We identified four major areas and eleven sub-areas for smart cities. We found that smart homes and the IoT were the most active research areas within the cybersecurity field. 
Additionally, we found that research on cyber forensics for smart cities was relatively limited compared to that on cybersecurity. Since 2020, there have been many studies on the IoT (which is a technological component of smart cities) that have utilized machine learning and deep learning. Due to the transmission of large-scale data through IoT devices in smart cities, ML and DL are expected to continue playing critical roles in smart city research.}, } @article {pmid37050561, year = {2023}, author = {Elbagoury, BM and Vladareanu, L and Vlădăreanu, V and Salem, AB and Travediu, AM and Roushdy, MI}, title = {A Hybrid Stacked CNN and Residual Feedback GMDH-LSTM Deep Learning Model for Stroke Prediction Applied on Mobile AI Smart Hospital Platform.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {7}, pages = {}, pmid = {37050561}, issn = {1424-8220}, mesh = {Humans ; Artificial Intelligence ; Feedback ; *Deep Learning ; *Stroke/diagnosis ; Hospitals ; }, abstract = {Artificial intelligence (AI) techniques for intelligent mobile computing in healthcare has opened up new opportunities in healthcare systems. Combining AI techniques with the existing Internet of Medical Things (IoMT) will enhance the quality of care that patients receive at home remotely and the successful establishment of smart living environments. Building a real AI for mobile AI in an integrated smart hospital environment is a challenging problem due to the complexities of receiving IoT medical sensors data, data analysis, and deep learning algorithm complexity programming for mobile AI engine implementation AI-based cloud computing complexities, especially when we tackle real-time environments of AI technologies. In this paper, we propose a new mobile AI smart hospital platform architecture for stroke prediction and emergencies. 
In addition, this research is focused on developing and testing different modules of integrated AI software based on XAI architecture, this is for the mobile health app as an independent expert system or as connected with a simulated environment of an AI-cloud-based solution. The novelty is in the integrated architecture and results obtained in our previous works and this extended research on hybrid GMDH and LSTM deep learning models for the proposed artificial intelligence and IoMT engine for mobile health edge computing technology. Its main goal is to predict heart-stroke disease. Current research is still missing a mobile AI system for heart/brain stroke prediction during patient emergency cases. This research work implements AI algorithms for stroke prediction and diagnosis. The hybrid AI in connected health is based on a stacked CNN and group handling method (GMDH) predictive analytics model, enhanced with an LSTM deep learning module for biomedical signals prediction. The techniques developed depend on the dataset of electromyography (EMG) signals, which provides a significant source of information for the identification of normal and abnormal motions in a stroke scenario. The resulting artificial intelligence mHealth app is an innovation beyond the state of the art and the proposed techniques achieve high accuracy as stacked CNN reaches almost 98% for stroke diagnosis. The GMDH neural network proves to be a good technique for monitoring the EMG signal of the same patient case with an average accuracy of 98.60% to an average of 96.68% of the signal prediction. 
Moreover, extending the GMDH model and a hybrid LSTM with dense layers deep learning model has improved significantly the prediction results that reach an average of 99%.}, } @article {pmid37050551, year = {2023}, author = {Fahimullah, M and Philippe, G and Ahvar, S and Trocan, M}, title = {Simulation Tools for Fog Computing: A Comparative Analysis.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {7}, pages = {}, pmid = {37050551}, issn = {1424-8220}, abstract = {Fog Computing (FC) was introduced to offer resources closer to the users. Researchers propose different solutions to make FC mature and use simulators for evaluating their solutions at early stages. In this paper, we compare different FC simulators based on their technical and non-technical characteristics. In addition, a practical comparison is conducted to compare the three main FC simulators based on their performance such as execution time, CPU, and memory usage for running different applications. The analysis can be helpful for researchers to select the appropriate simulator and platform to evaluate their solutions on different use cases. Furthermore, open issues and challenges for FC simulators are discussed that require attention and need to be addressed in the future.}, } @article {pmid37050548, year = {2023}, author = {Singhal, S and Athithan, S and Alomar, MA and Kumar, R and Sharma, B and Srivastava, G and Lin, JC}, title = {Energy Aware Load Balancing Framework for Smart Grid Using Cloud and Fog Computing.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {7}, pages = {}, pmid = {37050548}, issn = {1424-8220}, abstract = {Data centers are producing a lot of data as cloud-based smart grids replace traditional grids. The number of automated systems has increased rapidly, which in turn necessitates the rise of cloud computing. Cloud computing helps enterprises offer services cheaply and efficiently. 
Despite the challenges of managing resources, longer response plus processing time, and higher energy consumption, more people are using cloud computing. Fog computing extends cloud computing. It adds cloud services that minimize traffic, increase security, and speed up processes. Cloud and fog computing help smart grids save energy by aggregating and distributing the submitted requests. The paper discusses a load-balancing approach in Smart Grid using Rock Hyrax Optimization (RHO) to optimize response time and energy consumption. The proposed algorithm assigns tasks to virtual machines for execution and shuts off unused virtual machines, reducing the energy consumed by virtual machines. The proposed model is implemented on the CloudAnalyst simulator, and the results demonstrate that the proposed method has a better and quicker response time with lower energy requirements as compared with both static and dynamic algorithms. The suggested algorithm reduces processing time by 26%, response time by 15%, energy consumption by 29%, cost by 6%, and delay by 14%.}, } @article {pmid37050477, year = {2023}, author = {García, E and Quiles, E and Correcher, A}, title = {Distributed Intelligent Battery Management System Using a Real-World Cloud Computing System.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {7}, pages = {}, pmid = {37050477}, issn = {1424-8220}, abstract = {In this work, a decentralized but synchronized real-world system for smart battery management was designed by using a general controller with cloud computing capability, four charge regulators, and a set of sensorized battery monitors with networking and Bluetooth capabilities. 
Currently, for real-world applications, battery management systems (BMSs) can be used in the form of distributed control systems where general controllers, charge regulators, and smart monitors and sensors are integrated, such as those proposed in this work, which allow more precise estimations of a large set of important parameters, such as the state of charge (SOC), state of health (SOH), current, voltage, and temperature, seeking the safety and the extension of the useful life of energy storage systems based on battery banks. The system used is a paradigmatic real-world example of the so-called intelligent battery management systems. One of the contributions made in this work is the realization of a distributed design of a BMS, which adds the benefit of increased system security compared to a fully centralized BMS structure. Another research contribution made in this work is the development of a methodical modeling procedure based on Petri Nets, which establishes, in a visible, organized, and precise way, the set of conditions that will determine the operation of the BMS. If this modeling is not carried out, the threshold values and their conditions remain scattered, not very transparent, and difficult to deal with in an aggregate way.}, } @article {pmid37050454, year = {2023}, author = {Akturk, E and Popescu, SC and Malambo, L}, title = {ICESat-2 for Canopy Cover Estimation at Large-Scale on a Cloud-Based Platform.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {7}, pages = {}, pmid = {37050454}, issn = {1424-8220}, abstract = {Forest canopy cover is an essential biophysical parameter of ecological significance, especially for characterizing woodlands and forests. This research focused on using data from the ICESat-2/ATLAS spaceborne lidar sensor, a photon-counting altimetry system, to map the forest canopy cover over a large country extent. 
The study proposed a novel approach to compute categorized canopy cover using photon-counting data and available ancillary Landsat images to build the canopy cover model. In addition, this research tested a cloud-mapping platform, the Google Earth Engine (GEE), as an example of a large-scale study. The canopy cover map of the Republic of Türkiye produced from this study has an average accuracy of over 70%. Even though the results were promising, it has been determined that the issues caused by the auxiliary data negatively affect the overall success. Moreover, while GEE offered many benefits, such as user-friendliness and convenience, it had processing limits that posed challenges for large-scale studies. Using weak or strong beams' segments separately did not show a significant difference in estimating canopy cover. Briefly, this study demonstrates the potential of using photon-counting data and GEE for mapping forest canopy cover at a large scale.}, } @article {pmid37046022, year = {2023}, author = {Hemati, M and Hasanlou, M and Mahdianpari, M and Mohammadimanesh, F}, title = {Iranian wetland inventory map at a spatial resolution of 10 m using Sentinel-1 and Sentinel-2 data on the Google Earth Engine cloud computing platform.}, journal = {Environmental monitoring and assessment}, volume = {195}, number = {5}, pages = {558}, pmid = {37046022}, issn = {1573-2959}, mesh = {*Wetlands ; Iran ; *Cloud Computing ; Search Engine ; Environmental Monitoring/methods ; }, abstract = {Detailed wetland inventories and information about the spatial arrangement and the extent of wetland types across the Earth's surface are crucially important for resource assessment and sustainable management. In addition, it is crucial to update these inventories due to the highly dynamic characteristics of the wetlands. Remote sensing technologies capturing high-resolution and multi-temporal views of landscapes are incredibly beneficial in wetland mapping compared to traditional methods. 
Taking advantage of the Google Earth Engine's computational power and multi-source earth observation data from Sentinel-1 radar and Sentinel-2 multi-spectral sensor, we generated a 10 m nationwide wetlands inventory map for Iran. The whole country is mapped using an object-based image processing framework, containing SNIC superpixel segmentation and a Random Forest classifier that was performed for four different ecological zones of Iran separately. Reference data was provided by different sources and through both field and office-based methods. Almost 70% of this data was used for the training stage and the other 30% for evaluation. The whole map overall accuracy was 96.39% and the producer's accuracy for wetland classes ranged from nearly 65 to 99%. It is estimated that 22,384 km[2] of Iran are covered with water bodies and wetland classes, and emergent and shrub-dominated are the most common wetland classes in Iran. Considering the water crisis that has been started in Iran, the resulting ever-demanding map of Iranian wetland sites offers remarkable information about wetland boundaries and spatial distribution of wetland species, and therefore it is helpful for both governmental and commercial sectors.}, } @article {pmid37025550, year = {2023}, author = {Chlasta, K and Sochaczewski, P and Wójcik, GM and Krejtz, I}, title = {Neural simulation pipeline: Enabling container-based simulations on-premise and in public clouds.}, journal = {Frontiers in neuroinformatics}, volume = {17}, number = {}, pages = {1122470}, pmid = {37025550}, issn = {1662-5196}, abstract = {In this study, we explore the simulation setup in computational neuroscience. We use GENESIS, a general purpose simulation engine for sub-cellular components and biochemical reactions, realistic neuron models, large neural networks, and system-level models. GENESIS supports developing and running computer simulations but leaves a gap for setting up today's larger and more complex models.
The field of realistic models of brain networks has outgrown the simplicity of the earliest models.
The results show that our neural simulation pipeline can reduce entry barriers to neural simulations, making them more practical and cost-effective.}, } @article {pmid37023063, year = {2023}, author = {Hou, YF and Ge, F and Dral, PO}, title = {Explicit Learning of Derivatives with the KREG and pKREG Models on the Example of Accurate Representation of Molecular Potential Energy Surfaces.}, journal = {Journal of chemical theory and computation}, volume = {19}, number = {8}, pages = {2369-2379}, doi = {10.1021/acs.jctc.2c01038}, pmid = {37023063}, issn = {1549-9626}, abstract = {The KREG and pKREG models were proven to enable accurate learning of multidimensional single-molecule surfaces of quantum chemical properties such as ground-state potential energies, excitation energies, and oscillator strengths. These models are based on kernel ridge regression (KRR) with the Gaussian kernel function and employ a relative-to-equilibrium (RE) global molecular descriptor, while pKREG is designed to enforce invariance under atom permutations with a permutationally invariant kernel. Here we extend these two models to also explicitly include the derivative information from the training data into the models, which greatly improves their accuracy. We demonstrate on the example of learning potential energies and energy gradients that KREG and pKREG models are better or on par with state-of-the-art machine learning models. We also found that in challenging cases both energy and energy gradient labels should be learned to properly model potential energy surfaces and learning only energies or gradients is insufficient. 
The models' open-source implementation is freely available in the MLatom package for general-purpose atomistic machine learning simulations, which can be also performed on the MLatom@XACS cloud computing service.}, } @article {pmid37018339, year = {2023}, author = {Yu, H and Zhang, Q and Yang, LT}, title = {An Edge-cloud-aided Private High-order Fuzzy C-means Clustering Algorithm in Smart Healthcare.}, journal = {IEEE/ACM transactions on computational biology and bioinformatics}, volume = {PP}, number = {}, pages = {}, doi = {10.1109/TCBB.2022.3233380}, pmid = {37018339}, issn = {1557-9964}, abstract = {Smart healthcare has emerged to provide healthcare services using data analysis techniques. Especially, clustering is playing an indispensable role in analyzing healthcare records. However, large multi-modal healthcare data imposes great challenges on clustering. Specifically, it is hard for traditional approaches to obtain desirable results for healthcare data clustering since they are not able to work for multi-modal data. This paper presents a new high-order multi-modal learning approach using multimodal deep learning and the Tucker decomposition (F- HoFCM). Furthermore, we propose an edge-cloud-aided private scheme to facilitate the clustering efficiency for its embedding in edge resources. Specifically, the computationally intensive tasks, such as parameter updating with high-order back propagation algorithm and clustering through high-order fuzzy c-means, are processed in a centralized location with cloud computing. The other tasks such as multi-modal data fusion and Tucker decomposition are performed at the edge resources. Since the feature fusion and Tucker decomposition are nonlinear operations, the cloud cannot obtain the raw data, thus protecting the privacy. 
Experimental results state that the presented approach produces significantly more accurate results than the existing high-order fuzzy c-means (HOFCM) on multi-modal healthcare datasets, and furthermore the clustering efficiency is significantly improved by the developed edge-cloud-aided private healthcare system.
This adaptation module continuously updates (in a passive way) the knowledge base of TML-CD and, at the same time, employs a change detection test (CDT) to inspect for changes (in an active way) to quickly adapt to concept drift by removing obsolete knowledge. Experimental results on both image and audio benchmarks show the effectiveness of the proposed solution, whilst the porting of TML-CD on three off-the-shelf micro-controller units (MCUs) shows the feasibility of what is proposed in real-world pervasive systems.}, } @article {pmid37012744, year = {2023}, author = {Wang, Y and Li, J and Wang, H and Yan, Z and Xu, Z and Li, C and Zhao, Z and Raza, SA}, title = {Non-contact wearable synchronous measurement method of electrocardiogram and seismocardiogram signals.}, journal = {The Review of scientific instruments}, volume = {94}, number = {3}, pages = {034101}, doi = {10.1063/5.0120722}, pmid = {37012744}, issn = {1089-7623}, mesh = {Humans ; *Artificial Intelligence ; Signal Processing, Computer-Assisted ; Electrocardiography/methods ; Heart ; *Wearable Electronic Devices ; }, abstract = {Cardiovascular disease is one of the leading threats to human lives and its fatality rate still rises gradually year by year. Driven by the development of advanced information technologies, such as big data, cloud computing, and artificial intelligence, remote/distributed cardiac healthcare is presenting a promising future. The traditional dynamic cardiac health monitoring method based on electrocardiogram (ECG) signals only has obvious deficiencies in comfortableness, informativeness, and accuracy under motion state. Therefore, a non-contact, compact, wearable, synchronous ECG and seismocardiogram (SCG) measuring system, based on a pair of capacitance coupling electrodes with ultra-high input impedance, and a high-resolution accelerometer were developed in this work, which can collect the ECG and SCG signals at the same point simultaneously through the multi-layer cloth. 
Meanwhile, the driven right leg electrode for ECG measurement is replaced by the AgCl fabric sewn to the outside of the cloth for realizing the total gel-free ECG measurement. Besides, synchronous ECG and SCG signals at multiple points on the chest surface were measured, and the recommended measuring points were given by their amplitude characteristics and the timing sequence correspondence analysis. Finally, the empirical mode decomposition algorithm was used to adaptively filter the motion artifacts within the ECG and SCG signals for measuring performance enhancement under motion states. The results demonstrate that the proposed non-contact, wearable cardiac health monitoring system can effectively collect ECG and SCG synchronously under various measuring situations.}, } @article {pmid37010950, year = {2023}, author = {Ansari, M and White, AD}, title = {Serverless Prediction of Peptide Properties with Recurrent Neural Networks.}, journal = {Journal of chemical information and modeling}, volume = {63}, number = {8}, pages = {2546-2553}, pmid = {37010950}, issn = {1549-960X}, support = {R35 GM137966/GM/NIGMS NIH HHS/United States ; }, mesh = {Reproducibility of Results ; *Neural Networks, Computer ; *Peptides ; Machine Learning ; Cloud Computing ; }, abstract = {We present three deep learning sequence-based prediction models for peptide properties including hemolysis, solubility, and resistance to nonspecific interactions that achieve comparable results to the state-of-the-art models. Our sequence-based solubility predictor, MahLooL, outperforms the current state-of-the-art methods for short peptides. These models are implemented as a static website without the use of a dedicated server or cloud computing. Web-based models like this allow for accessible and effective reproducibility. Most existing approaches rely on third-party servers that typically require upkeep and maintenance. 
Our predictive models do not require servers, require no installation of dependencies, and work across a range of devices. The specific architecture is bidirectional recurrent neural networks. This serverless approach is a demonstration of edge machine learning that removes the dependence on cloud providers. The code and models are accessible at https://github.com/ur-whitelab/peptide-dashboard.}, } @article {pmid37007983, year = {2023}, author = {Elouali, A and Mora Mora, H and Mora-Gimeno, FJ}, title = {Data transmission reduction formalization for cloud offloading-based IoT systems.}, journal = {Journal of cloud computing (Heidelberg, Germany)}, volume = {12}, number = {1}, pages = {48}, pmid = {37007983}, issn = {2192-113X}, abstract = {Computation offloading is the solution for IoT devices of limited resources and high-cost processing requirements. However, the network related issues such as latency and bandwidth consumption need to be considered. Data transmission reduction is one of the solutions aiming to solve network related problems by reducing the amount of data transmitted. In this paper, we propose a generalized formal data transmission reduction model independent of the system and the data type. This formalization is based on two main ideas: 1) Not sending data until a significant change occurs, 2) Sending a lighter size entity permitting the cloud to deduct the data captured by the IoT device without actually receiving it. 
This paper includes the mathematical representation of the model, general evaluation metrics formulas as well as detailed projections on real world use cases.}, } @article {pmid37007982, year = {2023}, author = {Zhong, L}, title = {A convolutional neural network based online teaching method using edge-cloud computing platform.}, journal = {Journal of cloud computing (Heidelberg, Germany)}, volume = {12}, number = {1}, pages = {49}, pmid = {37007982}, issn = {2192-113X}, abstract = {Teaching has become a complex essential tool for students' abilities, due to their different levels of learning and understanding. In the traditional offline teaching methods, dance teachers lack a target for students 'classroom teaching. Furthermore, teachers have limited time, so they cannot take full care of each student's learning needs according to their understanding and learning ability, which leads to the polarization of the learning effect. Because of this, this paper proposes an online teaching method based on Artificial Intelligence and edge calculation. In the first phase, standard teaching and student-recorded dance learning videos are conducted through the key frames extraction through a deep convolutional neural network. In the second phase, the extracted key frame images were then extracted for human key points using grid coding, and the fully convolutional neural network was used to predict the human posture. The guidance vector is used to correct the dance movements to achieve the purpose of online learning. The CNN model is distributed into two parts so that the training occurs at the cloud and prediction happens at the edge server. Moreover, the questionnaire was used to obtain the students' learning status, understand their difficulties in dance learning, and record the corresponding dance teaching videos to make up for their weak links. Finally, the edge-cloud computing platform is used to help the training model learn quickly form vast amount of collected data. 
Our experiments show that the cloud-edge platform helps to support new teaching forms, enhance the platform's overall application performance and intelligence level, and improve the online learning experience. The application of this paper can help dance students to achieve efficient learning.}, } @article {pmid37003227, year = {2023}, author = {Flores-de-Santiago, F and Rodríguez-Sobreyra, R and Álvarez-Sánchez, LF and Valderrama-Landeros, L and Amezcua, F and Flores-Verdugo, F}, title = {Understanding the natural expansion of white mangrove (Laguncularia racemosa) in an ephemeral inlet based on geomorphological analysis and remote sensing data.}, journal = {Journal of environmental management}, volume = {338}, number = {}, pages = {117820}, doi = {10.1016/j.jenvman.2023.117820}, pmid = {37003227}, issn = {1095-8630}, mesh = {*Bays ; *Combretaceae ; Forests ; *Remote Sensing Technology/methods ; Water ; *Wetlands ; }, abstract = {The interactions between local tides and river discharges are crucial in the processes related to the recruitment of mangrove propagules in estuarine systems. This investigation aimed to determine the causes of the recent natural recruitment and expansion of Laguncularia racemosa in mudflats within an ephemeral inlet in Mexico. We conducted a fluvial and coastal geomorphology assessment with spaceborne and UAV-based images. We deployed and recorded continuous data loggers in the estuarine system to assess water level and salinity. Depending on the available data, we used a combination of cloud-computing Google Earth Engine, UAV-Digital Surface Models, LiDAR, Google Earth images, and biophysical variables to monitor mangrove forests from 2005 to 2022. When the inlet is open, the estuarine system presents a full tidal range (∼1-1.5 m) with a strong salinity gradient (0-35 mS/cm), in contrast to the strong freshwater influence and minimal water level variability (<10 cm) that prevails for three months when the inlet is closed. 
Once the mouth of the river closes, there is considerable sediment accumulation, creating mudflat areas adjacent to the mangrove forests where Laguncularia racemosa propagules begin to establish under minimal water level variability and oligohaline conditions. After 16 years, the new forest expanded by 12.3 ha, presenting a very high density (10000 stems/ha), a considerable basal area (54-63 m[2]/ha), and a maximum canopy height of 15.8 m, which largely surpasses that of other semiarid Laguncularia racemosa forests within permanent open-inlet systems or even in ephemeral inlets with different hydrological conditions. Our study will help to understand the causes of natural Laguncularia racemosa recruitment in extremely dynamic systems.}, } @article {pmid37002322, year = {2023}, author = {Hassan, A and Elhoseny, M and Kayed, M}, title = {Hierarchical cloud architecture for identifying the bite of "Egyptian cobra" based on deep learning and quantum particle swarm optimization.}, journal = {Scientific reports}, volume = {13}, number = {1}, pages = {5250}, pmid = {37002322}, issn = {2045-2322}, abstract = {One of the most dangerous snake species is the "Egyptian cobra" which can kill a man in only 15 min. This paper uses deep learning techniques to identify the Egyptian cobra bite in an accurate manner based on an image of the marks of the bites. We build a dataset consisting of 500 images of cobra bites marks and 600 images of marks of other species of snakes that exist in Egypt. We utilize techniques such as multi-task learning, transfer learning and data augmentation to boost the generalization and accuracy of our model. We have achieved 90.9% of accuracy. We must keep the availability and accuracy of our model as much as possible. So, we utilize cloud and edge computing techniques to enhance the availability of our model. 
We have achieved 90.9% accuracy, which is considered an efficient result, though not 100%, so it is normal for the system to sometimes make wrong classifications. So, we suggest re-training our model with the wrong predictions, whereas the edge computing units, where the classifier task is positioned, resend the wrong predictions to the cloud model, where the training process occurs, to retrain the model.
ViroProfiler is freely available at https://github.com/deng-lab/viroprofiler.}, } @article {pmid36993557, year = {2023}, author = {Renton, AI and Dao, TT and Johnstone, T and Civier, O and Sullivan, RP and White, DJ and Lyons, P and Slade, BM and Abbott, DF and Amos, TJ and Bollmann, S and Botting, A and Campbell, MEJ and Chang, J and Close, TG and Eckstein, K and Egan, GF and Evas, S and Flandin, G and Garner, KG and Garrido, MI and Ghosh, SS and Grignard, M and Hannan, AJ and Huber, R and Kaczmarzyk, JR and Kasper, L and Kuhlmann, L and Lou, K and Mantilla-Ramos, YJ and Mattingley, JB and Morris, J and Narayanan, A and Pestilli, F and Puce, A and Ribeiro, FL and Rogasch, NC and Rorden, C and Schira, M and Shaw, TB and Sowman, PF and Spitz, G and Stewart, A and Ye, X and Zhu, JD and Hughes, ME and Narayanan, A and Bollmann, S}, title = {Neurodesk: An accessible, flexible, and portable data analysis environment for reproducible neuroimaging.}, journal = {Research square}, volume = {}, number = {}, pages = {}, pmid = {36993557}, support = {P41 EB019936/EB/NIBIB NIH HHS/United States ; R01 EB030896/EB/NIBIB NIH HHS/United States ; R01 MH126699/MH/NIMH NIH HHS/United States ; }, abstract = {Neuroimaging data analysis often requires purpose-built software, which can be challenging to install and may produce different results across computing environments. Beyond being a roadblock to neuroscientists, these issues of accessibility and portability can hamper the reproducibility of neuroimaging data analysis pipelines. Here, we introduce the Neurodesk platform, which harnesses software containers to support a comprehensive and growing suite of neuroimaging software (https://www.neurodesk.org/). 
Neurodesk includes a browser-accessible virtual desktop environment and a command line interface, mediating access to containerized neuroimaging software libraries on various computing platforms, including personal and high-performance computers, cloud computing and Jupyter Notebooks. This community-oriented, open-source platform enables a paradigm shift for neuroimaging data analysis, allowing for accessible, flexible, fully reproducible, and portable data analysis pipelines.}, } @article {pmid36991964, year = {2023}, author = {Alharbi, HA and Aldossary, M and Almutairi, J and Elgendy, IA}, title = {Energy-Aware and Secure Task Offloading for Multi-Tier Edge-Cloud Computing Systems.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {6}, pages = {}, pmid = {36991964}, issn = {1424-8220}, abstract = {Nowadays, Unmanned Aerial Vehicle (UAV) devices and their services and applications are gaining popularity and attracting considerable attention in different fields of our daily life. Nevertheless, most of these applications and services require more powerful computational resources and energy, and their limited battery capacity and processing power make it difficult to run them on a single device. Edge-Cloud Computing (ECC) is emerging as a new paradigm to cope with the challenges of these applications, which moves computing resources to the edge of the network and remote cloud, thereby alleviating the overhead through task offloading. Even though ECC offers substantial benefits for these devices, the limited bandwidth condition in the case of simultaneous offloading via the same channel with increasing data transmission of these applications has not been adequately addressed. Moreover, protecting the data through transmission remains a significant concern that still needs to be addressed. 
Therefore, in this paper, to bypass the limited bandwidth and address the potential security threats challenge, a new compression, security, and energy-aware task offloading framework is proposed for the ECC system environment. Specifically, we first introduce an efficient layer of compression to smartly reduce the transmission data over the channel. In addition, to address the security issue, a new layer of security based on an Advanced Encryption Standard (AES) cryptographic technique is presented to protect offloaded and sensitive data from different vulnerabilities. Subsequently, task offloading, data compression, and security are jointly formulated as a mixed integer problem whose objective is to reduce the overall energy of the system under latency constraints. Finally, simulation results reveal that our model is scalable and can cause a significant reduction in energy consumption (i.e., 19%, 18%, 21%, 14.5%, 13.1% and 12%) with respect to other benchmarks (i.e., local, edge, cloud and further benchmark models).}, } @article {pmid36991820, year = {2023}, author = {Morkevičius, N and Liutkevičius, A and Venčkauskas, A}, title = {Multi-Objective Path Optimization in Fog Architectures Using the Particle Swarm Optimization Approach.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {6}, pages = {}, pmid = {36991820}, issn = {1424-8220}, abstract = {IoT systems can successfully employ wireless sensor networks (WSNs) for data gathering and fog/edge computing for processing collected data and providing services. The proximity of edge devices to sensors improves latency, whereas cloud assets provide higher computational power when needed. Fog networks include various heterogeneous fog nodes and end-devices, some of which are mobile, such as vehicles, smartwatches, and cell phones, while others are static, such as traffic cameras. Therefore, some nodes in the fog network can be randomly organized, forming a self-organizing ad hoc structure. 
Moreover, fog nodes can have different resource constraints, such as energy, security, computational power, and latency. Therefore, two major problems arise in fog networks: ensuring optimal service (application) placement and determining the optimal path between the user end-device and the fog node that provides the services. Both problems require a simple and lightweight method that can rapidly identify a good solution using the constrained resources available in the fog nodes. In this paper, a novel two-stage multi-objective path optimization method is proposed that optimizes the data routing path between the end-device and fog node(s). A particle swarm optimization (PSO) method is used to determine the Pareto Frontier of alternative data paths, and then the analytical hierarchy process (AHP) is used to choose the best path alternative according to the application-specific preference matrix. The results show that the proposed method works with a wide range of objective functions that can be easily expanded. Moreover, the proposed method provides a whole set of alternative solutions and evaluates each of them, allowing us to choose the second- or third-best alternative if the first one is not suitable for some reason.}, } @article {pmid36991748, year = {2023}, author = {Cadenas, JM and Garrido, MC and Martínez-España, R}, title = {A Methodology Based on Machine Learning and Soft Computing to Design More Sustainable Agriculture Systems.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {6}, pages = {}, pmid = {36991748}, issn = {1424-8220}, support = {2020-112675RB-C44//MCIN/AEI/ 10.13039/501100011033/ ; }, abstract = {Advances in new technologies are allowing any field of real life to benefit from using these ones. 
Among them, we can highlight the IoT ecosystem making available large amounts of information, cloud computing allowing large computational capacities, and Machine Learning techniques together with the Soft Computing framework to incorporate intelligence.
The evaluation and validation show the effectiveness of the proposal.}, } @article {pmid36991663, year = {2023}, author = {Al-Jumaili, AHA and Muniyandi, RC and Hasan, MK and Paw, JKS and Singh, MJ}, title = {Big Data Analytics Using Cloud Computing Based Frameworks for Power Management Systems: Status, Constraints, and Future Recommendations.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {6}, pages = {}, pmid = {36991663}, issn = {1424-8220}, support = {FRGS/1/2021/ICT07/UKM/02/1//National University of Malaysia/ ; }, abstract = {Traditional parallel computing for power management systems has prime challenges such as execution time, computational complexity, and efficiency like process time and delays in power system condition monitoring, particularly consumer power consumption, weather data, and power generation for detecting and predicting data mining in the centralized parallel processing and diagnosis. Due to these constraints, data management has become a critical research consideration and bottleneck. To cope with these constraints, cloud computing-based methodologies have been introduced for managing data efficiently in power management systems. This paper reviews the concept of cloud computing architecture that can meet the multi-level real-time requirements to improve monitoring and performance which is designed for different application scenarios for power system monitoring. Then, cloud computing solutions are discussed under the background of big data, and emerging parallel programming models such as Hadoop, Spark, and Storm are briefly described to analyze the advancement, constraints, and innovations. The key performance metrics of cloud computing applications such as core data sampling, modeling, and analyzing the competitiveness of big data was modeled by applying related hypotheses. 
Finally, it introduces a new design concept with cloud computing and eventually some recommendations focusing on cloud computing infrastructure, and methods for managing real-time big data in the power management system that solve the data mining challenges.}, } @article {pmid36991661, year = {2023}, author = {Gabriele, M and Brumana, R}, title = {Monitoring Land Degradation Dynamics to Support Landscape Restoration Actions in Remote Areas of the Mediterranean Basin (Murcia Region, Spain).}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {6}, pages = {}, pmid = {36991661}, issn = {1424-8220}, abstract = {This study aims to develop a workflow methodology for collecting substantial amounts of Earth Observation data to investigate the effectiveness of landscape restoration actions and support the implementation of the Above Ground Carbon Capture indicator of the Ecosystem Restoration Camps (ERC) Soil Framework. To achieve this objective, the study will utilize the Google Earth Engine API within R (rGEE) to monitor the Normalized Difference Vegetation Index (NDVI). The results of this study will provide a common scalable reference for ERC camps globally, with a specific focus on Camp Altiplano, the first European ERC located in Murcia, Southern Spain. The coding workflow has effectively acquired almost 12 TB of data for analyzing MODIS/006/MOD13Q1 NDVI over a 20-year span. Additionally, the average retrieval of image collections has yielded 120 GB of data for the COPERNICUS/S2_SR 2017 vegetation growing season and 350 GB of data for the COPERNICUS/S2_SR 2022 vegetation winter season. Based on these results, it is reasonable to asseverate that cloud computing platforms like GEE will enable the monitoring and documentation of regenerative techniques to achieve unprecedented levels. 
The findings will be shared on a predictive platform called Restor, which will contribute to the development of a global ecosystem restoration model.}, } @article {pmid36991641, year = {2023}, author = {S-Julián, R and Lacalle, I and Vaño, R and Boronat, F and Palau, CE}, title = {Self-* Capabilities of Cloud-Edge Nodes: A Research Review.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {6}, pages = {}, pmid = {36991641}, issn = {1424-8220}, support = {101069732//European Commission/ ; }, abstract = {Most recent edge and fog computing architectures aim at pushing cloud-native traits at the edge of the network, reducing latency, power consumption, and network overhead, allowing operations to be performed close to data sources. To manage these architectures in an autonomous way, systems that materialize in specific computing nodes must deploy self-* capabilities minimizing human intervention across the continuum of computing equipment. Nowadays, a systematic classification of such capabilities is missing, as well as an analysis on how those can be implemented. For a system owner in a continuum deployment, there is not a main reference publication to consult to determine what capabilities do exist and which are the sources to rely on. In this article, a literature review is conducted to analyze the self-* capabilities needed to achieve a self-* equipped nature in truly autonomous systems. The article aims to shed light on a potential uniting taxonomy in this heterogeneous field. 
In addition, the results provided include conclusions on why those aspects are too heterogeneously tackled, depend hugely on specific cases, and shed light on why there is not a clear reference architecture to guide on the matter of which traits to equip the nodes with.}, } @article {pmid36991190, year = {2023}, author = {Rao, M and Tang, H and Wu, J and Song, W and Zhang, M and Yin, W and Zhuo, Y and Kiani, F and Chen, B and Jiang, X and Liu, H and Chen, HY and Midya, R and Ye, F and Jiang, H and Wang, Z and Wu, M and Hu, M and Wang, H and Xia, Q and Ge, N and Li, J and Yang, JJ}, title = {Thousands of conductance levels in memristors integrated on CMOS.}, journal = {Nature}, volume = {615}, number = {7954}, pages = {823-829}, pmid = {36991190}, issn = {1476-4687}, support = {FA9550-19-1-0213//US Airforce Research Laboratory/ ; W911NF2120128//Army Research Office/ ; CMMI-2240407//National Science Foundation/ ; CMMI-1922206//National Science Foundation/ ; }, abstract = {Neural networks based on memristive devices[1-3] have the ability to improve throughput and energy efficiency for machine learning[4,5] and artificial intelligence[6], especially in edge applications[7-21]. Because training a neural network model from scratch is costly in terms of hardware resources, time and energy, it is impractical to do it individually on billions of memristive neural networks distributed at the edge. A practical approach would be to download the synaptic weights obtained from the cloud training and program them directly into memristors for the commercialization of edge applications. Some post-tuning in memristor conductance could be done afterwards or during applications to adapt to specific situations. Therefore, in neural network applications, memristors require high-precision programmability to guarantee uniform and accurate performance across a large number of memristive networks[22-28]. 
This requires many distinguishable conductance levels on each memristive device, not only laboratory-made devices but also devices fabricated in factories. Analog memristors with many conductance states also benefit other applications, such as neural network training, scientific computing and even 'mortal computing'[25,29,30]. Here we report 2,048 conductance levels achieved with memristors in fully integrated chips with 256 × 256 memristor arrays monolithically integrated on complementary metal-oxide-semiconductor (CMOS) circuits in a commercial foundry. We have identified the underlying physics that previously limited the number of conductance levels that could be achieved in memristors and developed electrical operation protocols to avoid such limitations. These results provide insights into the fundamental understanding of the microscopic picture of memristive switching as well as approaches to enable high-precision memristors for various applications. Fig. 1 HIGH-PRECISION MEMRISTOR FOR NEUROMORPHIC COMPUTING.: a, Proposed scheme of the large-scale application of memristive neural networks for edge computing. Neural network training is performed in the cloud. The obtained weights are downloaded and accurately programmed into a massive number of memristor arrays distributed at the edge, which imposes high-precision requirements on memristive devices. b, An eight-inch wafer with memristors fabricated by a commercial semiconductor manufacturer. c, High-resolution transmission electron microscopy image of the cross-section view of a memristor. Pt and Ta serve as the bottom electrode (BE) and top electrode (TE), respectively. Scale bars, 1 μm and 100 nm (inset). d, Magnification of the memristor material stack. Scale bar, 5 nm. e, As-programmed (blue) and after-denoising (red) currents of a memristor are read by a constant voltage (0.2 V). The denoising process eliminated the large-amplitude RTN observed in the as-programmed state (see Methods). 
f, Magnification of three nearest-neighbour states after denoising. The current of each state was read by a constant voltage (0.2 V). No large-amplitude RTN was observed, and all of the states can be clearly distinguished. g, An individual memristor on the chip was tuned into 2,048 resistance levels by high-resolution off-chip driving circuitry, and each resistance level was read by a d.c. voltage sweeping from 0 to 0.2 V. The target resistance was set from 50 µS to 4,144 µS with a 2-µS interval between neighbouring levels. All readings at 0.2 V are less than 1 µS from the target conductance. Bottom inset, magnification of the resistance levels. Top inset, experimental results of an entire 256 × 256 array programmed by its 6-bit on-chip circuitry into 64 32 × 32 blocks, and each block is programmed into one of the 64 conductance levels. Each of the 256 × 256 memristors has been previously switched over one million cycles, demonstrating the high endurance and robustness of the devices.}, } @article {pmid36990988, year = {2023}, author = {Kusunose, M and Muto, K}, title = {Public attitudes toward cloud computing and willingness to share personal health records (PHRs) and genome data for health care research in Japan.}, journal = {Human genome variation}, volume = {10}, number = {1}, pages = {11}, pmid = {36990988}, issn = {2054-345X}, support = {JP19km0405501//Japan Agency for Medical Research and Development (AMED)/ ; JP22ama221001//Japan Agency for Medical Research and Development (AMED)/ ; JP19km0405501//Japan Agency for Medical Research and Development (AMED)/ ; JP22ama221001//Japan Agency for Medical Research and Development (AMED)/ ; JP80745985//MEXT | Japan Society for the Promotion of Science (JSPS)/ ; JP80745985//MEXT | Japan Society for the Promotion of Science (JSPS)/ ; }, abstract = {Japan's government aims to promote the linkage of medical records, including medical genomic testing data and personal health records (PHRs), via cloud computing (the cloud). 
However, linking national medical records and using them for health care research can be controversial. Additionally, many ethical issues with using cloud networks with health care and genome data have been noted. However, no research has yet explored the Japanese public's opinions about their PHRs, including genome data, being shared for health care research or the use of the cloud for storing and analyzing such data. Therefore, we conducted a survey in March 2021 to clarify the public's attitudes toward sharing their PHRs, including genome data and using the cloud for health care research. We analyzed data to experimentally create digital health basic literacy scores (BLSs). Our results showed that the Japanese public had concerns about data sharing that overlapped with structural cloud computing issues. The effect of incentives on changes in participants' willingness to share data (WTSD) was limited. Instead, there could be a correlation between WTSD and BLSs. Finally, we argue that it is vital to consider not only researchers but also research participants as value cocreators in health care research conducted through the cloud to overcome both parties' vulnerability.}, } @article {pmid36977690, year = {2023}, author = {Rogers, DM and Agarwal, R and Vermaas, JV and Smith, MD and Rajeshwar, RT and Cooper, C and Sedova, A and Boehm, S and Baker, M and Glaser, J and Smith, JC}, title = {SARS-CoV2 billion-compound docking.}, journal = {Scientific data}, volume = {10}, number = {1}, pages = {173}, pmid = {36977690}, issn = {2052-4463}, support = {DE-AC05-00OR22725//DOE | Office of Science (SC)/ ; DE-AC05-00OR22725//DOE | Office of Science (SC)/ ; DE-AC05-00OR22725//DOE | Office of Science (SC)/ ; DE-AC05-00OR22725//DOE | Office of Science (SC)/ ; DE-AC05-00OR22725//DOE | Office of Science (SC)/ ; DE-AC05-00OR22725//DOE | Office of Science (SC)/ ; DE-AC05-00OR22725//DOE | Office of Science (SC)/ ; DE-AC05-00OR22725//DOE | Office of Science (SC)/ ; }, mesh = {Humans ; 
*COVID-19 ; *Ligands ; *SARS-CoV-2 ; Molecular Docking Simulation ; }, abstract = {This dataset contains ligand conformations and docking scores for 1.4 billion molecules docked against 6 structural targets from SARS-CoV2, representing 5 unique proteins: MPro, NSP15, PLPro, RDRP, and the Spike protein. Docking was carried out using the AutoDock-GPU platform on the Summit supercomputer and Google Cloud. The docking procedure employed the Solis Wets search method to generate 20 independent ligand binding poses per compound. Each compound geometry was scored using the AutoDock free energy estimate, and rescored using RFScore v3 and DUD-E machine-learned rescoring models. Input protein structures are included, suitable for use by AutoDock-GPU and other docking programs. As the result of an exceptionally large docking campaign, this dataset represents a valuable resource for discovering trends across small molecule and protein binding sites, training AI models, and comparing to inhibitor compounds targeting SARS-CoV-2. The work also gives an example of how to organize and process data from ultra-large docking screens.}, } @article {pmid36970305, year = {2022}, author = {Christensen, JR and Golden, HE and Alexander, LC and Pickard, BR and Fritz, KM and Lane, CR and Weber, MH and Kwok, RM and Keefer, MN}, title = {Headwater streams and inland wetlands: Status and advancements of geospatial datasets and maps across the United States.}, journal = {Earth-science reviews}, volume = {235}, number = {}, pages = {1-24}, pmid = {36970305}, issn = {0012-8252}, support = {EPA999999/ImEPA/Intramural EPA/United States ; }, abstract = {Headwater streams and inland wetlands provide essential functions that support healthy watersheds and downstream waters. However, scientists and aquatic resource managers lack a comprehensive synthesis of national and state stream and wetland geospatial datasets and emerging technologies that can further improve these data. 
We conducted a review of existing United States (US) federal and state stream and wetland geospatial datasets, focusing on their spatial extent, permanence classifications, and current limitations. We also examined recent peer-reviewed literature for emerging methods that can potentially improve the estimation, representation, and integration of stream and wetland datasets. We found that federal and state datasets rely heavily on the US Geological Survey's National Hydrography Dataset for stream extent and duration information. Only eleven states (22%) had additional stream extent information and seven states (14%) provided additional duration information. Likewise, federal and state wetland datasets primarily use the US Fish and Wildlife Service's National Wetlands Inventory (NWI) Geospatial Dataset, with only two states using non-NWI datasets. Our synthesis revealed that LiDAR-based technologies hold promise for advancing stream and wetland mapping at limited spatial extents. While machine learning techniques may help to scale-up these LiDAR-derived estimates, challenges related to preprocessing and data workflows remain. High-resolution commercial imagery, supported by public imagery and cloud computing, may further aid characterization of the spatial and temporal dynamics of streams and wetlands, especially using multi-platform and multi-temporal machine learning approaches. Models integrating both stream and wetland dynamics are limited, and field-based efforts must remain a key component in developing improved headwater stream and wetland datasets. 
Continued financial and partnership support of existing databases is also needed to enhance mapping and inform water resources research and policy decisions.}, } @article {pmid36969371, year = {2023}, author = {Islam, MJ and Datta, R and Iqbal, A}, title = {Actual rating calculation of the zoom cloud meetings app using user reviews on google play store with sentiment annotation of BERT and hybridization of RNN and LSTM.}, journal = {Expert systems with applications}, volume = {223}, number = {}, pages = {119919}, pmid = {36969371}, issn = {0957-4174}, abstract = {The recent outbreak of COVID-19 forced people to work from home. All the educational institutes ran their academic activities online. The online meeting app "Zoom Cloud Meeting" provided the most complete support for this purpose. To provide the proper functionality required in this situation of online support, the developers need frequent releases of new versions of the application. This increases the chance of introducing many bugs during the release of new versions. To fix those introduced bugs, developers need users' feedback on the new release of the application. However, most of the time the ratings and reviews contradict each other because of users' inadvertence in giving ratings and reviews. This has been the main obstacle for software developers trying to fix those bugs using user ratings. For this reason, we conduct this average rating calculation process based on the sentiment of user reviews to help software developers. We use BERT-based sentiment annotation to create unbiased datasets and hybridize RNN with LSTM to find calculated ratings based on the unbiased reviews dataset. Out of four models trained on four different datasets, we found promising performance in two datasets containing a sufficiently large number of unbiased reviews. The results show that the reviews have more positive sentiments than the actual ratings. 
Our results found an average rating of 3.60 stars, whereas the actual average rating found in the dataset is 3.08 stars. We use reviews of more than 250 apps from the Google Play app store. Our results could be more promising if we could use a large dataset containing only the reviews of the Zoom Cloud Meeting app.}, } @article {pmid36967390, year = {2023}, author = {Camacho, C and Boratyn, GM and Joukov, V and Vera Alvarez, R and Madden, TL}, title = {ElasticBLAST: accelerating sequence search via cloud computing.}, journal = {BMC bioinformatics}, volume = {24}, number = {1}, pages = {117}, pmid = {36967390}, issn = {1471-2105}, mesh = {*Cloud Computing ; *Software ; Computational Biology/methods ; Databases, Factual ; Costs and Cost Analysis ; }, abstract = {BACKGROUND: Biomedical researchers use alignments produced by BLAST (Basic Local Alignment Search Tool) to categorize their query sequences. Producing such alignments is an essential bioinformatics task that is well suited for the cloud. The cloud can perform many calculations quickly as well as store and access large volumes of data. Bioinformaticians can also use it to collaborate with other researchers, sharing their results, datasets and even their pipelines on a common platform.

RESULTS: We present ElasticBLAST, a cloud native application to perform BLAST alignments in the cloud. ElasticBLAST can handle anywhere from a few to many thousands of queries and run the searches on thousands of virtual CPUs (if desired), deleting resources when it is done. It uses cloud native tools for orchestration and can request discounted instances, lowering cloud costs for users. It is supported on Amazon Web Services and Google Cloud Platform. It can search BLAST databases that are user provided or from the National Center for Biotechnology Information.

CONCLUSION: We show that ElasticBLAST is a useful application that can efficiently perform BLAST searches for the user in the cloud, demonstrating that with two examples. At the same time, it hides much of the complexity of working in the cloud, lowering the threshold to move work to the cloud.}, } @article {pmid36961920, year = {2023}, author = {Monteiro, MG and Pantani, D and Pinsky, I and Hernandes Rocha, TA}, title = {Using the Pan American Health Organization Digital Conversational Agent to Educate the Public on Alcohol Use and Health: Preliminary Analysis.}, journal = {JMIR formative research}, volume = {7}, number = {}, pages = {e43165}, pmid = {36961920}, issn = {2561-326X}, abstract = {BACKGROUND: There is widespread misinformation about the effects of alcohol consumption on health, which was amplified during the COVID-19 pandemic through social media and internet channels. Chatbots and conversational agents became an important piece of the World Health Organization (WHO) response during the COVID-19 pandemic to quickly disseminate evidence-based information related to COVID-19 and tobacco to the public. The Pan American Health Organization (PAHO) seized the opportunity to develop a conversational agent to talk about alcohol-related topics and therefore complement traditional forms of health education that have been promoted in the past.

OBJECTIVE: This study aimed to develop and deploy a digital conversational agent to interact with an unlimited number of users anonymously, 24 hours a day, about alcohol topics, including ways to reduce risks from drinking, that is accessible in several languages, at no cost, and through various devices.

METHODS: The content development was based on the latest scientific evidence on the impacts of alcohol on health, social norms about drinking, and data from the WHO and PAHO. The agent itself was developed through a nonexclusive license agreement with a private company (Soul Machines) and included Google Digital Flow ES as the natural language processing software and Amazon Web Services for cloud services. Another company was contracted to program all the conversations, following the technical advice of PAHO staff.

RESULTS: The conversational agent was named Pahola, and it was deployed on November 19, 2021, through the PAHO website after a launch event with high publicity. No identifiable data were used and all interactions were anonymous, and therefore, this was not considered research with human subjects. Pahola speaks in English, Spanish, and Portuguese and interacts anonymously with a potentially infinite number of users through various digital devices. Users were required to accept the terms and conditions to enable access to their camera and microphone to interact with Pahola. Pahola attracted good attention from the media and reached 1.6 million people, leading to 236,000 clicks on its landing page, mostly through mobile devices. Only 1532 users had a conversation after clicking to talk to Pahola. The average time users spent talking to Pahola was 5 minutes. Major dropouts were observed in different steps of the conversation flow. Some questions asked by users were not anticipated during programming and could not be answered.

CONCLUSIONS: Our findings showed several limitations to using a conversational agent for alcohol education to the general public. Improvements are needed to expand the content to make it more meaningful and engaging to the public. The potential of chatbots to educate the public on alcohol-related topics seems enormous but requires a long-term investment of resources and research to be useful and reach many more people.}, } @article {pmid36958108, year = {2023}, author = {Menghani, RR and Das, A and Kraft, RH}, title = {A sensor-enabled cloud-based computing platform for computational brain biomechanics.}, journal = {Computer methods and programs in biomedicine}, volume = {233}, number = {}, pages = {107470}, doi = {10.1016/j.cmpb.2023.107470}, pmid = {36958108}, issn = {1872-7565}, mesh = {*Cloud Computing ; Biomechanical Phenomena ; *Head ; Brain/physiology ; Software ; }, abstract = {BACKGROUND AND OBJECTIVES: Driven by the risk of repetitive head trauma, sensors have been integrated into mouthguards to measure head impacts in contact sports and military activities. These wearable devices, referred to as "instrumented" or "smart" mouthguards are being actively developed by various research groups and organizations. These instrumented mouthguards provide an opportunity to further study and understand the brain biomechanics due to impact. In this study, we present a brain modeling service that can use information from these sensors to predict brain injury metrics in an automated fashion.

METHODS: We have built a brain modeling platform using several of Amazon's Web Services (AWS) to enable cloud computing and scalability. We use a custom-built cloud-based finite element modeling code to compute the physics-based nonlinear response of the intracranial brain tissue and provide a frontend web application and an application programming interface for groups working on head impact sensor technology to include simulated injury predictions into their research pipeline.

RESULTS: The platform results have been validated against experimental data available in literature for brain-skull relative displacements, brain strains and intracranial pressure. The parallel processing capability of the platform has also been tested and verified. We also studied the accuracy of the custom head surfaces generated by Avatar 3D.

CONCLUSION: We present a validated cloud-based computational brain modeling platform that uses sensor data as input for numerical brain models and outputs a quantitative description of brain tissue strains and injury metrics. The platform is expected to generate transparent, reproducible, and traceable brain computing results.}, } @article {pmid36950362, year = {2023}, author = {Gonzalez, EM and Zarei, A and Hendler, N and Simmons, T and Zarei, A and Demieville, J and Strand, R and Rozzi, B and Calleja, S and Ellingson, H and Cosi, M and Davey, S and Lavelle, DO and Truco, MJ and Swetnam, TL and Merchant, N and Michelmore, RW and Lyons, E and Pauli, D}, title = {PhytoOracle: Scalable, modular phenomics data processing pipelines.}, journal = {Frontiers in plant science}, volume = {14}, number = {}, pages = {1112973}, pmid = {36950362}, issn = {1664-462X}, abstract = {As phenomics data volume and dimensionality increase due to advancements in sensor technology, there is an urgent need to develop and implement scalable data processing pipelines. Current phenomics data processing pipelines lack modularity, extensibility, and processing distribution across sensor modalities and phenotyping platforms. To address these challenges, we developed PhytoOracle (PO), a suite of modular, scalable pipelines for processing large volumes of field phenomics RGB, thermal, PSII chlorophyll fluorescence 2D images, and 3D point clouds. PhytoOracle aims to (i) improve data processing efficiency; (ii) provide an extensible, reproducible computing framework; and (iii) enable data fusion of multi-modal phenomics data. PhytoOracle integrates open-source distributed computing frameworks for parallel processing on high-performance computing, cloud, and local computing environments. Each pipeline component is available as a standalone container, providing transferability, extensibility, and reproducibility. 
The PO pipeline extracts and associates individual plant traits across sensor modalities and collection time points, representing a unique multi-system approach to addressing the genotype-phenotype gap. To date, PO supports lettuce and sorghum phenotypic trait extraction, with a goal of widening the range of supported species in the future. At the maximum number of cores tested in this study (1,024 cores), PO processing times were: 235 minutes for 9,270 RGB images (140.7 GB), 235 minutes for 9,270 thermal images (5.4 GB), and 13 minutes for 39,678 PSII images (86.2 GB). These processing times represent end-to-end processing, from raw data to fully processed numerical phenotypic trait data. Repeatability values of 0.39-0.95 (bounding area), 0.81-0.95 (axis-aligned bounding volume), 0.79-0.94 (oriented bounding volume), 0.83-0.95 (plant height), and 0.81-0.95 (number of points) were observed in Field Scanalyzer data. We also show the ability of PO to process drone data with a repeatability of 0.55-0.95 (bounding area).}, } @article {pmid36949901, year = {2023}, author = {Cossío, F and Schurz, H and Engström, M and Barck-Holst, C and Tsirikoglou, A and Lundström, C and Gustafsson, H and Smith, K and Zackrisson, S and Strand, F}, title = {VAI-B: a multicenter platform for the external validation of artificial intelligence algorithms in breast imaging.}, journal = {Journal of medical imaging (Bellingham, Wash.)}, volume = {10}, number = {6}, pages = {061404}, pmid = {36949901}, issn = {2329-4302}, abstract = {PURPOSE: Multiple vendors are currently offering artificial intelligence (AI) computer-aided systems for triage detection, diagnosis, and risk prediction of breast cancer based on screening mammography. There is an imminent need to establish validation platforms that enable fair and transparent testing of these systems against external data.

APPROACH: We developed validation of artificial intelligence for breast imaging (VAI-B), a platform for independent validation of AI algorithms in breast imaging. The platform is a hybrid solution, with one part implemented in the cloud and another in an on-premises environment at Karolinska Institute. Cloud services provide the flexibility of scaling the computing power during inference time, while secure on-premises clinical data storage preserves their privacy. A MongoDB database and a python package were developed to store and manage the data on-premises. VAI-B requires four data components: radiological images, AI inferences, radiologist assessments, and cancer outcomes.

RESULTS: To pilot test VAI-B, we defined a case-control population based on 8080 patients diagnosed with breast cancer and 36,339 healthy women based on the Swedish national quality registry for breast cancer. Images and radiological assessments from more than 100,000 mammography examinations were extracted from hospitals in three regions of Sweden. The images were processed by AI systems from three vendors in a virtual private cloud to produce abnormality scores related to signs of cancer in the images. A total of 105,706 examinations have been processed and stored in the database.

CONCLUSIONS: We have created a platform that will allow downstream evaluation of AI systems for breast cancer detection, which enables faster development cycles for participating vendors and safer AI adoption for participating hospitals. The platform was designed to be scalable and ready to be expanded should a new vendor want to evaluate their system or should a new hospital wish to obtain an evaluation of different AI systems on their images.}, } @article {pmid36947346, year = {2023}, author = {Abler, D and Schaer, R and Oreiller, V and Verma, H and Reichenbach, J and Aidonopoulos, O and Evéquoz, F and Jreige, M and Prior, JO and Depeursinge, A}, title = {QuantImage v2: a comprehensive and integrated physician-centered cloud platform for radiomics and machine learning research.}, journal = {European radiology experimental}, volume = {7}, number = {1}, pages = {16}, pmid = {36947346}, issn = {2509-9280}, support = {205320/179069//Schweizerischer Nationalfonds zur Förderung der Wissenschaftlichen Forschung/ ; IMAGINE//Swiss Personalized Health Network (SPHN)/ ; MSXplain//Hasler Stiftung/ ; EPICS//Hasler Stiftung/ ; QA4IQI//Swiss Personalized Health Network (SPHN)/ ; }, mesh = {*Radiology/instrumentation/methods ; *Computational Biology ; *Cloud Computing ; Research ; Software ; Models, Theoretical ; Forecasting ; Carcinoma/diagnostic imaging ; Lung Neoplasms/diagnostic imaging ; Humans ; Machine Learning ; }, abstract = {BACKGROUND: Radiomics, the field of image-based computational medical biomarker research, has experienced rapid growth over the past decade due to its potential to revolutionize the development of personalized decision support models. However, despite its research momentum and important advances toward methodological standardization, the translation of radiomics prediction models into clinical practice only progresses slowly. 
The lack of physicians leading the development of radiomics models and insufficient integration of radiomics tools in the clinical workflow contribute to this slow uptake.

METHODS: We propose a physician-centered vision of radiomics research and derive minimal functional requirements for radiomics research software to support this vision. Free-to-access radiomics tools and frameworks were reviewed to identify best practices and reveal the shortcomings of existing software solutions to optimally support physician-driven radiomics research in a clinical environment.

RESULTS: Support for user-friendly development and evaluation of radiomics prediction models via machine learning was found to be missing in most tools. QuantImage v2 (QI2) was designed and implemented to address these shortcomings. QI2 relies on well-established existing tools and open-source libraries to realize and concretely demonstrate the potential of a one-stop tool for physician-driven radiomics research. It provides web-based access to cohort management, feature extraction, and visualization and supports "no-code" development and evaluation of machine learning models against patient-specific outcome data.

CONCLUSIONS: QI2 fills a gap in the radiomics software landscape by enabling "no-code" radiomics research, including model validation, in a clinical environment. Further information about QI2, a public instance of the system, and its source code is available at https://medgift.github.io/quantimage-v2-info/ . Key points As domain experts, physicians play a key role in the development of radiomics models. Existing software solutions do not support physician-driven research optimally. QuantImage v2 implements a physician-centered vision for radiomics research. QuantImage v2 is a web-based, "no-code" radiomics research platform.}, } @article {pmid36944981, year = {2023}, author = {Varesio, C and De Giorgis, V and Veggiotti, P and Nardocci, N and Granata, T and Ragona, F and Pasca, L and Mensi, MM and Borgatti, R and Olivotto, S and Previtali, R and Riva, A and Mancardi, MM and Striano, P and Cavallin, M and Guerrini, R and Operto, FF and Pizzolato, A and Di Maulo, R and Martino, F and Lodi, A and Marini, C}, title = {GLUT1-DS Italian registry: past, present, and future: a useful tool for rare disorders.}, journal = {Orphanet journal of rare diseases}, volume = {18}, number = {1}, pages = {63}, pmid = {36944981}, issn = {1750-1172}, mesh = {Female ; Humans ; Male ; *Glucose Transporter Type 1/deficiency ; Italy ; Prospective Studies ; *Rare Diseases ; Registries ; Retrospective Studies ; Infant ; }, abstract = {BACKGROUND: GLUT1 deficiency syndrome is a rare, genetically determined neurological disorder for which Ketogenic Dietary Treatment represents the gold standard and lifelong treatment. Patient registries are powerful tools providing insights and real-world data on rare diseases.

OBJECTIVE: To describe the implementation of a national web-based registry for GLUT1-DS.

METHODS: This is a retrospective and prospective, multicenter, observational registry developed in collaboration with the Italian GLUT1-DS association and based on an innovative, flexible and configurable cloud computing technology platform, structured according to the most rigorous requirements for the management of patients' sensitive data. The Glut1 Registry collects baseline and follow-up data on patients' demographics, history, symptoms, genotype, clinical, and instrumental evaluations and therapies.

RESULTS: Five Centers in Italy joined the registry, and two more Centers are currently joining. In the first two years of running, data from 67 patients (40 females and 27 males) have been collected. Age at symptom onset was within the first year of life in most (40, 60%) patients. The diagnosis was formulated in infancy in almost half of the cases (34, 51%). Symptoms at onset were mainly paroxysmal (mostly epileptic seizure and paroxysmal ocular movement disorder) or mixed paroxysmal and fixed symptoms (mostly psychomotor delay). Most patients (53, 79%) are currently under Ketogenic dietary treatments.

CONCLUSIONS: We describe the principles behind the design, development, and deployment of the web-based nationwide GLUT1-DS registry. It represents a stepping stone towards a more comprehensive understanding of the disease from onset to adulthood. It also represents a virtuous model from a technical, legal, and organizational point of view, thus representing a possible paradigmatic example for other rare disease registry implementation.}, } @article {pmid36937654, year = {2023}, author = {Selvarajan, S and Srivastava, G and Khadidos, AO and Khadidos, AO and Baza, M and Alshehri, A and Lin, JC}, title = {An artificial intelligence lightweight blockchain security model for security and privacy in IIoT systems.}, journal = {Journal of cloud computing (Heidelberg, Germany)}, volume = {12}, number = {1}, pages = {38}, pmid = {36937654}, issn = {2192-113X}, abstract = {The Industrial Internet of Things (IIoT) promises to deliver innovative business models across multiple domains by providing ubiquitous connectivity, intelligent data, predictive analytics, and decision-making systems for improved market performance. However, traditional IIoT architectures are highly susceptible to many security vulnerabilities and network intrusions, which bring challenges such as lack of privacy, integrity, trust, and centralization. This research aims to implement an Artificial Intelligence-based Lightweight Blockchain Security Model (AILBSM) to ensure privacy and security of IIoT systems. This novel model is meant to address issues that can occur with security and privacy when dealing with Cloud-based IIoT systems that handle data in the Cloud or on the Edge of Networks (on-device). The novel contribution of this paper is that it combines the advantages of both lightweight blockchain and Convivial Optimized Sprinter Neural Network (COSNN) based AI mechanisms with simplified and improved security operations. 
Here, the significant impact of attacks is reduced by transforming features into encoded data using an Authentic Intrinsic Analysis (AIA) model. Extensive experiments are conducted to validate this system using various attack datasets. In addition, the results of privacy protection and AI mechanisms are evaluated separately and compared using various indicators. By using the proposed AILBSM framework, the execution time is minimized to 0.6 seconds, the overall classification accuracy is improved to 99.8%, and detection performance is increased to 99.7%. Due to the inclusion of auto-encoder based transformation and blockchain authentication, the anomaly detection performance of the proposed model is highly improved, when compared to other techniques.}, } @article {pmid36937168, year = {2023}, author = {Sadasivan, H and Maric, M and Dawson, E and Iyer, V and Israeli, J and Narayanasamy, S}, title = {Accelerating Minimap2 for Accurate Long Read Alignment on GPUs.}, journal = {Journal of biotechnology and biomedicine}, volume = {6}, number = {1}, pages = {13-23}, pmid = {36937168}, issn = {2642-9128}, support = {R01 HL144125/HL/NHLBI NIH HHS/United States ; }, abstract = {Long read sequencing technology is becoming increasingly popular for Precision Medicine applications like Whole Genome Sequencing (WGS) and microbial abundance estimation. Minimap2 is the state-of-the-art aligner and mapper used by the leading long read sequencing technologies, today. However, Minimap2 on CPUs is very slow for long noisy reads. ~60-70% of the run-time on a CPU comes from the highly sequential chaining step in Minimap2. On the other hand, most Point-of-Care computational workflows in long read sequencing use Graphics Processing Units (GPUs). We present minimap2-accelerated (mm2-ax), a heterogeneous design for sequence mapping and alignment where minimap2's compute intensive chaining step is sped up on the GPU and demonstrate its time and cost benefits. 
We extract better intra-read parallelism from chaining without losing mapping accuracy by forward transforming Minimap2's chaining algorithm. Moreover, we better utilize the high memory available on modern cloud instances apart from better workload balancing, data locality and minimal branch divergence on the GPU. We show mm2-ax on an NVIDIA A100 GPU improves the chaining step with 5.41 - 2.57X speedup and 4.07 - 1.93X speedup : costup over the fastest version of Minimap2, mm2-fast, benchmarked on a Google Cloud Platform instance of 30 SIMD cores.}, } @article {pmid36936667, year = {2023}, author = {Namoun, A and Tufail, A and Nawas, W and BenRhouma, O and Alshanqiti, A}, title = {A Systematic Literature Review on Service Composition for People with Disabilities: Taxonomies, Solutions, and Open Research Challenges.}, journal = {Computational intelligence and neuroscience}, volume = {2023}, number = {}, pages = {5934548}, pmid = {36936667}, issn = {1687-5273}, mesh = {Humans ; *Disabled Persons ; *Self-Help Devices ; }, abstract = {Integrating smart heterogeneous objects, IoT devices, data sources, and software services to produce new business processes and functionalities continues to attract considerable attention from the research community due to its unraveled advantages, including reusability, adaptation, distribution, and pervasiveness. However, the exploitation of service-oriented computing technologies (e.g., SOC, SOA, and microservice architectures) by people with special needs is underexplored and often overlooked. Furthermore, the existing challenges in this area are yet to be identified clearly. This research study presents a rigorous literature survey of the recent advances in service-oriented composition approaches and solutions for disabled people, their domains of application, and the major challenges, covering studies published between January 2010 and October 2022. 
To this end, we applied the systematic literature review (SLR) methodology to retrieve and collate only the articles presenting and discussing service composition solutions tailored to produce digitally accessible services for consumption by people who suffer from an impairment or loss of some physical or mental functions. We searched six renowned bibliographic databases, particularly IEEE Xplore, Web of Science, Springer Link, ACM Library, ScienceDirect, and Google Scholar, to synthesize a final pool of 38 related articles. Our survey contributes a comprehensive taxonomy of service composition solutions, techniques, and practices that are utilized to create assistive technologies and services. The seven-facet taxonomy helps researchers and practitioners to quickly understand and analyze the fundamental conceptualizations and characteristics of accessible service composition for people with disabilities. Key findings showed that services are fused to assist disabled persons to carry out their daily activities, mainly in smart homes and ambient intelligent environments. Despite the emergence of immersive technologies (e.g., wearable computing), user-service interactions are enabled primarily through tactile and speech modalities. Service descriptions mainly incorporate functional features (e.g., performance, latency, and cost) of service quality, largely ignoring accessibility features. Moreover, the outstanding research problems revolve around (1) the unavailability of assistive services datasets, (2) the underspecification of accessibility aspects of disabilities, (3) the weak adoption of accessible and universal design practices, (4) the abstraction of service composition approaches, and (5) the rare experimental testing of composition approaches with disabled users. We conclude our survey with a set of guidelines to realize effective assistive service composition in IoT and cloud environments. 
Researchers and practitioners are advised to create assistive services that support the social relationships of disabled users and model their accessibility needs as part of the quality of service (QoS). Moreover, they should exploit AI/ML models to address the evolving requirements of disabled users in their unique environments. Furthermore, weaknesses of service composition solutions and research challenges are exposed as notable opportunities for future research.}, } @article {pmid36923109, year = {2023}, author = {Wu, H}, title = {Sharing and Cooperation of Improved Cross-Entropy Optimization Algorithm in Telemedicine Multimedia Information Processing.}, journal = {International journal of telemedicine and applications}, volume = {2023}, number = {}, pages = {7353489}, pmid = {36923109}, issn = {1687-6415}, abstract = {In order to improve the efficiency of medical multimedia information sharing, this paper combines cloud computing technology and SOA (service-oriented architecture) technology to build a medical multimedia information sharing system. Building a medical information sharing platform requires integrating information resources stored in information systems of medical institutions and nonmedical information systems related to medical information and forming a huge resource pool. It is important to mine and analyze the information resources in the resource pool to realize the sharing and interaction of medical information. To this end, this paper proposes a gain-adaptive control algorithm with online adjustable parameters and investigates the extension of the mutual entropy optimization algorithm in the control domain and its integrated processing capability in the process of medical multimedia information processing. 
In addition, this paper constructs a medical multimedia information sharing and collaboration platform with medical multimedia information sharing and telemedicine as the core and verifies the effectiveness of the platform through experiments. The simulation results and comparison results with other systems prove that the system in this paper can realize fast data processing, retrieve and analyze massive data, and meet the demand of remote intelligent diagnosis under the premise of safety and stability. Meanwhile, the system in this paper can help hospitals achieve fast and accurate diagnosis, which has strong theoretical and practical values.}, } @article {pmid36914133, year = {2023}, author = {Aman, MA and Chu, HJ}, title = {Long-term river extent dynamics and transition detection using remote sensing: Case studies of Mekong and Ganga River.}, journal = {The Science of the total environment}, volume = {876}, number = {}, pages = {162774}, doi = {10.1016/j.scitotenv.2023.162774}, pmid = {36914133}, issn = {1879-1026}, abstract = {River dynamics are currently comprehensively studied at either a bankline or reach-scale level. Monitoring large-scale and long-term river extent dynamics provides fundamental insights relevant to the impact of climatic factors and anthropogenic activities on fluvial geomorphology. This study analyzed the two most populous rivers, Ganga and Mekong, to understand the river extent dynamics using 32 years of Landsat satellite data (1990-2022) in a cloud computing platform. This study categorizes river dynamics and transitions using the combination of pixel-wise water frequency and temporal trends. This approach can demarcate the river channel stability, areas affected by erosion and sedimentation, and the seasonal transitions in the river. The results illustrate that the Ganga river channel is found to be relatively unstable and very prone to meandering and migration as almost 40 % of the river channel has been altered in the past 32 years. 
The seasonal transitions, such as lost seasonal and seasonal to permanent changes are more prominent in the Ganga river, and the dominance of meandering and sedimentation in the lower course is also illustrated. In contrast, the Mekong river has a more stable course with erosion and sedimentation observed at sparse locations in the lower course. However, the lost seasonal and seasonal to permanent changes are also dominant in the Mekong river. Since 1990, Ganga and Mekong rivers have lost approximately 13.3 % and 4.7 % of their seasonal water respectively, as compared to the other transitions and categories. Factors such as climate change, floods, and man-made reservoirs could all be critical in triggering these morphological changes.}, } @article {pmid36913423, year = {2023}, author = {Paulraj, D and Sethukarasi, T and Neelakandan, S and Prakash, M and Baburaj, E}, title = {An Efficient Hybrid Job Scheduling Optimization (EHJSO) approach to enhance resource search using Cuckoo and Grey Wolf Job Optimization for cloud environment.}, journal = {PloS one}, volume = {18}, number = {3}, pages = {e0282600}, pmid = {36913423}, issn = {1932-6203}, mesh = {*Software ; *Algorithms ; Cloud Computing ; Internet ; }, abstract = {Cloud computing has now evolved as an unavoidable technology in the fields of finance, education, internet business, and nearly all organisations. The cloud resources are practically accessible to cloud users over the internet to accomplish the desired task of the cloud users. The effectiveness and efficacy of cloud computing services depend on the tasks that the cloud users submit and the time taken to complete the task as well. By optimising resource allocation and utilisation, task scheduling is crucial to enhancing the effectiveness and performance of a cloud system. 
In this context, cloud computing offers a wide range of advantages, such as cost savings, security, flexibility, mobility, quality control, disaster recovery, automatic software upgrades, and sustainability. According to a recent research survey, more and more tech-savvy companies and industry executives recognize and utilize the advantages of Cloud computing. Hence, as the number of users of the Cloud increases, so does the need to regulate the resource allocation as well. However, the scheduling of jobs in the cloud necessitates a smart and fast algorithm that can discover the resources that are accessible and schedule the jobs that are requested by different users. Consequently, for better resource allocation and job scheduling, a fast, efficient, tolerable job scheduling algorithm is required. Efficient Hybrid Job Scheduling Optimization (EHJSO) utilises Cuckoo Search Optimization and Grey Wolf Job Optimization (GWO). Due to some cuckoo species' obligate brood parasitism (laying eggs in other species' nests), the Cuckoo search optimization approach was developed. Grey wolf optimization (GWO) is a population-oriented AI system inspired by grey wolf social structure and hunting strategies. Make span, computation time, fitness, iteration-based performance, and success rate were utilised to compare previous studies. Experiments show that the recommended method is superior.}, } @article {pmid36910722, year = {2023}, author = {Yang, M and Ge, C and Zhao, X and Kou, H}, title = {FSPLO: a fast sensor placement location optimization method for cloud-aided inspection of smart buildings.}, journal = {Journal of cloud computing (Heidelberg, Germany)}, volume = {12}, number = {1}, pages = {31}, pmid = {36910722}, issn = {2192-113X}, abstract = {With the awakening of health awareness, people are raising a series of health-related requirements for the buildings they live in, with a view to improving their living conditions.
In this context, BIM (Building Information Modeling) makes full use of cutting-edge theories and technologies in many domains such as health, environment, and information technology to provide a new way for engineers to design and build various healthy and green buildings. Specifically, sensors are playing an important role in achieving smart building goals by monitoring the surroundings of buildings, objects and people with the help of cloud computing technology. In addition, it is necessary to quickly determine the optimal sensor placement to save energy and minimize the number of sensors for a building, which is a nontrivial task for the cloud platform due to the limited number of sensors available and massive candidate locations for each sensor. In this paper, we propose a Fast Sensor Placement Location Optimization approach (FSPLO) to solve the BIM problem in cloud-aided smart buildings. In particular, we quickly filter out the repeated candidate locations of sensors in FSPLO using Locality Sensitive Hashing (LSH) techniques to maintain only a small number of optimized locations for deploying sensors around buildings. In this way, we can significantly reduce the number of sensors used for health and green buildings. Finally, a set of simulation experiments demonstrates the excellent performance of our proposed FSPLO method.}, } @article {pmid36904959, year = {2023}, author = {Salat, L and Davis, M and Khan, N}, title = {DNS Tunnelling, Exfiltration and Detection over Cloud Environments.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {5}, pages = {}, pmid = {36904959}, issn = {1424-8220}, abstract = {The domain name system (DNS) protocol is fundamental to the operation of the internet, however, in recent years various methodologies have been developed that enable DNS attacks on organisations.
In the last few years, the increased use of cloud services by organisations has created further security challenges as cyber criminals use numerous methodologies to exploit cloud services, configurations and the DNS protocol. In this paper, two different DNS tunnelling methods, Iodine and DNScat, have been conducted in the cloud environment (Google and AWS) and positive results of exfiltration have been achieved under different firewall configurations. Detection of malicious use of DNS protocol can be a challenge for organisations with limited cybersecurity support and expertise. In this study, various DNS tunnelling detection techniques were utilised in a cloud environment to create an effective monitoring system with a reliable detection rate, low implementation cost, and ease of use for organisations with limited detection capabilities. The Elastic stack (an open-source framework) was used to configure a DNS monitoring system and to analyse the collected DNS logs. Furthermore, payload and traffic analysis techniques were implemented to identify different tunnelling methods. This cloud-based monitoring system offers various detection techniques that can be used for monitoring DNS activities of any network especially accessible to small organisations. 
Moreover, the Elastic stack is open-source and it has no limitation with regards to the data that can be uploaded daily.}, } @article {pmid36904927, year = {2023}, author = {Saban, M and Bekkour, M and Amdaouch, I and El Gueri, J and Ait Ahmed, B and Chaari, MZ and Ruiz-Alzola, J and Rosado-Muñoz, A and Aghzout, O}, title = {A Smart Agricultural System Based on PLC and a Cloud Computing Web Application Using LoRa and LoRaWan.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {5}, pages = {}, pmid = {36904927}, issn = {1424-8220}, abstract = {The increasing challenges of agricultural processes and the growing demand for food globally are driving the industrial agriculture sector to adopt the concept of 'smart farming'. Smart farming systems, with their real-time management and high level of automation, can greatly improve productivity, food safety, and efficiency in the agri-food supply chain. This paper presents a customized smart farming system that uses a low-cost, low-power, and wide-range wireless sensor network based on Internet of Things (IoT) and Long Range (LoRa) technologies. In this system, LoRa connectivity is integrated with existing Programmable Logic Controllers (PLCs), which are commonly used in industry and farming to control multiple processes, devices, and machinery through the Simatic IOT2040. The system also includes a newly developed web-based monitoring application hosted on a cloud server, which processes data collected from the farm environment and allows for remote visualization and control of all connected devices. A Telegram bot is included for automated communication with users through this mobile messaging app. 
The proposed network structure has been tested, and the path loss in the wireless LoRa is evaluated.}, } @article {pmid36904909, year = {2023}, author = {Lin, HY and Tsai, TT and Ting, PY and Fan, YR}, title = {Identity-Based Proxy Re-Encryption Scheme Using Fog Computing and Anonymous Key Generation.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {5}, pages = {}, pmid = {36904909}, issn = {1424-8220}, support = {MOST 110-2221-E-019-041-MY3//Ministry of Science and Technology of Republic of China/ ; }, abstract = {In the fog computing architecture, a fog is a node closer to clients and responsible for responding to users' requests as well as forwarding messages to clouds. In some medical applications such as the remote healthcare, a sensor of patients will first send encrypted data of sensed information to a nearby fog such that the fog acting as a re-encryption proxy could generate a re-encrypted ciphertext designated for requested data users in the cloud. Specifically, a data user can request access to cloud ciphertexts by sending a query to the fog node that will forward this query to the corresponding data owner who preserves the right to grant or deny the permission to access his/her data. When the access request is granted, the fog node will obtain a unique re-encryption key for carrying out the re-encryption process. Although some previous concepts have been proposed to fulfill these application requirements, they either have known security flaws or incur higher computational complexity. In this work, we present an identity-based proxy re-encryption scheme on the basis of the fog computing architecture. Our identity-based mechanism uses public channels for key distribution and avoids the troublesome problem of key escrow. We also formally prove that the proposed protocol is secure in the IND-PrID-CPA notion. 
Furthermore, we show that our work exhibits better performance in terms of computational complexity.}, } @article {pmid36904869, year = {2023}, author = {Lin, HY}, title = {Secure Data Transfer Based on a Multi-Level Blockchain for Internet of Vehicles.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {5}, pages = {}, pmid = {36904869}, issn = {1424-8220}, abstract = {Because of the decentralized trait of the blockchain and the Internet of vehicles, both are very suitable for the architecture of the other. This study proposes a multi-level blockchain framework to secure information security on the Internet of vehicles. The main motivation of this study is to propose a new transaction block and ensure the identity of traders and the non-repudiation of transactions through the elliptic curve digital signature algorithm ECDSA. The designed multi-level blockchain architecture distributes the operations within the intra_cluster blockchain and the inter_cluster blockchain to improve the efficiency of the entire block. On the cloud computing platform, we exploit the threshold key management protocol, and the system can recover the system key as long as the threshold partial key is collected. This avoids the occurrence of PKI single-point failure. Thus, the proposed architecture ensures the security of OBU-RSU-BS-VM. The proposed multi-level blockchain framework consists of a block, intra-cluster blockchain and inter-cluster blockchain. The roadside unit RSU is responsible for the communication of vehicles in the vicinity, similar to a cluster head on the Internet of vehicles. This study exploits RSU to manage the block, and the base station is responsible for managing the intra-cluster blockchain named intra_clusterBC, and the cloud server at the back end is responsible for the entire system blockchain named inter_clusterBC. 
Finally, RSU, base stations and cloud servers cooperatively construct the multi-level blockchain framework and improve the security and the efficiency of the operation of the blockchain. Overall, in order to protect the security of the transaction data of the blockchain, we propose a new transaction block structure and adopt the elliptic curve cryptographic signature ECDSA to ensure that the Merkle tree root value is not changed and also make sure the transaction identity and non-repudiation of transaction data. Finally, this study considers information security in a cloud environment, and therefore we propose a secret-sharing and secure-map-reducing architecture based on the identity confirmation scheme. The proposed scheme with decentralization is very suitable for distributed connected vehicles and can also improve the execution efficiency of the blockchain.}, } @article {pmid36904852, year = {2023}, author = {Vitali, G and Arru, M and Magnanini, E}, title = {A Scalable Device for Undisturbed Measurement of Water and CO2 Fluxes through Natural Surfaces.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {5}, pages = {}, pmid = {36904852}, issn = {1424-8220}, support = {101000256//H2020 European Research Council/ ; }, abstract = {In a climate change scenario and under a growing interest in Precision Agriculture, it is more and more important to map and record seasonal trends of the respiration of cropland and natural surfaces. Ground-level sensors to be placed in the field or integrated into autonomous vehicles are of growing interest. In this scope, a low-power IoT-compliant device for measurement of multiple surface CO2 and WV concentrations have been designed and developed. The device is described and tested under controlled and field conditions, showing ready and easy access to collected values typical of a cloud-computing-based approach. 
The device proved to be usable in indoor and open-air environments for a long time, and the sensors were arranged in multiple configurations to evaluate simultaneous concentrations and flows, while the low-cost, low-power (LP IoT-compliant) design is achieved by a specific design of the printed circuit board and a firmware code fitting the characteristics of the controller.}, } @article {pmid36904779, year = {2023}, author = {Kwon, Y and Kim, W and Jung, I}, title = {Neural Network Models for Driving Control of Indoor Autonomous Vehicles in Mobile Edge Computing.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {5}, pages = {}, pmid = {36904779}, issn = {1424-8220}, abstract = {Mobile edge computing has been proposed as a solution for solving the latency problem of traditional cloud computing. In particular, mobile edge computing is needed in areas such as autonomous driving, which requires large amounts of data to be processed without latency for safety. Indoor autonomous driving is attracting attention as one of the mobile edge computing services. Furthermore, it relies on its sensors for location recognition because indoor autonomous driving cannot use a GPS device, as is the case with outdoor driving. However, while the autonomous vehicle is being driven, the real-time processing of external events and the correction of errors are required for safety. Furthermore, an efficient autonomous driving system is required because it is a mobile environment with resource constraints. This study proposes neural network models as a machine-learning method for autonomous driving in an indoor environment. The neural network model predicts the most appropriate driving command for the current location based on the range data measured with the LiDAR sensor. We designed six neural network models to be evaluated according to the number of input data points. 
In addition, we made an autonomous vehicle based on the Raspberry Pi for driving and learning and an indoor circular driving track for collecting data and performance evaluation. Finally, we evaluated six neural network models in terms of confusion matrix, response time, battery consumption, and driving command accuracy. In addition, when neural network learning was applied, the effect of the number of inputs was confirmed in the usage of resources. The result will influence the choice of an appropriate neural network model for an indoor autonomous vehicle.}, } @article {pmid36904650, year = {2023}, author = {Kumar, MS and Karri, GR}, title = {EEOA: Cost and Energy Efficient Task Scheduling in a Cloud-Fog Framework.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {5}, pages = {}, pmid = {36904650}, issn = {1424-8220}, abstract = {Cloud-fog computing is a wide range of service environments created to provide quick, flexible services to customers, and the phenomenal growth of the Internet of Things (IoT) has produced an immense amount of data on a daily basis. To complete tasks and meet service-level agreement (SLA) commitments, the provider assigns appropriate resources and employs scheduling techniques to efficiently manage the execution of received IoT tasks in fog or cloud systems. The effectiveness of cloud services is directly impacted by some other important criteria, such as energy usage and cost, which are not taken into account by many of the existing methodologies. To resolve the aforementioned problems, an effective scheduling algorithm is required to schedule the heterogeneous workload and enhance the quality of service (QoS). Therefore, a nature-inspired multi-objective task scheduling algorithm called the electric earthworm optimization algorithm (EEOA) is proposed in this paper for IoT requests in a cloud-fog framework. 
This method was created using the combination of the earthworm optimization algorithm (EOA) and the electric fish optimization algorithm (EFO) to improve EFO's potential to be exploited while looking for the best solution to the problem at hand. Concerning execution time, cost, makespan, and energy consumption, the suggested scheduling technique's performance was assessed using significant instances of real-world workloads such as CEA-CURIE and HPC2N. Based on simulation results, our proposed approach improves efficiency by 89%, energy consumption by 94%, and total cost by 87% over existing algorithms for the scenarios considered using different benchmarks. Detailed simulations demonstrate that the suggested approach provides a superior scheduling scheme with better results than the existing scheduling techniques.}, } @article {pmid36904580, year = {2023}, author = {Kalinagac, O and Gür, G and Alagöz, F}, title = {Prioritization Based Task Offloading in UAV-Assisted Edge Networks.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {5}, pages = {}, pmid = {36904580}, issn = {1424-8220}, abstract = {Under demanding operational conditions such as traffic surges, coverage issues, and low latency requirements, terrestrial networks may become inadequate to provide the expected service levels to users and applications. Moreover, when natural disasters or physical calamities occur, the existing network infrastructure may collapse, leading to formidable challenges for emergency communications in the area served. In order to provide wireless connectivity as well as facilitate a capacity boost under transient high service load situations, a substitute or auxiliary fast-deployable network is needed. Unmanned Aerial Vehicle (UAV) networks are well suited for such needs thanks to their high mobility and flexibility. In this work, we consider an edge network consisting of UAVs equipped with wireless access points. 
These software-defined network nodes serve a latency-sensitive workload of mobile users in an edge-to-cloud continuum setting. We investigate prioritization-based task offloading to support prioritized services in this on-demand aerial network. To serve this end, we construct an offloading management optimization model to minimize the overall penalty due to priority-weighted delay against task deadlines. Since the defined assignment problem is NP-hard, we also propose three heuristic algorithms as well as a branch and bound style quasi-optimal task offloading algorithm and investigate how the system performs under different operating conditions by conducting simulation-based experiments. Moreover, we made an open-source contribution to Mininet-WiFi to have independent Wi-Fi mediums, which were compulsory for simultaneous packet transfers on different Wi-Fi mediums.}, } @article {pmid36900055, year = {2023}, author = {Barany, L and Hore, N and Stadlbauer, A and Buchfelder, M and Brandner, S}, title = {Prediction of the Topography of the Corticospinal Tract on T1-Weighted MR Images Using Deep-Learning-Based Segmentation.}, journal = {Diagnostics (Basel, Switzerland)}, volume = {13}, number = {5}, pages = {}, pmid = {36900055}, issn = {2075-4418}, abstract = {INTRODUCTION: Tractography is an invaluable tool in the planning of tumor surgery in the vicinity of functionally eloquent areas of the brain as well as in the research of normal development or of various diseases. The aim of our study was to compare the performance of a deep-learning-based image segmentation for the prediction of the topography of white matter tracts on T1-weighted MR images to the performance of a manual segmentation.

METHODS: T1-weighted MR images of 190 healthy subjects from 6 different datasets were utilized in this study. Using deterministic diffusion tensor imaging, we first reconstructed the corticospinal tract on both sides. After training a segmentation model on 90 subjects of the PIOP2 dataset using the nnU-Net in a cloud-based environment with graphical processing unit (Google Colab), we evaluated its performance using 100 subjects from 6 different datasets.

RESULTS: Our algorithm created a segmentation model that predicted the topography of the corticospinal pathway on T1-weighted images in healthy subjects. The average dice score was 0.5479 (0.3513-0.7184) on the validation dataset.

CONCLUSIONS: Deep-learning-based segmentation could be applicable in the future to predict the location of white matter pathways in T1-weighted scans.}, } @article {pmid36899558, year = {2023}, author = {Zhang, H and Wang, P and Zhang, S and Wu, Z}, title = {An adaptive offloading framework for license plate detection in collaborative edge and cloud computing.}, journal = {Mathematical biosciences and engineering : MBE}, volume = {20}, number = {2}, pages = {2793-2814}, doi = {10.3934/mbe.2023131}, pmid = {36899558}, issn = {1551-0018}, abstract = {With the explosive growth of edge computing, huge amounts of data are being generated in billions of edge devices. It is really difficult to balance detection efficiency and detection accuracy at the same time for object detection on multiple edge devices. However, there are few studies to investigate and improve the collaboration between cloud computing and edge computing considering realistic challenges, such as limited computation capacities, network congestion and long latency. To tackle these challenges, we propose a new multi-model license plate detection hybrid methodology with the tradeoff between efficiency and accuracy to process the tasks of license plate detection at the edge nodes and the cloud server. We also design a new probability-based offloading initialization algorithm that not only obtains reasonable initial solutions but also facilitates the accuracy of license plate detection. In addition, we introduce an adaptive offloading framework by gravitational genetic searching algorithm (GGSA), which can comprehensively consider influential factors such as license plate detection time, queuing time, energy consumption, image quality, and accuracy. GGSA is helpful for Quality-of-Service (QoS) enhancement. Extensive experiments show that our proposed GGSA offloading framework exhibits good performance in collaborative edge and cloud computing of license plate detection compared with other methods. 
It demonstrates that, when compared with the traditional approach in which all tasks are executed on the cloud server (AC), the offloading effect of GGSA can be improved by 50.31%. Besides, the offloading framework has strong portability when making real-time offloading decisions.}, } @article {pmid36878917, year = {2023}, author = {Grossman, RL}, title = {Ten lessons for data sharing with a data commons.}, journal = {Scientific data}, volume = {10}, number = {1}, pages = {120}, pmid = {36878917}, issn = {2052-4463}, support = {U2CHL138346,//U.S. Department of Health & Human Services | NIH | National Heart, Lung, and Blood Institute (NHLBI)/ ; }, abstract = {A data commons is a cloud-based data platform with a governance structure that allows a community to manage, analyze and share its data. Data commons provide a research community with the ability to manage and analyze large datasets using the elastic scalability provided by cloud computing and to share data securely and compliantly, and, in this way, accelerate the pace of research. Over the past decade, a number of data commons have been developed and we discuss some of the lessons learned from this effort.}, } @article {pmid36867158, year = {2024}, author = {Kumar, D and Mandal, N and Kumar, Y}, title = {Cloud-Based Advanced Shuffled Frog Leaping Algorithm for Tasks Scheduling.}, journal = {Big data}, volume = {12}, number = {2}, pages = {110-126}, doi = {10.1089/big.2022.0095}, pmid = {36867158}, issn = {2167-647X}, mesh = {*Cloud Computing ; *Algorithms ; Learning ; }, abstract = {In recent years, the world has seen incremental growth in online activities owing to which the volume of data in cloud servers has also been increasing exponentially. With rapidly increasing data, load on cloud servers has increased in the cloud computing environment. With rapidly evolving technology, various cloud-based systems were developed to enhance the user experience.
But, the increased online activities around the globe have also increased data load on the cloud-based systems. To maintain the efficiency and performance of the applications hosted in cloud servers, task scheduling has become very important. The task scheduling process helps in reducing the makespan time and average cost by scheduling the tasks to virtual machines (VMs). The task scheduling depends on assigning tasks to VMs to process the incoming tasks. The task scheduling should follow some algorithm for assigning tasks to VMs. Many researchers have proposed different scheduling algorithms for task scheduling in the cloud computing environment. In this article, an advanced form of the shuffled frog optimization algorithm, which works on the nature and behavior of frogs searching for food, has been proposed. The authors have introduced a new algorithm to shuffle the position of frogs in memeplex to obtain the best result. By using this optimization technique, the cost function of the central processing unit, makespan, and fitness function were calculated. The fitness function is the sum of the budget cost function and the makespan time. The proposed method helps in reducing the makespan time as well as the average cost by scheduling the tasks to VMs effectively. Finally, the performance of the proposed advanced shuffled frog optimization method is compared with existing task scheduling methods such as whale optimization-based scheduler (W-Scheduler), sliced particle swarm optimization (SPSO-SA), inverted ant colony optimization algorithm, and static learning particle swarm optimization (SLPSO-SA) in terms of average cost and metric makespan. 
Experimentally, it was concluded that the proposed advanced frog optimization algorithm can schedule tasks to the VMs more effectively as compared with other scheduling methods with a makespan of 6, average cost of 4, and fitness of 10.}, } @article {pmid36860419, year = {2023}, author = {Singh, J and Chen, J and Singh, SP and Singh, MP and Hassan, MM and Hassan, MM and Awal, H}, title = {Load-Balancing Strategy: Employing a Capsule Algorithm for Cutting Down Energy Consumption in Cloud Data Centers for Next Generation Wireless Systems.}, journal = {Computational intelligence and neuroscience}, volume = {2023}, number = {}, pages = {6090282}, pmid = {36860419}, issn = {1687-5273}, mesh = {*Algorithms ; *Cloud Computing ; Data Accuracy ; Electric Power Supplies ; Happiness ; }, abstract = {Per-user pricing is possible with cloud computing, a relatively new technology. It provides remote testing and commissioning services through the web, and it utilizes virtualization to make available computing resources. In order to host and store firm data, cloud computing relies on data centers. Data centers are made up of networked computers, cables, power supplies, and other components. Cloud data centers have always had to prioritise high performance over energy efficiency. The biggest obstacle is finding a happy medium between system performance and energy consumption, namely, lowering energy use without compromising system performance or service quality. These results were obtained using the PlanetLab dataset. In order to implement the strategy we recommend, it is crucial to get a complete picture of how energy is being consumed in the cloud. Using proper optimization criteria and guided by energy consumption models, this article offers the Capsule Significance Level of Energy Consumption (CSLEC) pattern, which demonstrates how to conserve more energy in cloud data centers. 
Capsule optimization's prediction phase F1-score of 96.7 percent and 97 percent data accuracy allow for more precise projections of future value.}, } @article {pmid36855338, year = {2023}, author = {Calcaterra, D and Tomarchio, O}, title = {Policy-Based Holistic Application Management with BPMN and TOSCA.}, journal = {SN computer science}, volume = {4}, number = {3}, pages = {232}, pmid = {36855338}, issn = {2661-8907}, abstract = {With the wide adoption of cloud computing across technology industries and research institutions, an ever-growing interest in cloud orchestration frameworks has emerged over the past few years. These orchestration frameworks enable the automated provisioning and decommissioning of cloud applications in a timely and efficient manner, but they offer limited or no support for application management. While management functionalities, such as configuring, monitoring and scaling single components, can be directly covered by cloud providers and configuration management tools, holistic management features, such as backing up, testing and updating multiple components, cannot be automated using these approaches. In this paper, we propose a concept to automatically generate executable holistic management workflows based on the TOSCA standard. The practical feasibility of the approach is validated through a prototype implementation and a case study.}, } @article {pmid36852030, year = {2023}, author = {Manconi, A and Gnocchi, M and Milanesi, L and Marullo, O and Armano, G}, title = {Framing Apache Spark in life sciences.}, journal = {Heliyon}, volume = {9}, number = {2}, pages = {e13368}, pmid = {36852030}, issn = {2405-8440}, abstract = {Advances in high-throughput and digital technologies have required the adoption of big data for handling complex tasks in life sciences. However, the drift to big data led researchers to face technical and infrastructural challenges for storing, sharing, and analysing them. 
In fact, this kind of task requires distributed computing systems and algorithms able to ensure efficient processing. Cutting-edge distributed programming frameworks make it possible to implement flexible algorithms able to adapt the computation to the data over on-premise HPC clusters or cloud architectures. In this context, Apache Spark is a very powerful HPC engine for large-scale data processing on clusters. Also thanks to specialised libraries for working with structured and relational data, it supports machine learning, graph-based computation, and stream processing. This review article is aimed at helping life sciences researchers to ascertain the features of Apache Spark and to assess whether it can be successfully used in their research activities.}, } @article {pmid36850940, year = {2023}, author = {Antonini, M and Pincheira, M and Vecchio, M and Antonelli, F}, title = {An Adaptable and Unsupervised TinyML Anomaly Detection System for Extreme Industrial Environments.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {4}, pages = {}, pmid = {36850940}, issn = {1424-8220}, abstract = {Industrial assets often feature multiple sensing devices to keep track of their status by monitoring certain physical parameters. These readings can be analyzed with machine learning (ML) tools to identify potential failures through anomaly detection, allowing operators to take appropriate corrective actions. Typically, these analyses are conducted on servers located in data centers or the cloud. However, this approach increases system complexity and is susceptible to failure in cases where connectivity is unavailable. Furthermore, this communication restriction limits the approach's applicability in extreme industrial environments where operating conditions affect communication and access to the system.
This paper proposes and evaluates an end-to-end adaptable and configurable anomaly detection system that uses the Internet of Things (IoT), edge computing, and Tiny-MLOps methodologies in an extreme industrial environment such as submersible pumps. The system runs on an IoT sensing Kit, based on an ESP32 microcontroller and MicroPython firmware, located near the data source. The processing pipeline on the sensing device collects data, trains an anomaly detection model, and alerts an external gateway in the event of an anomaly. The anomaly detection model uses the isolation forest algorithm, which can be trained on the microcontroller in just 1.2 to 6.4 s and detect an anomaly in less than 16 milliseconds with an ensemble of 50 trees and 80 KB of RAM. Additionally, the system employs blockchain technology to provide a transparent and irrefutable repository of anomalies.}, } @article {pmid36850847, year = {2023}, author = {Luo, G and He, B and Xiong, Y and Wang, L and Wang, H and Zhu, Z and Shi, X}, title = {An Optimized Convolutional Neural Network for the 3D Point-Cloud Compression.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {4}, pages = {}, pmid = {36850847}, issn = {1424-8220}, support = {20224BAB202016//Natural Science Foundation of Jiangxi Province/ ; 20224BAB212014//Natural Science Foundation of Jiangxi Province/ ; }, abstract = {Due to the tremendous volume taken by the 3D point-cloud models, knowing how to achieve the balance between a high compression ratio, a low distortion rate, and computing cost in point-cloud compression is a significant issue in the field of virtual reality (VR). Convolutional neural networks have been used in numerous point-cloud compression research approaches during the past few years in an effort to progress the research state. 
In this work, we have evaluated the effects of different network parameters, including neural network depth, stride, and activation function on point-cloud compression, resulting in an optimized convolutional neural network for compression. We first have analyzed earlier research on point-cloud compression based on convolutional neural networks before designing our own convolutional neural network. Then, we have modified our model parameters using the experimental data to further enhance the effect of point-cloud compression. Based on the experimental results, we have found that the neural network with the 4 layers and 2 strides parameter configuration using the Sigmoid activation function outperforms the default configuration by 208% in terms of the compression-distortion rate. The experimental results show that our findings are effective and universal and make a great contribution to the research of point-cloud compression using convolutional neural networks.}, } @article {pmid36850846, year = {2023}, author = {Liu, S and Yang, S and Zhang, H and Wu, W}, title = {A Federated Learning and Deep Reinforcement Learning-Based Method with Two Types of Agents for Computation Offload.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {4}, pages = {}, pmid = {36850846}, issn = {1424-8220}, support = {62002279//National Natural Science Foundation of China/ ; 2020JQ-077//Natural Science Basic Research Program of Shaanxi/ ; ZR2021LZH009//Shandong Provincial Natural Science Foundation/ ; }, abstract = {With the rise of latency-sensitive and computationally intensive applications in mobile edge computing (MEC) environments, the computation offloading strategy has been widely studied to meet the low-latency demands of these applications. However, the uncertainty of various tasks and the time-varying conditions of wireless networks make it difficult for mobile devices to make efficient decisions. 
The existing methods also face the problems of long-delay decisions and user data privacy disclosures. In this paper, we present the FDRT, a federated learning and deep reinforcement learning-based method with two types of agents for computation offload, to minimize the system latency. FDRT uses a multi-agent collaborative computation offloading strategy, namely, DRT. DRT divides the offloading decision into whether to compute tasks locally and whether to offload tasks to MEC servers. The designed DDQN agent considers the task information, its own resources, and the network status conditions of mobile devices, and the designed D3QN agent considers these conditions of all MEC servers in the collaborative cloud-side end MEC system; both jointly learn the optimal decision. FDRT also applies federated learning to reduce communication overhead and optimize the model training of DRT by designing a new parameter aggregation method, while protecting user data privacy. The simulation results showed that DRT effectively reduced the average task execution delay by up to 50% compared with several baselines and state-of-the-art offloading strategies. FDRT also accelerates the convergence rate of multi-agent training and reduces the training time of DRT by 61.7%.}, } @article {pmid36850813, year = {2023}, author = {Vaño, R and Lacalle, I and Sowiński, P and S-Julián, R and Palau, CE}, title = {Cloud-Native Workload Orchestration at the Edge: A Deployment Review and Future Directions.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {4}, pages = {}, pmid = {36850813}, issn = {1424-8220}, support = {101069732//European Commission/ ; }, abstract = {Cloud-native computing principles such as virtualization and orchestration are key to transferring to the promising paradigm of edge computing. Challenges of containerization, operative models and scarce availability of established tools make a thorough review indispensable.
Therefore, the authors have described the practical methods and tools found in the literature as well as in current community-led development projects, and have thoroughly exposed the future directions of the field. Container virtualization and its orchestration through Kubernetes have dominated the cloud computing domain, while major efforts have been recently recorded focused on the adaptation of these technologies to the edge. Such initiatives have addressed either the reduction of container engines and the development of specific tailored operating systems or the development of smaller K8s distributions and edge-focused adaptations (such as KubeEdge). Finally, new workload virtualization approaches, such as WebAssembly modules together with the joint orchestration of these heterogeneous workloads, seem to be the topics to pay attention to in the short to medium term.}, } @article {pmid36850794, year = {2023}, author = {Lee, S}, title = {Distributed Detection of Malicious Android Apps While Preserving Privacy Using Federated Learning.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {4}, pages = {}, pmid = {36850794}, issn = {1424-8220}, support = {2020R1F1A1063942//National Research Foundation of Korea/ ; }, abstract = {Recently, deep learning has been widely used to solve existing computing problems through large-scale data mining. Conventional training of the deep learning model is performed on a central (cloud) server that is equipped with high computing power, by integrating data via high computational intensity. However, integrating raw data from multiple clients raises privacy concerns that are increasingly being focused on. In federated learning (FL), clients train deep learning models in a distributed fashion using their local data; instead of sending raw data to a central server, they send parameter values of the trained local model to a central server for integration. 
Because FL does not transmit raw data to the outside, it is free from privacy issues. In this paper, we perform an experimental study that explores the dynamics of the FL-based Android malicious app detection method under three data distributions across clients, i.e., (i) independent and identically distributed (IID), (ii) non-IID, (iii) non-IID and unbalanced. Our experiments demonstrate that the application of FL is feasible and efficient in detecting malicious Android apps in a distributed manner on cellular networks.}, } @article {pmid36850785, year = {2023}, author = {Chang, RC and Wang, CY and Li, YH and Chiu, CD}, title = {Design of Low-Complexity Convolutional Neural Network Accelerator for Finger Vein Identification System.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {4}, pages = {}, pmid = {36850785}, issn = {1424-8220}, support = {111-2218-E-005-008//National Science and Technology Council of Taiwan, R.O.C./ ; }, mesh = {Humans ; *Neural Networks, Computer ; *Algorithms ; Biometry ; Extremities ; Laboratories ; }, abstract = {In the biometric field, vein identification is a vital process that is constrained by the invisibility of veins as well as other unique features. Moreover, users generally do not wish to have their personal information uploaded to the cloud, so edge computing has become popular for the sake of protecting user privacy. In this paper, we propose a low-complexity and lightweight convolutional neural network (CNN) and we design intellectual property (IP) for shortening the inference time in finger vein recognition. This neural network system can operate independently in client mode. After fetching the user's finger vein image via a near-infrared (NIR) camera mounted on an embedded system, vein features can be efficiently extracted by vein curving algorithms and user identification can be completed quickly. 
Better image quality and higher recognition accuracy can be obtained by combining several preprocessing techniques and the modified CNN. Experimental data were collected by the finger vein image capture equipment developed in our laboratory based on the specifications of similar products currently on the market. Extensive experiments demonstrated the practicality and robustness of the proposed finger vein identification system.}, } @article {pmid36850784, year = {2023}, author = {Mohamed, AA and Abualigah, L and Alburaikan, A and Khalifa, HAE}, title = {AOEHO: A New Hybrid Data Replication Method in Fog Computing for IoT Application.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {4}, pages = {}, pmid = {36850784}, issn = {1424-8220}, abstract = {Recently, the concept of the internet of things and its services has emerged with cloud computing. Cloud computing is a modern technology for dealing with big data to perform specified operations. The cloud addresses the problem of selecting and placing iterations across nodes in fog computing. Previous studies focused on original swarm intelligent and mathematical models; thus, we proposed a novel hybrid method based on two modern metaheuristic algorithms. This paper combined the Aquila Optimizer (AO) algorithm with the elephant herding optimization (EHO) for solving dynamic data replication problems in the fog computing environment. In the proposed method, we present a set of objectives that determine data transmission paths, choose the least cost path, reduce network bottlenecks, bandwidth, balance, and speed data transfer rates between nodes in cloud computing. A hybrid method, AOEHO, addresses the optimal and least expensive path, determines the best replication via cloud computing, and determines optimal nodes to select and place data replication near users. 
Moreover, we developed a multi-objective optimization based on the proposed AOEHO to decrease the bandwidth and enhance load balancing and cloud throughput. The proposed method is evaluated based on data replication using seven criteria. These criteria are data replication access, distance, costs, availability, SBER, popularity, and the Floyd algorithm. The experimental results show the superiority of the proposed AOEHO strategy performance over other algorithms, such as bandwidth, distance, load balancing, data transmission, and least cost path.}, } @article {pmid36850763, year = {2023}, author = {da Silva, JCF and Silva, MC and Luz, EJS and Delabrida, S and Oliveira, RAR}, title = {Using Mobile Edge AI to Detect and Map Diseases in Citrus Orchards.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {4}, pages = {}, pmid = {36850763}, issn = {1424-8220}, mesh = {*Agriculture ; Algorithms ; Benchmarking ; *Citrus ; Artificial Intelligence ; }, abstract = {Deep Learning models have presented promising results when applied to Agriculture 4.0. Among other applications, these models can be used in disease detection and fruit counting. Deep Learning models usually have many layers in the architecture and millions of parameters. This aspect hinders the use of Deep Learning on mobile devices as they require a large amount of processing power for inference. In addition, the lack of high-quality Internet connectivity in the field impedes the usage of cloud computing, pushing the processing towards edge devices. This work describes the proposal of an edge AI application to detect and map diseases in citrus orchards. The proposed system has low computational demand, enabling the use of low-footprint models for both detection and classification tasks. We initially compared AI algorithms to detect fruits on trees. Specifically, we analyzed and compared YOLO and Faster R-CNN. Then, we studied lean AI models to perform the classification task. 
In this context, we tested and compared the performance of MobileNetV2, EfficientNetV2-B0, and NASNet-Mobile. In the detection task, YOLO and Faster R-CNN had similar AI performance metrics, but YOLO was significantly faster. In the image classification task, MobileNetMobileV2 and EfficientNetV2-B0 obtained an accuracy of 100%, while NASNet-Mobile had a 98% performance. As for the timing performance, MobileNetV2 and EfficientNetV2-B0 were the best candidates, while NASNet-Mobile was significantly worse. Furthermore, MobileNetV2 had a 10% better performance than EfficientNetV2-B0. Finally, we provide a method to evaluate the results from these algorithms towards describing the disease spread using statistical parametric models and a genetic algorithm to perform the parameters' regression. With these results, we validated the proposed pipeline, enabling the usage of adequate AI models to develop a mobile edge AI solution.}, } @article {pmid36850711, year = {2023}, author = {Chen, Z and Amani, AM and Yu, X and Jalili, M}, title = {Control and Optimisation of Power Grids Using Smart Meter Data: A Review.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {4}, pages = {}, pmid = {36850711}, issn = {1424-8220}, support = {LP180101309//Australian Research Council/ ; }, abstract = {This paper provides a comprehensive review of the applications of smart meters in the control and optimisation of power grids to support a smooth energy transition towards the renewable energy future. The smart grids become more complicated due to the presence of small-scale low inertia generators and the implementation of electric vehicles (EVs), which are mainly based on intermittent and variable renewable energy resources. Optimal and reliable operation of this environment using conventional model-based approaches is very difficult. 
Advancements in measurement and communication technologies have brought the opportunity of collecting temporal or real-time data from prosumers through Advanced Metering Infrastructure (AMI). Smart metering brings the potential of applying data-driven algorithms for different power system operations and planning services, such as infrastructure sizing and upgrade and generation forecasting. It can also be used for demand-side management, especially in the presence of new technologies such as EVs, 5G/6G networks and cloud computing. These algorithms face privacy-preserving and cybersecurity challenges that need to be well addressed. This article surveys the state-of-the-art of each of these topics, reviewing applications, challenges and opportunities of using smart meters to address them. It also stipulates the challenges that smart grids present to smart meters and the benefits that smart meters can bring to smart grids. Furthermore, the paper is concluded with some expected future directions and potential research questions for smart meters, smart grids and their interplay.}, } @article {pmid36850688, year = {2023}, author = {Fathy, C and Ali, HM}, title = {A Secure IoT-Based Irrigation System for Precision Agriculture Using the Expeditious Cipher.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {4}, pages = {}, pmid = {36850688}, issn = {1424-8220}, abstract = {Due to the recent advances in the domain of smart agriculture as a result of integrating traditional agriculture and the latest information technologies including the Internet of Things (IoT), cloud computing, and artificial intelligence (AI), there is an urgent need to address the information security-related issues and challenges in this field. In this article, we propose the integration of lightweight cryptography techniques into the IoT ecosystem for smart agriculture to meet the requirements of resource-constrained IoT devices. 
Moreover, we investigate the adoption of a lightweight encryption protocol, namely, the Expeditious Cipher (X-cipher), to create a secure channel between the sensing layer and the broker in the Message Queue Telemetry Transport (MQTT) protocol as well as a secure channel between the broker and its subscribers. Our case study focuses on smart irrigation systems, and the MQTT protocol is deployed as the application messaging protocol in these systems. Smart irrigation strives to decrease the misuse of natural resources by enhancing the efficiency of agricultural irrigation. This secure channel is utilized to eliminate the main security threat in precision agriculture by protecting sensors' published data from eavesdropping and theft, as well as from unauthorized changes to sensitive data that can negatively impact crops' development. In addition, the secure channel protects the irrigation decisions made by the data analytics (DA) entity regarding the irrigation time and the quantity of water that is returned to actuators from any alteration. Performance evaluation of our chosen lightweight encryption protocol revealed an improvement in terms of power consumption, execution time, and required memory usage when compared with the Advanced Encryption Standard (AES). Moreover, the selected lightweight encryption protocol outperforms the PRESENT lightweight encryption protocol in terms of throughput and memory usage.}, } @article {pmid36850563, year = {2023}, author = {Shahid, MA and Alam, MM and Su'ud, MM}, title = {Achieving Reliability in Cloud Computing by a Novel Hybrid Approach.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {4}, pages = {}, pmid = {36850563}, issn = {1424-8220}, abstract = {Cloud computing (CC) benefits and opportunities are among the fastest growing technologies in the computer industry. 
Cloud computing's challenges include resource allocation, security, quality of service, availability, privacy, data management, performance compatibility, and fault tolerance. Fault tolerance (FT) refers to a system's ability to continue performing its intended task in the presence of defects. Fault-tolerance challenges include heterogeneity and a lack of standards, the need for automation, cloud downtime reliability, consideration for recovery point objects, recovery time objects, and cloud workload. The proposed research includes machine learning (ML) algorithms such as naïve Bayes (NB), library support vector machine (LibSVM), multinomial logistic regression (MLR), sequential minimal optimization (SMO), K-nearest neighbor (KNN), and random forest (RF) as well as a fault-tolerance method known as delta-checkpointing to achieve higher accuracy, lesser fault prediction error, and reliability. Furthermore, the secondary data were collected from the homonymous, experimental high-performance computing (HPC) system at the Swiss Federal Institute of Technology (ETH), Zurich, and the primary data were generated using virtual machines (VMs) to select the best machine learning classifier. In this article, the secondary and primary data were divided into two split ratios of 80/20 and 70/30, respectively, and cross-validation (5-fold) was used to identify more accuracy and less prediction of faults in terms of true, false, repair, and failure of virtual machines. Secondary data results show that naïve Bayes performed exceptionally well on CPU-Mem mono and multi blocks, and sequential minimal optimization performed very well on HDD mono and multi blocks in terms of accuracy and fault prediction. In the case of greater accuracy and less fault prediction, primary data results revealed that random forest performed very well in terms of accuracy and fault prediction but not with good time complexity. 
Sequential minimal optimization has good time complexity with minor differences in random forest accuracy and fault prediction. We decided to modify sequential minimal optimization. Finally, the modified sequential minimal optimization (MSMO) algorithm with the fault-tolerance delta-checkpointing (D-CP) method is proposed to improve accuracy, fault prediction error, and reliability in cloud computing.}, } @article {pmid36850350, year = {2023}, author = {Alsokhiry, F and Annuk, A and Mohamed, MA and Marinho, M}, title = {An Innovative Cloud-Fog-Based Smart Grid Scheme for Efficient Resource Utilization.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {4}, pages = {}, pmid = {36850350}, issn = {1424-8220}, abstract = {Smart grids (SGs) enhance the effectiveness, reliability, resilience, and energy-efficient operation of electrical networks. Nonetheless, SGs suffer from big data transactions which limit their capabilities and can cause delays in the optimal operation and management tasks. Therefore, it is clear that a fast and reliable architecture is needed to make big data management in SGs more efficient. This paper assesses the optimal operation of the SGs using cloud computing (CC), fog computing, and resource allocation to enhance the management problem. Technically, big data management makes SG more efficient if cloud and fog computing (CFC) are integrated. The integration of fog computing (FC) with CC minimizes cloud burden and maximizes resource allocation. There are three key features for the proposed fog layer: awareness of position, short latency, and mobility. Moreover, a CFC-driven framework is proposed to manage data among different agents. In order to make the system more efficient, FC allocates virtual machines (VMs) according to load-balancing techniques. 
In addition, the present study proposes a hybrid gray wolf differential evolution optimization algorithm (HGWDE) that brings gray wolf optimization (GWO) and improved differential evolution (IDE) together. Simulation results conducted in MATLAB verify the efficiency of the suggested algorithm according to the high data transaction and computational time. According to the results, the response time of HGWDE is 54 ms, 82.1 ms, and 81.6 ms faster than particle swarm optimization (PSO), differential evolution (DE), and GWO. HGWDE's processing time is 53 ms, 81.2 ms, and 80.6 ms faster than PSO, DE, and GWO. Although GWO is a bit more efficient than HGWDE, the difference is not very significant.}, } @article {pmid36847779, year = {2023}, author = {Krog, D and Enghoff, MB and Köhn, C}, title = {A Monte Carlo approach to study the effect of ions on the nucleation of sulfuric acid-water clusters.}, journal = {Journal of computational chemistry}, volume = {44}, number = {13}, pages = {1250-1262}, doi = {10.1002/jcc.27076}, pmid = {36847779}, issn = {1096-987X}, abstract = {The nucleation of sulfuric acid-water clusters is a significant contribution to the formation of aerosols as precursors of cloud condensation nuclei (CCN). Depending on the temperature, there is an interplay between the clustering of particles and their evaporation controlling the efficiency of cluster growth. For typical temperatures in the atmosphere, the evaporation of H2SO4·H2O clusters is more efficient than the clustering of the first, small clusters, and thus their growth is dampened at its early stages. Since the evaporation rates of small clusters containing an HSO4⁻ ion are much smaller than for purely neutral sulfuric acid clusters, they can serve as a central body for the further attachment of H2SO4·H2O molecules. We here present an innovative Monte Carlo model to study the growth of aqueous sulfuric acid clusters around central ions. 
Unlike classical thermodynamic nucleation theory or kinetic models, this model allows us to trace individual particles and thus to determine properties for each individual particle. As a benchmarking case, we have performed simulations at T = 300 K and a relative humidity of 50% with dipole and ion concentrations of c_dipole = 5 × 10⁸-10⁹ cm⁻³ and c_ion = 0-10⁷ cm⁻³. We discuss the runtime of our simulations and present the velocity distribution of ionic clusters, the size distribution of the clusters as well as the formation rate of clusters with radii R ≥ 0.85 nm. Simulations give reasonable velocity and size distributions and there is a good agreement of the formation rates with previous results, including the relevance of ions for the initial growth of sulfuric acid-water clusters. Conclusively, we present a computational method which allows studying detailed particle properties during the growth of aerosols as a precursor of CCN.}, } @article {pmid36846250, year = {2023}, author = {Gustafsson, W and Dórea, FC and Widgren, S and Frössling, J and Vidal, G and Kim, H and Cha, W and Comin, A and Rodriguez Ewerlöf, I and Rosendal, T}, title = {Data workflows and visualization in support of surveillance practice.}, journal = {Frontiers in veterinary science}, volume = {10}, number = {}, pages = {1129863}, pmid = {36846250}, issn = {2297-1769}, abstract = {The Swedish National Veterinary Institute (SVA) is working on implementing reusable and adaptable workflows for epidemiological analysis and dynamic report generation to improve disease surveillance. Important components of this work include: data access, development environment, computational resources and cloud-based management. The development environment relies on Git for code collaboration and version control and the R language for statistical computing and data visualization. The computational resources include both local and cloud-based systems, with automatic workflows managed in the cloud. 
The workflows are designed to be flexible and adaptable to changing data sources and stakeholder demands, with the ultimate goal to create a robust infrastructure for the delivery of actionable epidemiological information.}, } @article {pmid36842917, year = {2023}, author = {Johnson, E and Campos-Cerqueira, M and Jumail, A and Yusni, ASA and Salgado-Lynn, M and Fornace, K}, title = {Applications and advances in acoustic monitoring for infectious disease epidemiology.}, journal = {Trends in parasitology}, volume = {39}, number = {5}, pages = {386-399}, doi = {10.1016/j.pt.2023.01.008}, pmid = {36842917}, issn = {1471-5007}, mesh = {Animals ; Humans ; *Ecosystem ; Biodiversity ; Animals, Wild ; Acoustics ; *Communicable Diseases/epidemiology ; }, abstract = {Emerging infectious diseases continue to pose a significant burden on global public health, and there is a critical need to better understand transmission dynamics arising at the interface of human activity and wildlife habitats. Passive acoustic monitoring (PAM), more typically applied to questions of biodiversity and conservation, provides an opportunity to collect and analyse audio data in relative real time and at low cost. Acoustic methods are increasingly accessible, with the expansion of cloud-based computing, low-cost hardware, and machine learning approaches. 
Paired with purposeful experimental design, acoustic data can complement existing surveillance methods and provide a novel toolkit to investigate the key biological parameters and ecological interactions that underpin infectious disease epidemiology.}, } @article {pmid36842572, year = {2023}, author = {Andaryani, S and Nourani, V and Abbasnejad, H and Koch, J and Stisen, S and Klöve, B and Haghighi, AT}, title = {Spatio-temporal analysis of climate and irrigated vegetation cover changes and their role in lake water level depletion using a pixel-based approach and canonical correlation analysis.}, journal = {The Science of the total environment}, volume = {873}, number = {}, pages = {162326}, doi = {10.1016/j.scitotenv.2023.162326}, pmid = {36842572}, issn = {1879-1026}, abstract = {Lake Urmia, located in northwest Iran, was among the world's largest hypersaline lakes but has now experienced a 7 m decrease in water level, from 1278 m to 1271 m over 1996 to 2019. There is doubt as to whether the pixel-based analysis (PBA) approach's answer to the lake's drying is a natural process or a result of human intervention. Here, a non-parametric Mann-Kendall trend test was applied to a 21-year record (2000-2020) of satellite data products, i.e., temperature, precipitation, snow cover, and irrigated vegetation cover (IVC). The Google Earth Engine (GEE) cloud-computing platform utilized over 10 sub-basins in three provinces surrounding Lake Urmia to obtain and calculate pixel-based monthly and seasonal scales for the products. Canonical correlation analysis was employed in order to understand the correlation between variables and lake water level (LWL). The trend analysis results show significant increases in temperature (from 1 to 2 °C during 2000-2020) over May-September, i.e., in 87 %-25 % of the basin. However, precipitation has seen an insignificant decrease (from 3 to 9 mm during 2000-2019) in the rainy months (April and May). 
Snow cover has also decreased and, when compared with precipitation, shows a change in precipitation patterns from snow to rain. IVC has increased significantly in all sub-basins, especially the southern parts of the lake, with the West province making the largest contribution to the development of IVC. According to the PBA, this analysis underpins the very high contribution of IVC to the drying of the lake in more detail, although the contribution of climate change in this matter is also apparent. The development of IVC leads to increased water consumption through evapotranspiration and excess evaporation caused by the storage of water for irrigation. Due to the decreased runoff caused by consumption exceeding the basin's capacity, the lake cannot be fed sufficiently.}, } @article {pmid36832716, year = {2023}, author = {Yuan, L and Wang, Z and Sun, P and Wei, Y}, title = {An Efficient Virtual Machine Consolidation Algorithm for Cloud Computing.}, journal = {Entropy (Basel, Switzerland)}, volume = {25}, number = {2}, pages = {}, pmid = {36832716}, issn = {1099-4300}, abstract = {With the rapid development of integration in blockchain and IoT, virtual machine consolidation (VMC) has become a heated topic because it can effectively improve the energy efficiency and service quality of cloud computing in the blockchain. The current VMC algorithm is not effective enough because it does not regard the load of the virtual machine (VM) as an analyzed time series. Therefore, we proposed a VMC algorithm based on load forecast to improve efficiency. First, we proposed a migration VM selection strategy based on load increment prediction called LIP. Combined with the current load and load increment, this strategy can effectively improve the accuracy of selecting VM from the overloaded physical machines (PMs). Then, we proposed a VM migration point selection strategy based on the load sequence prediction called SIR. 
We merged VMs with complementary load series into the same PM, effectively improving the stability of the PM load, thereby reducing the service level agreement violation (SLAV) and the number of VM migrations due to the resource competition of the PM. Finally, we proposed a better virtual machine consolidation (VMC) algorithm based on the load prediction of LIP and SIR. The experimental results show that our VMC algorithm can effectively improve energy efficiency.}, } @article {pmid36832692, year = {2023}, author = {Tsuruyama, T}, title = {Kullback-Leibler Divergence of an Open-Queuing Network of a Cell-Signal-Transduction Cascade.}, journal = {Entropy (Basel, Switzerland)}, volume = {25}, number = {2}, pages = {}, pmid = {36832692}, issn = {1099-4300}, support = {P2013-201//Ministry of Education, Culture, Sports, Science and Technology/ ; }, abstract = {Queuing networks (QNs) are essential models in operations research, with applications in cloud computing and healthcare systems. However, few studies have analyzed the cell's biological signal transduction using QN theory. This study entailed the modeling of signal transduction as an open Jackson's QN (JQN) to theoretically determine cell signal transduction, under the assumption that the signal mediator queues in the cytoplasm, and the mediator is exchanged from one signaling molecule to another through interactions between the signaling molecules. Each signaling molecule was regarded as a network node in the JQN. The JQN Kullback-Leibler divergence (KLD) was defined using the ratio of the queuing time (λ) to the exchange time (μ), λ/μ. The mitogen-activated protein kinase (MAPK) signal-cascade model was applied, and the KLD rate per signal-transduction-period was shown to be conserved when the KLD was maximized. Our experimental study on MAPK cascade supported this conclusion. This result is similar to the entropy-rate conservation of chemical kinetics and entropy coding reported in our previous studies. 
Thus, JQN can be used as a novel framework to analyze signal transduction.}, } @article {pmid36832652, year = {2023}, author = {Chen, D and Zhang, Y}, title = {Diversity-Aware Marine Predators Algorithm for Task Scheduling in Cloud Computing.}, journal = {Entropy (Basel, Switzerland)}, volume = {25}, number = {2}, pages = {}, pmid = {36832652}, issn = {1099-4300}, abstract = {With the increase in cloud users and internet of things (IoT) applications, advanced task scheduling (TS) methods are required to reasonably schedule tasks in cloud computing. This study proposes a diversity-aware marine predators algorithm (DAMPA) for solving TS in cloud computing. In DAMPA, to enhance the premature convergence avoidance ability, the predator crowding degree ranking and comprehensive learning strategies were adopted in the second stage to maintain the population diversity and thereby inhibit premature convergence. Additionally, a stage-independent control of the stepsize-scaling strategy that uses different control parameters in three stages was designed to balance the exploration and exploitation abilities. Two case experiments were conducted to evaluate the proposed algorithm. Compared with the latest algorithm, in the first case, DAMPA reduced the makespan and energy consumption by 21.06% and 23.47% at most, respectively. In the second case, the makespan and energy consumption are reduced by 34.35% and 38.60% on average, respectively. 
Meanwhile, the algorithm achieved greater throughput in both cases.}, } @article {pmid36832648, year = {2023}, author = {Liu, Y and Luo, J and Yang, Y and Wang, X and Gheisari, M and Luo, F}, title = {ShrewdAttack: Low Cost High Accuracy Model Extraction.}, journal = {Entropy (Basel, Switzerland)}, volume = {25}, number = {2}, pages = {}, pmid = {36832648}, issn = {1099-4300}, support = {JCYJ20190806142601687//Shenzhen Basic Research (General Project)/ ; GXWD20201230155427003-20200821160539001//Shenzhen Stable Supporting Program (General Project)/ ; PCL2021A02//Peng Cheng Laboratory Project/ ; 2022B1212010005//Guangdong Provincial Key Laboratory of Novel Security Intelligence Technologies/ ; }, abstract = {Machine learning as a service (MLaaS) plays an essential role in the current ecosystem. Enterprises do not need to train models by themselves separately. Instead, they can use well-trained models provided by MLaaS to support business activities. However, such an ecosystem could be threatened by model extraction attacks-an attacker steals the functionality of a trained model provided by MLaaS and builds a substitute model locally. In this paper, we proposed a model extraction method with low query costs and high accuracy. In particular, we use pre-trained models and task-relevant data to decrease the size of query data. We use instance selection to reduce query samples. In addition, we divided query data into two categories, namely low-confidence data and high-confidence data, to reduce the budget and improve accuracy. We then conducted attacks on two models provided by Microsoft Azure as our experiments. The results show that our scheme achieves high accuracy at low cost, with the substitution models achieving 96.10% and 95.24% substitution while querying only 7.32% and 5.30% of their training data on the two models, respectively. This new attack approach creates additional security challenges for models deployed on cloud platforms. 
It raises the need for novel mitigation strategies to secure the models. In future work, generative adversarial networks and model inversion attacks can be used to generate more diverse data to be applied to the attacks.}, } @article {pmid36832632, year = {2023}, author = {Byrne, E and Gnilke, OW and Kliewer, J}, title = {Straggler- and Adversary-Tolerant Secure Distributed Matrix Multiplication Using Polynomial Codes.}, journal = {Entropy (Basel, Switzerland)}, volume = {25}, number = {2}, pages = {}, pmid = {36832632}, issn = {1099-4300}, support = {54584//UCD Seed Funding - Horizon Scanning Scheme/ ; 1815322, 1908756, 2107370//U.S. National Science Foundation grants/ ; }, abstract = {Large matrix multiplications commonly take place in large-scale machine-learning applications. Often, the sheer size of these matrices prevent carrying out the multiplication at a single server. Therefore, these operations are typically offloaded to a distributed computing platform with a master server and a large amount of workers in the cloud, operating in parallel. For such distributed platforms, it has been recently shown that coding over the input data matrices can reduce the computational delay by introducing a tolerance against straggling workers, i.e., workers for which execution time significantly lags with respect to the average. In addition to exact recovery, we impose a security constraint on both matrices to be multiplied. Specifically, we assume that workers can collude and eavesdrop on the content of these matrices. For this problem, we introduce a new class of polynomial codes with fewer non-zero coefficients than the degree +1. We provide closed-form expressions for the recovery threshold and show that our construction improves the recovery threshold of existing schemes in the literature, in particular for larger matrix dimensions and a moderate to large number of colluding workers. 
In the absence of any security constraints, we show that our construction is optimal in terms of recovery threshold.}, } @article {pmid36819757, year = {2023}, author = {Borhani, F and Shafiepour Motlagh, M and Ehsani, AH and Rashidi, Y and Ghahremanloo, M and Amani, M and Moghimi, A}, title = {Current Status and Future Forecast of Short-lived Climate-Forced Ozone in Tehran, Iran, derived from Ground-Based and Satellite Observations.}, journal = {Water, air, and soil pollution}, volume = {234}, number = {2}, pages = {134}, pmid = {36819757}, issn = {0049-6979}, abstract = {In this study, the distribution and alterations of ozone concentrations in Tehran, Iran, in 2021 were investigated. The impacts of precursors (i.e., CO, NO2, and NO) on ozone were examined using the data collected over 12 months (i.e., January 2021 to December 2021) from 21 stations of the Air Quality Control Company (AQCC). The results of monthly heat mapping of tropospheric ozone concentrations indicated the lowest value in December and the highest value in July. The lowest and highest seasonal concentrations were in winter and summer, respectively. Moreover, there was a negative correlation between ozone and its precursors. The Inverse Distance Weighting (IDW) method was then implemented to obtain air pollution zoning maps. Then, ozone concentration modeled by the IDW method was compared with the average monthly change of total column density of ozone derived from Sentinel-5 satellite data in the Google Earth Engine (GEE) cloud platform. A good agreement was discovered despite the harsh circumstances that both ground-based and satellite measurements were subjected to. The results obtained from both datasets showed that the west of the city of Tehran had the highest averaged O3 concentration. In this study, the status of the concentration of ozone precursors and tropospheric ozone in 2022 was also predicted. 
For this purpose, the Box-Jenkins Seasonal Autoregressive Integrated Moving Average (SARIMA) approach was implemented to predict the monthly air quality parameters. Overall, it was observed that the SARIMA approach was an efficient tool for forecasting air quality. Finally, the results showed that the trends of ozone obtained from terrestrial and satellite observations throughout 2021 were slightly different due to the contribution of the tropospheric ozone precursor concentration and meteorology conditions.}, } @article {pmid36818051, year = {2023}, author = {Stewart, CA and Costa, CM and Wernert, JA and Snapp-Childs, W and Bland, M and Blood, P and Campbell, T and Couvares, P and Fischer, J and Hancock, DY and Hart, DL and Jankowski, H and Knepper, R and McMullen, DF and Mehringer, S and Pierce, M and Rogers, G and Sinkovits, RS and Towns, J}, title = {Use of accounting concepts to study research: return on investment in XSEDE, a US cyberinfrastructure service.}, journal = {Scientometrics}, volume = {128}, number = {6}, pages = {3225-3255}, pmid = {36818051}, issn = {0138-9130}, abstract = {This paper uses accounting concepts-particularly the concept of Return on Investment (ROI)-to reveal the quantitative value of scientific research pertaining to a major US cyberinfrastructure project (XSEDE-the eXtreme Science and Engineering Discovery Environment). XSEDE provides operational and support services for advanced information technology systems, cloud systems, and supercomputers supporting non-classified US research, with an average budget for XSEDE of US$20M+ per year over the period studied (2014-2021). To assess the financial effectiveness of these services, we calculated a proxy for ROI, and converted quantitative measures of XSEDE service delivery into financial values using costs for service from the US marketplace. 
We calculated two estimates of ROI: a Conservative Estimate, functioning as a lower bound and using publicly available data for a lower valuation of XSEDE services; and a Best Available Estimate, functioning as a more accurate estimate, but using some unpublished valuation data. Using the largest dataset assembled for analysis of ROI for a cyberinfrastructure project, we found a Conservative Estimate of ROI of 1.87, and a Best Available Estimate of ROI of 3.24. Through accounting methods, we show that XSEDE services offer excellent value to the US government, that the services offered uniquely by XSEDE (that is, not otherwise available for purchase) were the most valuable to the facilitation of US research activities, and that accounting-based concepts hold great value for understanding the mechanisms of scientific research generally.}, } @article {pmid36812648, year = {2022}, author = {Jiang, P and Gao, F and Liu, S and Zhang, S and Zhang, X and Xia, Z and Zhang, W and Jiang, T and Zhu, JL and Zhang, Z and Shu, Q and Snyder, M and Li, J}, title = {Longitudinally tracking personal physiomes for precision management of childhood epilepsy.}, journal = {PLOS digital health}, volume = {1}, number = {12}, pages = {e0000161}, pmid = {36812648}, issn = {2767-3170}, abstract = {Our current understanding of human physiology and activities is largely derived from sparse and discrete individual clinical measurements. To achieve precise, proactive, and effective health management of an individual, longitudinal, and dense tracking of personal physiomes and activities is required, which is only feasible by utilizing wearable biosensors. As a pilot study, we implemented a cloud computing infrastructure to integrate wearable sensors, mobile computing, digital signal processing, and machine learning to improve early detection of seizure onsets in children. 
We recruited 99 children diagnosed with epilepsy and longitudinally tracked them at single-second resolution using a wearable wristband, and prospectively acquired more than one billion data points. This unique dataset offered us an opportunity to quantify physiological dynamics (e.g., heart rate, stress response) across age groups and to identify physiological irregularities upon epilepsy onset. The high-dimensional personal physiome and activity profiles displayed a clustering pattern anchored by patient age groups. These signatory patterns included strong age and sex-specific effects on varying circadian rhythms and stress responses across major childhood developmental stages. For each patient, we further compared the physiological and activity profiles associated with seizure onsets with the personal baseline and developed a machine learning framework to accurately capture these onset moments. The performance of this framework was further replicated in another independent patient cohort. We next referenced our predictions with the electroencephalogram (EEG) signals on selected patients and demonstrated that our approach could detect subtle seizures not recognized by humans and could detect seizures prior to clinical onset. Our work demonstrated the feasibility of a real-time mobile infrastructure in a clinical setting, which has the potential to be valuable in caring for epileptic patients. 
Extension of such a system has the potential to be leveraged as a health management device or longitudinal phenotyping tool in clinical cohort studies.}, } @article {pmid36812592, year = {2023}, author = {Tabata, K and Mihara, H and Nanjo, S and Motoo, I and Ando, T and Teramoto, A and Fujinami, H and Yasuda, I}, title = {Artificial intelligence model for analyzing colonic endoscopy images to detect changes associated with irritable bowel syndrome.}, journal = {PLOS digital health}, volume = {2}, number = {2}, pages = {e0000058}, pmid = {36812592}, issn = {2767-3170}, abstract = {IBS is not considered to be an organic disease and usually shows no abnormality on lower gastrointestinal endoscopy, although biofilm formation, dysbiosis, and histological microinflammation have recently been reported in patients with IBS. In this study, we investigated whether an artificial intelligence (AI) colorectal image model can identify minute endoscopic changes, which cannot typically be detected by human investigators, that are associated with IBS. Study subjects were identified based on electronic medical records and categorized as IBS (Group I; n = 11), IBS with predominant constipation (IBS-C; Group C; n = 12), and IBS with predominant diarrhea (IBS-D; Group D; n = 12). The study subjects had no other diseases. Colonoscopy images from IBS patients and from asymptomatic healthy subjects (Group N; n = 88) were obtained. Google Cloud Platform AutoML Vision (single-label classification) was used to construct AI image models to calculate sensitivity, specificity, predictive value, and AUC. A total of 2479, 382, 538, and 484 images were randomly selected for Groups N, I, C and D, respectively. The AUC of the model discriminating between Group N and I was 0.95. Sensitivity, specificity, positive predictive value, and negative predictive value of Group I detection were 30.8%, 97.6%, 66.7%, and 90.2%, respectively. 
The overall AUC of the model discriminating between Groups N, C, and D was 0.83; sensitivity, specificity, and positive predictive value of Group N were 87.5%, 46.2%, and 79.9%, respectively. Using the image AI model, colonoscopy images of IBS could be discriminated from healthy subjects at AUC 0.95. Prospective studies are needed to further validate whether this externally validated model has similar diagnostic capabilities at other facilities and whether it can be used to determine treatment efficacy.}, } @article {pmid36805192, year = {2023}, author = {Brinkhaus, HO and Rajan, K and Schaub, J and Zielesny, A and Steinbeck, C}, title = {Open data and algorithms for open science in AI-driven molecular informatics.}, journal = {Current opinion in structural biology}, volume = {79}, number = {}, pages = {102542}, doi = {10.1016/j.sbi.2023.102542}, pmid = {36805192}, issn = {1879-033X}, mesh = {*Artificial Intelligence ; *Machine Learning ; Algorithms ; Software ; Informatics ; }, abstract = {Recent years have seen a sharp increase in the development of deep learning and artificial intelligence-based molecular informatics. There has been a growing interest in applying deep learning to several subfields, including the digital transformation of synthetic chemistry, extraction of chemical information from the scientific literature, and AI in natural product-based drug discovery. The application of AI to molecular informatics is still constrained by the fact that most of the data used for training and testing deep learning models are not available as FAIR and open data. As open science practices continue to grow in popularity, initiatives which support FAIR and open data as well as open-source software have emerged. It is becoming increasingly important for researchers in the field of molecular informatics to embrace open science and to submit data and software in open repositories. 
With the advent of open-source deep learning frameworks and cloud computing platforms, academic researchers are now able to deploy and test their own deep learning models with ease. With the development of new and faster hardware for deep learning and the increasing number of initiatives towards digital research data management infrastructures, as well as a culture promoting open data, open source, and open science, AI-driven molecular informatics will continue to grow. This review examines the current state of open data and open algorithms in molecular informatics, as well as ways in which they could be improved in future.}, } @article {pmid36797269, year = {2023}, author = {Lall, A and Tallur, S}, title = {Deep reinforcement learning-based pairwise DNA sequence alignment method compatible with embedded edge devices.}, journal = {Scientific reports}, volume = {13}, number = {1}, pages = {2773}, pmid = {36797269}, issn = {2045-2322}, mesh = {Sequence Alignment ; *Algorithms ; *Neural Networks, Computer ; Computers ; DNA ; }, abstract = {Sequence alignment is an essential component of bioinformatics, for identifying regions of similarity that may indicate functional, structural, or evolutionary relationships between the sequences. Genome-based diagnostics relying on DNA sequencing have benefited hugely from the boom in computing power in recent decades, particularly due to cloud-computing and the rise of graphics processing units (GPUs) and other advanced computing platforms for running advanced algorithms. Translating the success of such breakthroughs in diagnostics to affordable solutions for low-cost healthcare requires development of algorithms that can operate on the edge instead of in the cloud, using low-cost and low-power electronic systems such as microcontrollers and field programmable gate arrays (FPGAs). 
In this work, we present EdgeAlign, a deep reinforcement learning based method for performing pairwise DNA sequence alignment on stand-alone edge devices. EdgeAlign uses deep reinforcement learning to train a deep Q-network (DQN) agent for performing sequence alignment on fixed length sub-sequences, using a sliding window that is scanned over the length of the entire sequence. The hardware resource-consumption for implementing this scheme is thus independent of the lengths of the sequences to be aligned, and is further optimized using a novel AutoML based method for neural network model size reduction. Unlike other algorithms for sequence alignment reported in literature, the model demonstrated in this work is highly compact and deployed on two edge devices (NVIDIA Jetson Nano Developer Kit and Digilent Arty A7-100T, containing Xilinx XC7A35T Artix-7 FPGA) for demonstration of alignment for sequences from the publicly available Influenza sequences at the National Center for Biotechnology Information (NCBI) Virus Data Hub.}, } @article {pmid36793418, year = {2023}, author = {A, A and Dahan, F and Alroobaea, R and Alghamdi, WY and Mustafa Khaja Mohammed, and Hajjej, F and Deema Mohammed Alsekait, and Raahemifar, K}, title = {A smart IoMT based architecture for E-healthcare patient monitoring system using artificial intelligence algorithms.}, journal = {Frontiers in physiology}, volume = {14}, number = {}, pages = {1125952}, pmid = {36793418}, issn = {1664-042X}, abstract = {Generally, cloud computing is integrated with wireless sensor network to enable the monitoring systems and it improves the quality of service. The sensed patient data are monitored with biosensors without considering the patient datatype and this minimizes the work of hospitals and physicians. Wearable sensor devices and the Internet of Medical Things (IoMT) have changed the health service, resulting in faster monitoring, prediction, diagnosis, and treatment. 
Nevertheless, there have been difficulties that need to be resolved by the use of AI methods. The primary goal of this study is to introduce an AI-powered, IoMT telemedicine infrastructure for E-healthcare. In this paper, initially the data collection from the patient body is made using the sensed devices and the information are transmitted through the gateway/Wi-Fi and is stored in IoMT cloud repository. The stored information is then acquired, preprocessed to refine the collected data. The features from preprocessed data are extracted by means of high dimensional Linear Discriminant analysis (LDA) and the best optimal features are selected using reconfigured multi-objective cuckoo search algorithm (CSA). The prediction of abnormal/normal data is made by using Hybrid ResNet 18 and GoogleNet classifier (HRGC). The decision is then made whether to send alert to hospitals/healthcare personnel or not. If the expected results are satisfactory, the participant information is saved in the internet for later use. At last, the performance analysis is carried so as to validate the efficiency of proposed mechanism.}, } @article {pmid36789435, year = {2023}, author = {Camacho, C and Boratyn, GM and Joukov, V and Alvarez, RV and Madden, TL}, title = {ElasticBLAST: Accelerating Sequence Search via Cloud Computing.}, journal = {bioRxiv : the preprint server for biology}, volume = {}, number = {}, pages = {}, pmid = {36789435}, abstract = {BACKGROUND: Biomedical researchers use alignments produced by BLAST (Basic Local Alignment Search Tool) to categorize their query sequences. Producing such alignments is an essential bioinformatics task that is well suited for the cloud. The cloud can perform many calculations quickly as well as store and access large volumes of data. Bioinformaticians can also use it to collaborate with other researchers, sharing their results, datasets and even their pipelines on a common platform.

RESULTS: We present ElasticBLAST, a cloud native application to perform BLAST alignments in the cloud. ElasticBLAST can handle anywhere from a few to many thousands of queries and run the searches on thousands of virtual CPUs (if desired), deleting resources when it is done. It uses cloud native tools for orchestration and can request discounted instances, lowering cloud costs for users. It is supported on Amazon Web Services and Google Cloud Platform. It can search BLAST databases that are user provided or from the National Center for Biotechnology Information.

CONCLUSION: We show that ElasticBLAST is a useful application that can efficiently perform BLAST searches for the user in the cloud, demonstrating that with two examples. At the same time, it hides much of the complexity of working in the cloud, lowering the threshold to move work to the cloud.}, } @article {pmid36789367, year = {2023}, author = {Guo, YG and Yin, Q and Wang, Y and Xu, J and Zhu, L}, title = {Efficiency and optimization of government service resource allocation in a cloud computing environment.}, journal = {Journal of cloud computing (Heidelberg, Germany)}, volume = {12}, number = {1}, pages = {18}, pmid = {36789367}, issn = {2192-113X}, abstract = {According to the connotation and structure of government service resources, data of government service resources in L city from 2019 to 2021 are used to calculate the efficiency of government service resource allocation in each county and region in different periods, particularly by adding the government cloud platform and cloud computing resources to the government service resource data and applying the data envelopment analysis (DEA) method, which has practical significance for the development and innovation of government services. On this basis, patterns and evolutionary trends of government service resource allocation efficiency in each region during the study period are analyzed and discussed. Results are as follows. i) Overall efficiency level in the allocation of government service resources in L city is not high, showing an increasing annual trend among the high and low staggering. ii) Relative difference of allocation efficiency of government service resources is a common phenomenon of regional development, the existence and evolution of which are the direct or indirect influence and reflection of various aspects, such as economic strength and reform effort. 
iii) Data analysis for the specific points indicates that increased input does not necessarily lead to increased efficiency, some indicators have insufficient input or redundant output. Therefore, optimization of the physical, human, and financial resource allocation methods; and the intelligent online processing of government services achieved by the adoption of government cloud platform and cloud computing resources are the current objective choices to realize maximum efficiency in the allocation of government service resources.}, } @article {pmid36788990, year = {2023}, author = {Shrestha, S and Stapp, J and Taylor, M and Leach, R and Carreiro, S and Indic, P}, title = {Towards Device Agnostic Detection of Stress and Craving in Patients with Substance Use Disorder.}, journal = {Proceedings of the ... Annual Hawaii International Conference on System Sciences. Annual Hawaii International Conference on System Sciences}, volume = {2023}, number = {}, pages = {3156-3163}, pmid = {36788990}, issn = {1530-1605}, support = {R44 DA046151/DA/NIDA NIH HHS/United States ; }, abstract = {Novel technologies have great potential to improve the treatment of individuals with substance use disorder (SUD) and to reduce the current high rate of relapse (i.e. return to drug use). Wearable sensor-based systems that continuously measure physiology can provide information about behavior and opportunities for real-time interventions. We have previously developed an mHealth system which includes a wearable sensor, a mobile phone app, and a cloud-based server with embedded machine learning algorithms which detect stress and craving. The system functions as a just-in-time intervention tool to help patients de-escalate and as a tool for clinicians to tailor treatment based on stress and craving patterns observed. 
However, in our pilot work we found that to deploy the system to diverse socioeconomic populations and to increase usability, the system must be able to work efficiently with cost-effective and popular commercial wearable devices. To make the system device agnostic, methods to transform the data from a commercially available wearable for use in algorithms developed from research grade wearable sensor are proposed. The accuracy of these transformations in detecting stress and craving in individuals with SUD is further explored.}, } @article {pmid36785195, year = {2023}, author = {Zhao, Y and Bu, JW and Liu, W and Ji, JH and Yang, QH and Lin, SF}, title = {Implementation of a full-color holographic system using RGB-D salient object detection and divided point cloud gridding.}, journal = {Optics express}, volume = {31}, number = {2}, pages = {1641-1655}, doi = {10.1364/OE.477666}, pmid = {36785195}, issn = {1094-4087}, abstract = {At present, a real objects-based full-color holographic system usually uses a digital single-lens reflex (DSLR) camera array or depth camera to collect data. It then relies on a spatial light modulator to modulate the input light source for the reconstruction of the 3-D scene of the real objects. However, the main challenges the high-quality holographic 3-D display faced were the limitation of generation speed and the low accuracy of the computer-generated holograms. This research generates more effective and accurate point cloud data by developing an RGB-D salient object detection model in the acquisition unit. In addition, a divided point cloud gridding method is proposed to enhance the computing speed of hologram generation. In the RGB channels, we categorized each object point into depth grids with identical depth values. The depth grids are divided into M × N parts, and only the effective parts will be calculated. Compared with traditional methods, the calculation time is dramatically reduced. 
The feasibility of our proposed approach is established through experiments.}, } @article {pmid36776787, year = {2023}, author = {Zahid, MA and Shafiq, B and Vaidya, J and Afzal, A and Shamail, S}, title = {Collaborative Business Process Fault Resolution in the Services Cloud.}, journal = {IEEE transactions on services computing}, volume = {16}, number = {1}, pages = {162-176}, doi = {10.1109/tsc.2021.3112525}, pmid = {36776787}, issn = {1939-1374}, support = {R01 GM118574/GM/NIGMS NIH HHS/United States ; R35 GM134927/GM/NIGMS NIH HHS/United States ; }, abstract = {The emergence of cloud and edge computing has enabled rapid development and deployment of Internet-centric distributed applications. There are many platforms and tools that can facilitate users to develop distributed business process (BP) applications by composing relevant service components in a plug and play manner. However, there is no guarantee that a BP application developed in this way is fault-free. In this paper, we formalize the problem of collaborative BP fault resolution which aims to utilize information from existing fault-free BPs that use similar services to resolve faults in a user developed BP. We present an approach based on association analysis of pairwise transformations between a faulty BP and existing BPs to identify the smallest possible set of transformations to resolve the fault(s) in the user developed BP. 
An extensive experimental evaluation over both synthetically generated faulty BPs and real BPs developed by users shows the effectiveness of our approach.}, } @article {pmid36761837, year = {2022}, author = {Tercan, B and Qin, G and Kim, TK and Aguilar, B and Phan, J and Longabaugh, W and Pot, D and Kemp, CJ and Chambwe, N and Shmulevich, I}, title = {SL-Cloud: A Cloud-based resource to support synthetic lethal interaction discovery.}, journal = {F1000Research}, volume = {11}, number = {}, pages = {493}, pmid = {36761837}, issn = {2046-1402}, support = {U01 CA217883/CA/NCI NIH HHS/United States ; P01 CA077852/CA/NCI NIH HHS/United States ; HHSN261201500003I/CA/NCI NIH HHS/United States ; HHSN261201400008C/CA/NCI NIH HHS/United States ; }, mesh = {Humans ; *Cloud Computing ; *Neoplasms/genetics ; Systems Biology ; Multiomics ; }, abstract = {Synthetic lethal interactions (SLIs), genetic interactions in which the simultaneous inactivation of two genes leads to a lethal phenotype, are promising targets for therapeutic intervention in cancer, as exemplified by the recent success of PARP inhibitors in treating BRCA1/2-deficient tumors. We present SL-Cloud, a new component of the Institute for Systems Biology Cancer Gateway in the Cloud (ISB-CGC), that provides an integrated framework of cloud-hosted data resources and curated workflows to enable facile prediction of SLIs. This resource addresses two main challenges related to SLI inference: the need to wrangle and preprocess large multi-omic datasets and the availability of multiple comparable prediction approaches. SL-Cloud enables customizable computational inference of SLIs and testing of prediction approaches across multiple datasets. 
We anticipate that cancer researchers will find utility in this tool for discovery of SLIs to support further investigation into potential drug targets for anticancer therapies.}, } @article {pmid36772751, year = {2023}, author = {Gayathri, R and Usharani, S and Mahdal, M and Vezhavendhan, R and Vincent, R and Rajesh, M and Elangovan, M}, title = {Detection and Mitigation of IoT-Based Attacks Using SNMP and Moving Target Defense Techniques.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {3}, pages = {}, pmid = {36772751}, issn = {1424-8220}, support = {SP2022/60//AEC (Czechia)/ ; }, abstract = {This paper proposes a solution for ensuring the security of IoT devices in the cloud environment by protecting against distributed denial-of-service (DDoS) and false data injection attacks. The proposed solution is based on the integration of simple network management protocol (SNMP), Kullback-Leibler distance (KLD), access control rules (ACL), and moving target defense (MTD) techniques. The SNMP and KLD techniques are used to detect DDoS and false data sharing attacks, while the ACL and MTD techniques are applied to mitigate these attacks by hardening the target and reducing the attack surface. The effectiveness of the proposed framework is validated through experimental simulations on the Amazon Web Service (AWS) platform, which shows a significant reduction in attack probabilities and delays. The integration of IoT and cloud technologies is a powerful combination that can deliver customized and critical solutions to major business vendors. However, ensuring the confidentiality and security of data among IoT devices, storage, and access to the cloud is crucial to maintaining trust among internet users. 
This paper demonstrates the importance of implementing robust security measures to protect IoT devices in the cloud environment and highlights the potential of the proposed solution in protecting against DDoS and false data injection attacks.}, } @article {pmid36772680, year = {2023}, author = {Bourechak, A and Zedadra, O and Kouahla, MN and Guerrieri, A and Seridi, H and Fortino, G}, title = {At the Confluence of Artificial Intelligence and Edge Computing in IoT-Based Applications: A Review and New Perspectives.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {3}, pages = {}, pmid = {36772680}, issn = {1424-8220}, support = {CUP H24I17000070001//Italian MIUR/ ; #101092912//European Union/ ; IR0000013//European Union/ ; }, abstract = {Given its advantages in low latency, fast response, context-aware services, mobility, and privacy preservation, edge computing has emerged as the key support for intelligent applications and 5G/6G Internet of things (IoT) networks. This technology extends the cloud by providing intermediate services at the edge of the network and improving the quality of service for latency-sensitive applications. Many AI-based solutions with machine learning, deep learning, and swarm intelligence have exhibited the high potential to perform intelligent cognitive sensing, intelligent network management, big data analytics, and security enhancement for edge-based smart applications. Despite its many benefits, there are still concerns about the required capabilities of intelligent edge computing to deal with the computational complexity of machine learning techniques for big IoT data analytics. Resource constraints of edge computing, distributed computing, efficient orchestration, and synchronization of resources are all factors that require attention for quality of service improvement and cost-effective development of edge-based smart applications. 
In this context, this paper aims to explore the confluence of AI and edge in many application domains in order to leverage the potential of the existing research around these factors and identify new perspectives. The confluence of edge computing and AI improves the quality of user experience in emergency situations, such as in the Internet of vehicles, where critical inaccuracies or delays can lead to damage and accidents. These are the same factors that most studies have used to evaluate the success of an edge-based application. In this review, we first provide an in-depth analysis of the state of the art of AI in edge-based applications with a focus on eight application areas: smart agriculture, smart environment, smart grid, smart healthcare, smart industry, smart education, smart transportation, and security and privacy. Then, we present a qualitative comparison that emphasizes the main objective of the confluence, the roles and the use of artificial intelligence at the network edge, and the key enabling technologies for edge analytics. Then, open challenges, future research directions, and perspectives are identified and discussed. Finally, some conclusions are drawn.}, } @article {pmid36772662, year = {2023}, author = {Witanto, EN and Stanley, B and Lee, SG}, title = {Distributed Data Integrity Verification Scheme in Multi-Cloud Environment.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {3}, pages = {}, pmid = {36772662}, issn = {1424-8220}, support = {2018R1D1A1B07047601//National Research Foundation of Korea/ ; }, abstract = {Most existing data integrity auditing protocols in cloud storage rely on proof of probabilistic data possession. Consequently, the sampling rate of data integrity verification is low to prevent expensive costs to the auditor. However, in the case of a multi-cloud environment, the amount of stored data will be huge. As a result, a higher sampling rate is needed. 
It will also have an increased cost for the auditor as a consequence. Therefore, this paper proposes a blockchain-based distributed data integrity verification protocol in multi-cloud environments that enables data verification using multi-verifiers. The proposed scheme aims to increase the sampling rate of data verification without increasing the costs significantly. The performance analysis shows that this protocol achieved a lower time consumption required for verification tasks using multi-verifiers than a single verifier. Furthermore, utilizing multi-verifiers also decreases each verifier's computation and communication costs.}, } @article {pmid36772584, year = {2023}, author = {Alexandrescu, A}, title = {Parallel Processing of Sensor Data in a Distributed Rules Engine Environment through Clustering and Data Flow Reconfiguration.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {3}, pages = {}, pmid = {36772584}, issn = {1424-8220}, abstract = {An emerging reality is the development of smart buildings and cities, which improve residents' comfort. These environments employ multiple sensor networks, whose data must be acquired and processed in real time by multiple rule engines, which trigger events that enable specific actuators. The problem is how to handle those data in a scalable manner by using multiple processing instances to maximize the system throughput. This paper considers the types of sensors that are used in these scenarios and proposes a model for abstracting the information flow as a weighted dependency graph. Two parallel computing methods are then proposed for obtaining an efficient data flow: a variation of the parallel k-means clustering algorithm and a custom genetic algorithm. Simulation results show that the two proposed flow reconfiguration algorithms reduce the rule processing times and provide an efficient solution for increasing the scalability of the considered environment. 
Another aspect being discussed is using an open-source cloud solution to manage the system and how to use the two algorithms to increase efficiency. These methods allow for a seamless increase in the number of sensors in the environment by making smart use of the available resources.}, } @article {pmid36772562, year = {2023}, author = {Kim, SH and Kim, T}, title = {Local Scheduling in KubeEdge-Based Edge Computing Environment.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {3}, pages = {}, pmid = {36772562}, issn = {1424-8220}, support = {NRF-2022R1I1A3072355//National Research Foundation of Korea/ ; }, abstract = {KubeEdge is an open-source platform that orchestrates containerized Internet of Things (IoT) application services in IoT edge computing environments. Based on Kubernetes, it supports heterogeneous IoT device protocols on edge nodes and provides various functions necessary to build edge computing infrastructure, such as network management between cloud and edge nodes. However, the resulting cloud-based systems are subject to several limitations. In this study, we evaluated the performance of KubeEdge in terms of the computational resource distribution and delay between edge nodes. We found that forwarding traffic between edge nodes degrades the throughput of clusters and causes service delay in edge computing environments. Based on these results, we proposed a local scheduling scheme that handles user traffic locally at each edge node. 
The performance evaluation results revealed that local scheduling outperforms the existing load-balancing algorithm in the edge computing environment.}, } @article {pmid36772506, year = {2023}, author = {Wang, M and Li, C and Wang, X and Piao, Z and Yang, Y and Dai, W and Zhang, Q}, title = {Research on Comprehensive Evaluation and Early Warning of Transmission Lines' Operation Status Based on Dynamic Cloud Computing.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {3}, pages = {}, pmid = {36772506}, issn = {1424-8220}, support = {20220201075GX.//Science and Technology Department Plan Project of Jilin Province of China/ ; }, abstract = {The current methods for evaluating the operating condition of electricity transmission lines (ETLs) and providing early warning have several problems, such as the low correlation of data, ignoring the influence of seasonal factors, and strong subjectivity. This paper analyses the sensitive factors that influence dynamic key evaluation indices such as grounding resistance, sag, and wire corrosion, establishes the evaluation criteria of the ETL operation state, and proposes five ETL status levels and seven principles for selecting evaluation indices. Nine grade I evaluation indices and twenty-nine grade II evaluation indices, including passageway and meteorological environments, are determined. The cloud model theory is embedded and used to propose a warning technology for the operation state of ETLs based on inspection defect parameters and the cloud model. Combined with the inspection defect parameters of a line in the Baicheng district of Jilin Province and the critical evaluation index data such as grounding resistance, sag, and wire corrosion, which are used to calculate the timeliness of the data, the solid line is evaluated. 
The research shows that the dynamic evaluation model is correct and that the ETL status evaluation and early warning method have reasonable practicability.}, } @article {pmid36772424, year = {2023}, author = {Mangalampalli, S and Karri, GR and Elngar, AA}, title = {An Efficient Trust-Aware Task Scheduling Algorithm in Cloud Computing Using Firefly Optimization.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {3}, pages = {}, pmid = {36772424}, issn = {1424-8220}, abstract = {Task scheduling in the cloud computing paradigm poses a challenge for researchers as the workloads that come onto cloud platforms are dynamic and heterogeneous. Therefore, scheduling these heterogeneous tasks to the appropriate virtual resources is a huge challenge. The inappropriate assignment of tasks to virtual resources leads to the degradation of the quality of services and thereby leads to a violation of the SLA metrics, ultimately leading to the degradation of trust in the cloud provider by the cloud user. Therefore, to preserve trust in the cloud provider and to improve the scheduling process in the cloud paradigm, we propose an efficient task scheduling algorithm that considers the priorities of tasks as well as virtual machines, thereby scheduling tasks accurately to appropriate VMs. This scheduling algorithm is modeled using firefly optimization. The workload for this approach is considered by using fabricated datasets with different distributions and the real-time worklogs of HPC2N and NASA were considered. This algorithm was implemented by using a Cloudsim simulation environment and, finally, our proposed approach is compared over the baseline approaches of ACO, PSO, and the GA. 
The simulation results revealed that our proposed approach has shown a significant impact over the baseline approaches by minimizing the makespan, availability, success rate, and turnaround efficiency.}, } @article {pmid36772335, year = {2023}, author = {Markus, A and Al-Haboobi, A and Kecskemeti, G and Kertesz, A}, title = {Simulating IoT Workflows in DISSECT-CF-Fog.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {3}, pages = {}, pmid = {36772335}, issn = {1424-8220}, support = {UNKP-22-3//New National Excellence Program of the Ministry for Innovation and Technology from the source of the National Research, Development and Innovation Fund/ ; OTKA FK 131793//Hungarian Scientific Research Fund/ ; TKP2021-NVA-09//Ministry of Innovation and Technology of Hungary from the National Research, Development and Innovation Fund/ ; }, abstract = {The modelling of IoT applications utilising the resources of cloud and fog computing is not straightforward because they have to support various trigger-based events that make human life easier. The sequence of tasks, such as performing a service call, receiving a data packet in the form of a message sent by an IoT device, and managing actuators or executing a computational task on a virtual machine, are often associated with and composed of IoT workflows. The development and deployment of such IoT workflows and their management systems in real life, including communication and network operations, can be complicated due to high operation costs and access limitations. Therefore, simulation solutions are often applied for such purposes. In this paper, we introduce a novel simulator extension of the DISSECT-CF-Fog simulator that leverages the workflow scheduling and its execution capabilities to model real-life IoT use cases. We also show that state-of-the-art simulators typically omit the IoT factor in the case of the scientific workflow evaluation. 
Therefore, we present a scalability study focusing on scientific workflows and on the interoperability of scientific and IoT workflows in DISSECT-CF-Fog.}, } @article {pmid36772304, year = {2023}, author = {Yu, L and He, M and Liang, H and Xiong, L and Liu, Y}, title = {A Blockchain-Based Authentication and Authorization Scheme for Distributed Mobile Cloud Computing Services.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {3}, pages = {}, pmid = {36772304}, issn = {1424-8220}, support = {2019M663475//China Postdoctoral Science Foundation/ ; 2020JDRC0100//Science and Technology Fund of Sichuan Province/ ; 2021-YF08-00151-GX//Chengdu Science and Technology Program under grant/ ; }, abstract = {Authentication and authorization constitute the essential security component, access control, for preventing unauthorized access to cloud services in mobile cloud computing (MCC) environments. Traditional centralized access control models relying on third party trust face a critical challenge due to a high trust cost and single point of failure. Blockchain can achieve the distributed trust for access control designs in a mutual untrustworthy scenario, but it also leads to expensive storage overhead. Considering the above issues, this work constructed an authentication and authorization scheme based on blockchain that can provide a dynamic update of access permissions by utilizing the smart contract. Compared with the conventional authentication scheme, the proposed scheme integrates an extra authorization function without additional computation and communication costs in the authentication phase. To improve the storage efficiency and system scalability, only one transaction is required to be stored in blockchain to record a user's access privileges on different service providers (SPs). 
In addition, mobile users in the proposed scheme are able to register with an arbitrary SP once and then utilize the same credential to access different SPs with different access levels. The security analysis indicates that the proposed scheme is secure under the random oracle model. The performance analysis clearly shows that the proposed scheme possesses superior computation and communication efficiencies and requires a low blockchain storage capacity for accomplishing user registration and updates.}, } @article {pmid36772101, year = {2023}, author = {Yang, J and Zheng, J and Wang, H and Li, J and Sun, H and Han, W and Jiang, N and Tan, YA}, title = {Edge-Cloud Collaborative Defense against Backdoor Attacks in Federated Learning.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {3}, pages = {}, pmid = {36772101}, issn = {1424-8220}, support = {62072037//National Natural Science Foundation of China/ ; }, abstract = {Federated learning has a distributed collaborative training mode, widely used in IoT scenarios of edge computing intelligent services. However, federated learning is vulnerable to malicious attacks, mainly backdoor attacks. Once an edge node implements a backdoor attack, the embedded backdoor mode will rapidly expand to all relevant edge nodes, which poses a considerable challenge to security-sensitive edge computing intelligent services. In the traditional edge collaborative backdoor defense method, only the cloud server is trusted by default. However, edge computing intelligent services have limited bandwidth and unstable network connections, which make it impossible for edge devices to retrain their models or update the global model. Therefore, it is crucial to detect whether the data of edge nodes are polluted in time. This paper proposes a layered defense framework for edge-computing intelligent services. 
At the edge, we combine the gradient rising strategy and attention self-distillation mechanism to maximize the correlation between edge device data and edge object categories and train a clean model as much as possible. On the server side, we first implement a two-layer backdoor detection mechanism to eliminate backdoor updates and use the attention self-distillation mechanism to restore the model performance. Our results show that the two-stage defense mode is more suitable for the security protection of edge computing intelligent services. It can not only weaken the effectiveness of the backdoor at the edge end but also conduct this defense at the server end, making the model more secure. The precision of our model on the main task is almost the same as that of the clean model.}, } @article {pmid36770943, year = {2023}, author = {Kumar, A and Arantes, PR and Saha, A and Palermo, G and Wong, BM}, title = {GPU-Enhanced DFTB Metadynamics for Efficiently Predicting Free Energies of Biochemical Systems.}, journal = {Molecules (Basel, Switzerland)}, volume = {28}, number = {3}, pages = {}, pmid = {36770943}, issn = {1420-3049}, support = {R01 GM141329/GM/NIGMS NIH HHS/United States ; CHE-2144823//National Science Foundation/ ; CHE-2028365//National Science Foundation/ ; R01GM141329/NH/NIH HHS/United States ; }, abstract = {Metadynamics calculations of large chemical systems with ab initio methods are computationally prohibitive due to the extensive sampling required to simulate the large degrees of freedom in these systems. To address this computational bottleneck, we utilized a GPU-enhanced density functional tight binding (DFTB) approach on a massively parallelized cloud computing platform to efficiently calculate the thermodynamics and metadynamics of biochemical systems. 
To first validate our approach, we calculated the free-energy surfaces of alanine dipeptide and showed that our GPU-enhanced DFTB calculations qualitatively agree with computationally-intensive hybrid DFT benchmarks, whereas classical force fields give significant errors. Most importantly, we show that our GPU-accelerated DFTB calculations are significantly faster than previous approaches by up to two orders of magnitude. To further extend our GPU-enhanced DFTB approach, we also carried out a 10 ns metadynamics simulation of remdesivir, which is prohibitively out of reach for routine DFT-based metadynamics calculations. We find that the free-energy surfaces of remdesivir obtained from DFTB and classical force fields differ significantly, where the latter overestimates the internal energy contribution of high free-energy states. Taken together, our benchmark tests, analyses, and extensions to large biochemical systems highlight the use of GPU-enhanced DFTB simulations for efficiently predicting the free-energy surfaces/thermodynamics of large biochemical systems.}, } @article {pmid36768346, year = {2023}, author = {Sarkar, C and Das, B and Rawat, VS and Wahlang, JB and Nongpiur, A and Tiewsoh, I and Lyngdoh, NM and Das, D and Bidarolli, M and Sony, HT}, title = {Artificial Intelligence and Machine Learning Technology Driven Modern Drug Discovery and Development.}, journal = {International journal of molecular sciences}, volume = {24}, number = {3}, pages = {}, pmid = {36768346}, issn = {1422-0067}, mesh = {Humans ; *Artificial Intelligence ; *Machine Learning ; Neural Networks, Computer ; Drug Discovery/methods ; Technology ; Drug Design ; }, abstract = {The discovery and advances of medicines may be considered as the ultimate relevant translational science effort that adds to human invulnerability and happiness. 
But advancing a fresh medication is a quite convoluted, costly, and protracted operation, normally costing USD ~2.6 billion and consuming a mean time span of 12 years. Methods to cut back expenditure and hasten new drug discovery have prompted an arduous and compelling brainstorming exercise in the pharmaceutical industry. The engagement of Artificial Intelligence (AI), including the deep-learning (DL) component in particular, has been facilitated by the employment of classified big data, in concert with strikingly reinforced computing prowess and cloud storage, across all fields. AI has energized computer-facilitated drug discovery. An unrestricted espousing of machine learning (ML), especially DL, in many scientific specialties, and the technological refinements in computing hardware and software, in concert with various aspects of the problem, sustain this progress. ML algorithms have been extensively engaged for computer-facilitated drug discovery. DL methods, such as artificial neural networks (ANNs) comprising multiple buried processing layers, have of late seen a resurgence due to their capability to power automatic attribute elicitations from the input data, coupled with their ability to obtain nonlinear input-output pertinencies. Such features of DL methods augment classical ML techniques which bank on human-contrived molecular descriptors. A major part of the early reluctance concerning utility of AI in pharmaceutical discovery has begun to melt, thereby advancing medicinal chemistry. AI, along with modern experimental technical knowledge, is anticipated to invigorate the quest for new and improved pharmaceuticals in an expeditious, economical, and increasingly compelling manner. DL-facilitated methods have just initiated kickstarting for some integral issues in drug discovery. 
Many technological advances, such as "message-passing paradigms", "spatial-symmetry-preserving networks", "hybrid de novo design", and other ingenious ML exemplars, will definitely come to be pervasively widespread and help dissect many of the biggest, and most intriguing inquiries. Open data allocation and model augmentation will exert a decisive hold during the progress of drug discovery employing AI. This review will address the impending utilizations of AI to refine and bolster the drug discovery operation.}, } @article {pmid36763944, year = {2023}, author = {Shahinyan, GK and Hu, MY and Jiang, T and Osadchiy, V and Sigalos, JT and Mills, JN and Kachroo, N and Eleswarapu, SV}, title = {Cannabis and male sexual health: contemporary qualitative review and insight into perspectives of young men on the internet.}, journal = {Sexual medicine reviews}, volume = {11}, number = {2}, pages = {139-150}, doi = {10.1093/sxmrev/qeac010}, pmid = {36763944}, issn = {2050-0521}, mesh = {Humans ; Male ; United States ; *Sexual Health ; *Cannabis/adverse effects ; Quality of Life ; Men's Health ; Internet ; }, abstract = {INTRODUCTION: Cannabis use is increasing across the United States, yet its short- and long-term effects on sexual function remain controversial. Currently, there is a paucity of studies exploring the relationship between cannabis and men's health.

OBJECTIVES: To summarize the available literature on cannabis and men's health and provide insight into lay perceptions of this topic.

METHODS: We performed a qualitative PubMed review of the existing literature on cannabis and men's health according to the PRISMA guidelines. Separately, we analyzed relevant themes in online men's health forums. We utilized a Google cloud-based platform (BigQuery) to extract relevant posts from 5 men's health Reddit forums from August 2018 to August 2019. We conducted a qualitative thematic analysis of the posts and quantitatively analyzed them using natural language processing and a meaning extraction method with principal component analysis.

RESULTS: Our literature review revealed a mix of animal and human studies demonstrating the negative effects of cannabis on semen parameters and varying effects on erectile function and hormone levels. In our analysis of 372 686 Reddit posts, 1190 (0.3%) included relevant discussion on cannabis and men's health. An overall 272 posts were manually analyzed, showing that online discussions revolve around seeking answers and sharing the effects of cannabis on various aspects of sexual health and quality of life, often with conflicting experiences. Quantitative analysis revealed 1 thematic cluster related to cannabis, insecurity, and mental/physical health.

CONCLUSIONS: There is a limited number of quality human studies investigating the effects of cannabis on men's health. Men online are uncertain about how cannabis affects their sexual health and seek more information. As the prevalence of cannabis use increases, so does the need for research in this area.}, } @article {pmid36757918, year = {2023}, author = {Pollak, DJ and Chawla, G and Andreev, A and Prober, DA}, title = {First steps into the cloud: Using Amazon data storage and computing with Python notebooks.}, journal = {PloS one}, volume = {18}, number = {2}, pages = {e0278316}, pmid = {36757918}, issn = {1932-6203}, support = {R35 NS122172/NS/NINDS NIH HHS/United States ; UF1 NS126562/NS/NINDS NIH HHS/United States ; T32 NS105595/NS/NINDS NIH HHS/United States ; }, mesh = {Animals ; *Zebrafish ; *Software ; Programming Languages ; Information Storage and Retrieval ; Cloud Computing ; }, abstract = {With the oncoming age of big data, biologists are encountering more use cases for cloud-based computing to streamline data processing and storage. Unfortunately, cloud platforms are difficult to learn, and there are few resources for biologists to demystify them. We have developed a guide for experimental biologists to set up cloud processing on Amazon Web Services to cheaply outsource data processing and storage. Here we provide a guide for setting up a computing environment in the cloud and showcase examples of using Python and Julia programming languages. We present example calcium imaging data in the zebrafish brain and corresponding analysis using suite2p software. Tools for budget and user management are further discussed in the attached protocol. 
Using this guide, researchers with limited coding experience can get started with cloud-based computing or move existing coding infrastructure into the cloud environment.}, } @article {pmid36754821, year = {2023}, author = {Bosia, F and Zheng, P and Vaucher, A and Weymuth, T and Dral, PO and Reiher, M}, title = {Ultra-fast semi-empirical quantum chemistry for high-throughput computational campaigns with Sparrow.}, journal = {The Journal of chemical physics}, volume = {158}, number = {5}, pages = {054118}, doi = {10.1063/5.0136404}, pmid = {36754821}, issn = {1089-7690}, abstract = {Semi-empirical quantum chemical approaches are known to compromise accuracy for the feasibility of calculations on huge molecules. However, the need for ultrafast calculations in interactive quantum mechanical studies, high-throughput virtual screening, and data-driven machine learning has shifted the emphasis toward calculation runtimes recently. This comes with new constraints for the software implementation as many fast calculations would suffer from a large overhead of the manual setup and other procedures that are comparatively fast when studying a single molecular structure, but which become prohibitively slow for high-throughput demands. In this work, we discuss the effect of various well-established semi-empirical approximations on calculation speed and relate this to data transfer rates from the raw-data source computer to the results of the visualization front end. For the former, we consider desktop computers, local high performance computing, and remote cloud services in order to elucidate the effect on interactive calculations, for web and cloud interfaces in local applications, and in world-wide interactive virtual sessions. 
The models discussed in this work have been implemented into our open-source software SCINE Sparrow.}, } @article {pmid36753980, year = {2023}, author = {Cubillos, LH and Augenstein, TE and Ranganathan, R and Krishnan, C}, title = {Breaking the barriers to designing online experiments: A novel open-source platform for supporting procedural skill learning experiments.}, journal = {Computers in biology and medicine}, volume = {154}, number = {}, pages = {106627}, doi = {10.1016/j.compbiomed.2023.106627}, pmid = {36753980}, issn = {1879-0534}, mesh = {Humans ; *Psychomotor Performance ; *Motor Skills ; Learning ; Hand ; }, abstract = {BACKGROUND: Motor learning experiments are typically performed in laboratory environments, which can be time-consuming and require dedicated equipment/personnel, thus limiting the ability to gather data from large samples. To address this problem, some researchers have transitioned to unsupervised online experiments, showing advantages in participant recruitment without losing validity. However, most online platforms require coding experience or time-consuming setups to create and run experiments, limiting their usage across the field.

METHOD: To tackle this issue, an open-source web-based platform was developed (https://experiments.neurro-lab.engin.umich.edu/) to create, run, and manage procedural skill learning experiments without coding or setup requirements. The feasibility of the platform and the comparability of the results between supervised (n = 17) and unsupervised (n = 24) were tested in 41 naive right-handed participants using an established sequential finger tapping task. The study also tested if a previously reported rapid form of offline consolidation (i.e., microscale learning) in procedural skill learning could be replicated with the developed platform and evaluated the extent of interlimb transfer associated with the finger tapping task.

RESULTS: The results indicated that the performance metrics were comparable between the supervised and unsupervised groups (all p's > 0.05). The learning curves, mean tapping speeds, and micro-scale learning were similar to previous studies. Training led to significant improvements in mean tapping speed (2.22 ± 1.48 keypresses/s, p < 0.001) and a significant interlimb transfer of learning (1.22 ± 1.43 keypresses/s, p < 0.05).

CONCLUSIONS: The results show that the presented platform may serve as a valuable tool for conducting online procedural skill-learning experiments.}, } @article {pmid36750410, year = {2023}, author = {Raucci, U and Weir, H and Sakshuwong, S and Seritan, S and Hicks, CB and Vannucci, F and Rea, F and Martínez, TJ}, title = {Interactive Quantum Chemistry Enabled by Machine Learning, Graphical Processing Units, and Cloud Computing.}, journal = {Annual review of physical chemistry}, volume = {74}, number = {}, pages = {313-336}, doi = {10.1146/annurev-physchem-061020-053438}, pmid = {36750410}, issn = {1545-1593}, abstract = {Modern quantum chemistry algorithms are increasingly able to accurately predict molecular properties that are useful for chemists in research and education. Despite this progress, performing such calculations is currently unattainable to the wider chemistry community, as they often require domain expertise, computer programming skills, and powerful computer hardware. In this review, we outline methods to eliminate these barriers using cutting-edge technologies. We discuss the ingredients needed to create accessible platforms that can compute quantum chemistry properties in real time, including graphical processing units-accelerated quantum chemistry in the cloud, artificial intelligence-driven natural molecule input methods, and extended reality visualization. 
We end by highlighting a series of exciting applications that assemble these components to create uniquely interactive platforms for computing and visualizing spectra, 3D structures, molecular orbitals, and many other chemical properties.}, } @article {pmid36747613, year = {2024}, author = {Koenig, Z and Yohannes, MT and Nkambule, LL and Zhao, X and Goodrich, JK and Kim, HA and Wilson, MW and Tiao, G and Hao, SP and Sahakian, N and Chao, KR and Walker, MA and Lyu, Y and Rehm, HL and Neale, BM and Talkowski, ME and Daly, MJ and Brand, H and Karczewski, KJ and Atkinson, EG and Martin, AR}, title = {A harmonized public resource of deeply sequenced diverse human genomes.}, journal = {bioRxiv : the preprint server for biology}, volume = {}, number = {}, pages = {}, pmid = {36747613}, support = {P30 DK043351/DK/NIDDK NIH HHS/United States ; R00 MH117229/MH/NIMH NIH HHS/United States ; R01 DE031261/DE/NIDCR NIH HHS/United States ; R01 MH115957/MH/NIMH NIH HHS/United States ; }, abstract = {Underrepresented populations are often excluded from genomic studies due in part to a lack of resources supporting their analyses. The 1000 Genomes Project (1kGP) and Human Genome Diversity Project (HGDP), which have recently been sequenced to high coverage, are valuable genomic resources because of the global diversity they capture and their open data sharing policies. Here, we harmonized a high quality set of 4,094 whole genomes from HGDP and 1kGP with data from the Genome Aggregation Database (gnomAD) and identified over 153 million high-quality SNVs, indels, and SVs. We performed a detailed ancestry analysis of this cohort, characterizing population structure and patterns of admixture across populations, analyzing site frequency spectra, and measuring variant counts at global and subcontinental levels. 
We also demonstrate substantial added value from this dataset compared to the prior versions of the component resources, typically combined via liftover and variant intersection; for example, we catalog millions of new genetic variants, mostly rare, compared to previous releases. In addition to unrestricted individual-level public release, we provide detailed tutorials for conducting many of the most common quality control steps and analyses with these data in a scalable cloud-computing environment and publicly release this new phased joint callset for use as a haplotype resource in phasing and imputation pipelines. This jointly called reference panel will serve as a key resource to support research of diverse ancestry populations.}, } @article {pmid36733938, year = {2023}, author = {{Journal of Healthcare Engineering}}, title = {Retracted: Discussion on Health Service System of Mobile Medical Institutions Based on Internet of Things and Cloud Computing.}, journal = {Journal of healthcare engineering}, volume = {2023}, number = {}, pages = {9892481}, pmid = {36733938}, issn = {2040-2309}, abstract = {[This retracts the article DOI: 10.1155/2022/5235349.].}, } @article {pmid36723167, year = {2023}, author = {Klukowski, P and Riek, R and Güntert, P}, title = {NMRtist: an online platform for automated biomolecular NMR spectra analysis.}, journal = {Bioinformatics (Oxford, England)}, volume = {39}, number = {2}, pages = {}, pmid = {36723167}, issn = {1367-4811}, support = {891690//European Union/ ; }, mesh = {Humans ; Nuclear Magnetic Resonance, Biomolecular ; *Software ; *Proteins/chemistry ; Magnetic Resonance Spectroscopy ; Magnetic Resonance Imaging ; }, abstract = {SUMMARY: We present NMRtist, an online platform that combines deep learning, large-scale optimization and cloud computing to automate protein NMR spectra analysis. 
Our website provides virtual storage for NMR spectra deposition together with a set of applications designed for automated peak picking, chemical shift assignment and protein structure determination. The system can be used by non-experts and allows protein assignments and structures to be determined within hours after the measurements, strictly without any human intervention.

NMRtist is freely available to non-commercial users at https://nmrtist.org.

SUPPLEMENTARY INFORMATION: Supplementary data are available at Bioinformatics online.}, } @article {pmid36721327, year = {2023}, author = {Batorsky, A and Bowden, AE and Darwin, J and Fields, AJ and Greco, CM and Harris, RE and Hue, TF and Kakyomya, J and Mehling, W and O'Neill, C and Patterson, CG and Piva, SR and Sollmann, N and Toups, V and Wasan, AD and Wasserman, R and Williams, DA and Vo, NV and Psioda, MA and McCumber, M}, title = {The Back Pain Consortium (BACPAC) Research Program Data Harmonization: Rationale for Data Elements and Standards.}, journal = {Pain medicine (Malden, Mass.)}, volume = {24}, number = {Suppl 1}, pages = {S95-S104}, doi = {10.1093/pm/pnad008}, pmid = {36721327}, issn = {1526-4637}, support = {UH2 AR076719/AR/NIAMS NIH HHS/United States ; UH3 AR076719/AR/NIAMS NIH HHS/United States ; UH3 AR076723/AR/NIAMS NIH HHS/United States ; 1UH2AR076731-01/NH/NIH HHS/United States ; }, mesh = {Humans ; *Low Back Pain/therapy ; Outcome Assessment, Health Care ; Research Design ; }, abstract = {OBJECTIVE: One aim of the Back Pain Consortium (BACPAC) Research Program is to develop an integrated model of chronic low back pain that is informed by combined data from translational research and clinical trials. We describe efforts to maximize data harmonization and accessibility to facilitate Consortium-wide analyses.

METHODS: Consortium-wide working groups established harmonized data elements to be collected in all studies and developed standards for tabular and nontabular data (eg, imaging and omics). The BACPAC Data Portal was developed to facilitate research collaboration across the Consortium.

RESULTS: Clinical experts developed the BACPAC Minimum Dataset with required domains and outcome measures to be collected by use of questionnaires across projects. Other nonrequired domain-specific measures are collected by multiple studies. To optimize cross-study analyses, a modified data standard was developed on the basis of the Clinical Data Interchange Standards Consortium Study Data Tabulation Model to harmonize data structures and facilitate integration of baseline characteristics, participant-reported outcomes, chronic low back pain treatments, clinical exam, functional performance, psychosocial characteristics, quantitative sensory testing, imaging, and biomechanical data. Standards to accommodate the unique features of chronic low back pain data were adopted. Research units submit standardized study data to the BACPAC Data Portal, developed as a secure cloud-based central data repository and computing infrastructure for researchers to access and conduct analyses on data collected by or acquired for BACPAC.

CONCLUSIONS: BACPAC harmonization efforts and data standards serve as an innovative model for data integration that could be used as a framework for other consortia with multiple, decentralized research programs.}, } @article {pmid36720454, year = {2023}, author = {Firoz, A and Ravanan, P and Saha, P and Prashar, T and Talwar, P}, title = {Genome-wide screening and identification of potential kinases involved in endoplasmic reticulum stress responses.}, journal = {Life sciences}, volume = {317}, number = {}, pages = {121452}, doi = {10.1016/j.lfs.2023.121452}, pmid = {36720454}, issn = {1879-0631}, mesh = {Animals ; Humans ; Mice ; Rats ; Base Sequence ; *DNA-Binding Proteins/genetics ; *Endoplasmic Reticulum/metabolism ; Endoplasmic Reticulum Stress ; HeLa Cells ; Mammals/metabolism ; Transcription Factors/metabolism ; Phosphotransferases ; }, abstract = {AIM: This study aims to identify endoplasmic reticulum stress response elements (ERSE) in the human genome to explore potentially regulated genes, including kinases and transcription factors, involved in the endoplasmic reticulum (ER) stress and its related diseases.

MATERIALS AND METHODS: Python-based whole genome screening of ERSE was performed using the Amazon Web Services elastic computing system. The Kinome database was used to filter out the kinases from the extracted list of ERSE-related genes. Additionally, network analysis and genome enrichment were achieved using NDEx, the Network and Data Exchange software, and web-based computational tools. To validate the gene expression, quantitative RT-PCR was performed for selected kinases from the list by exposing the HeLa cells to tunicamycin and brefeldin, ER stress inducers, for various time points.

KEY FINDINGS: The overall number of ERSE-associated genes follows a similar pattern in humans, mice, and rats, demonstrating the ERSE's conservation in mammals. A total of 2705 ERSE sequences were discovered in the human genome (GRCh38.p14), from which we identified 36 kinases encoding genes. Gene expression analysis has shown a significant change in the expression of selected genes under ER stress conditions in HeLa cells, supporting our finding.

SIGNIFICANCE: In this study, we have introduced a rapid method using Amazon cloud-based services for genome-wide screening of ERSE sequences from both positive and negative strands, which covers the entire genome reference sequences. Approximately 10 % of human protein-protein interactomes were found to be associated with ERSE-related genes. Our study also provides a rich resource of human ER stress-response-based protein networks and transcription factor interactions and a reference point for future research aiming at targeted therapeutics.}, } @article {pmid36717471, year = {2023}, author = {Nandasena, WDKV and Brabyn, L and Serrao-Neumann, S}, title = {Monitoring invasive pines using remote sensing: a case study from Sri Lanka.}, journal = {Environmental monitoring and assessment}, volume = {195}, number = {2}, pages = {347}, pmid = {36717471}, issn = {1573-2959}, mesh = {*Remote Sensing Technology/methods ; Sri Lanka ; Conservation of Natural Resources/methods ; Environmental Monitoring/methods ; Ecosystem ; *Pinus ; }, abstract = {Production plantation forestry has many economic benefits but can also have negative environmental impacts such as the spreading of invasive pines to native forest habitats. Monitoring forest for the presence of invasive pines helps with the management of this issue. However, detection of vegetation change over a large time period is difficult due to changes in image quality and sensor types, and by the spectral similarity of evergreen species and frequent cloud cover in the study area. The costs of high-resolution images are also prohibitive for routine monitoring in resource-constrained countries. This research investigated the use of remote sensing to identify the spread of Pinus caribaea over a 21-year period (2000 to 2021) in Belihuloya, Sri Lanka, using Landsat images. 
It applied a range of techniques to produce cloud free images, extract vegetation features, and improve vegetation classification accuracy, followed by the use of Geographical Information System to spatially analyze the spread of invasive pines. The results showed most invading pines were found within 100 m of the pine plantations' borders where broadleaved forests and grasslands are vulnerable to invasion. However, the extent of invasive pine had an overall decline of 4 ha over the 21 years. The study confirmed that remote sensing combined with spatial analysis are effective tools for monitoring invasive pines in countries with limited resources. This study also provides information to conservationists and forest managers to conduct strategic planning for sustainable forest management and conservation in Sri Lanka.}, } @article {pmid36714386, year = {2023}, author = {Patel, YS and Bedi, J}, title = {MAG-D: A multivariate attention network based approach for cloud workload forecasting.}, journal = {Future generations computer systems : FGCS}, volume = {142}, number = {}, pages = {376-392}, pmid = {36714386}, issn = {0167-739X}, abstract = {The Coronavirus pandemic and the work-from-home have drastically changed the working style and forced us to rapidly shift towards cloud-based platforms & services for seamless functioning. The pandemic has accelerated a permanent shift in cloud migration. It is estimated that over 95% of digital workloads will reside in cloud-native platforms. Real-time workload forecasting and efficient resource management are two critical challenges for cloud service providers. As cloud workloads are highly volatile and chaotic due to their time-varying nature; thus classical machine learning-based prediction models failed to acquire accurate forecasting. Recent advances in deep learning have gained massive popularity in forecasting highly nonlinear cloud workloads; however, they failed to achieve excellent forecasting outcomes. 
Consequently, demands for designing more accurate forecasting algorithms exist. Therefore, in this work, we propose 'MAG-D', a Multivariate Attention and Gated recurrent unit based Deep learning approach for Cloud workload forecasting in data centers. We performed an extensive set of experiments on the Google cluster traces, and we confirm that MAG-D exploits the long-range nonlinear dependencies of cloud workload and improves the prediction accuracy on average compared to the recent techniques applying hybrid methods using Long Short Term Memory Network (LSTM), Convolutional Neural Network (CNN), Gated Recurrent Units (GRU), and Bidirectional Long Short Term Memory Network (BiLSTM).}, } @article {pmid36712619, year = {2023}, author = {He, R and Xie, W and Wu, B and Brandon, NP and Liu, X and Li, X and Yang, S}, title = {Towards interactional management for power batteries of electric vehicles.}, journal = {RSC advances}, volume = {13}, number = {3}, pages = {2036-2056}, pmid = {36712619}, issn = {2046-2069}, abstract = {With the ever-growing digitalization and mobility of electric transportation, lithium-ion batteries are facing performance and safety issues with the appearance of new materials and the advance of manufacturing techniques. This paper presents a systematic review of burgeoning multi-scale modelling and design for battery efficiency and safety management. The rise of cloud computing provides a tactical solution on how to efficiently achieve the interactional management and control of power batteries based on the battery system and traffic big data. The potential of selecting adaptive strategies in emerging digital management is covered systematically from principles and modelling, to machine learning. Specifically, multi-scale optimization is expounded in terms of materials, structures, manufacturing and grouping. The progress on modelling, state estimation and management methods is summarized and discussed in detail. 
Moreover, this review demonstrates the innovative progress of machine learning based data analysis in battery research so far, laying the foundation for future cloud and digital battery management to develop reliable onboard applications.}, } @article {pmid36711159, year = {2023}, author = {D'Souza, G and Reddy, NVS and Manjunath, KN}, title = {Localization of lung abnormalities on chest X-rays using self-supervised equivariant attention.}, journal = {Biomedical engineering letters}, volume = {13}, number = {1}, pages = {21-30}, pmid = {36711159}, issn = {2093-985X}, abstract = {UNLABELLED: Chest X-Ray (CXR) images provide most anatomical details and the abnormalities on a 2D plane. Therefore, a 2D view of the 3D anatomy is sometimes sufficient for the initial diagnosis. However, close to fourteen commonly occurring diseases are sometimes difficult to identify by visually inspecting the images. Therefore, there is a drift toward developing computer-aided assistive systems to help radiologists. This paper proposes a deep learning model for the classification and localization of chest diseases by using image-level annotations. The model consists of a modified Resnet50 backbone for extracting feature corpus from the images, a classifier, and a pixel correlation module (PCM). During PCM training, the network is a weight-shared siamese architecture where the first branch applies the affine transform to the image before feeding to the network, while the second applies the same transform to the network output. The method was evaluated on CXR from the clinical center in the ratio of 70:20 for training and testing. The model was developed and tested using the cloud computing platform Google Colaboratory (NVidia Tesla P100 GPU, 16 GB of RAM). A radiologist subjectively validated the results. Our model trained with the configurations mentioned in this paper outperformed benchmark results.

SUPPLEMENTARY INFORMATION: The online version contains supplementary material available at 10.1007/s13534-022-00249-5.}, } @article {pmid36704354, year = {2022}, author = {Alvarellos, M and Sheppard, HE and Knarston, I and Davison, C and Raine, N and Seeger, T and Prieto Barja, P and Chatzou Dunford, M}, title = {Democratizing clinical-genomic data: How federated platforms can promote benefits sharing in genomics.}, journal = {Frontiers in genetics}, volume = {13}, number = {}, pages = {1045450}, pmid = {36704354}, issn = {1664-8021}, abstract = {Since the first sequencing of the human genome, associated sequencing costs have dramatically lowered, leading to an explosion of genomic data. This valuable data should in theory be of huge benefit to the global community, although unfortunately the benefits of these advances have not been widely distributed. Much of today's clinical-genomic data is siloed and inaccessible in adherence with strict governance and privacy policies, with more than 97% of hospital data going unused, according to one reference. Despite these challenges, there are promising efforts to make clinical-genomic data accessible and useful without compromising security. Specifically, federated data platforms are emerging as key resources to facilitate secure data sharing without having to physically move the data from outside of its organizational or jurisdictional boundaries. In this perspective, we summarize the overarching progress in establishing federated data platforms, and highlight critical considerations on how they should be managed to ensure patient and public trust. These platforms are enabling global collaboration and improving representation of underrepresented groups, since sequencing efforts have not prioritized diverse population representation until recently. 
Federated data platforms, when combined with advances in no-code technology, can be accessible to the diverse end-users that make up the genomics workforce, and we discuss potential strategies to develop sustainable business models so that the platforms can continue to enable research long term. Although these platforms must be carefully managed to ensure appropriate and ethical use, they are democratizing access and insights to clinical-genomic data that will progress research and enable impactful therapeutic findings.}, } @article {pmid36702751, year = {2023}, author = {Bang, I and Lee, SM and Park, S and Park, JY and Nong, LK and Gao, Y and Palsson, BO and Kim, D}, title = {Deep-learning optimized DEOCSU suite provides an iterable pipeline for accurate ChIP-exo peak calling.}, journal = {Briefings in bioinformatics}, volume = {24}, number = {2}, pages = {}, doi = {10.1093/bib/bbad024}, pmid = {36702751}, issn = {1477-4054}, mesh = {*Chromatin Immunoprecipitation Sequencing ; *Deep Learning ; Chromatin Immunoprecipitation ; DNA-Binding Proteins/metabolism ; Software ; Algorithms ; Binding Sites ; Sequence Analysis, DNA ; }, abstract = {Recognizing binding sites of DNA-binding proteins is a key factor for elucidating transcriptional regulation in organisms. ChIP-exo enables researchers to delineate genome-wide binding landscapes of DNA-binding proteins with near single base-pair resolution. However, the peak calling step hinders ChIP-exo application since the published algorithms tend to generate false-positive and false-negative predictions. Here, we report the development of DEOCSU (DEep-learning Optimized ChIP-exo peak calling SUite), a novel machine learning-based ChIP-exo peak calling suite. DEOCSU entails the deep convolutional neural network model which was trained with curated ChIP-exo peak data to distinguish the visualized data of bona fide peaks from false ones. 
Performance validation of the trained deep-learning model indicated its high accuracy, high precision and high recall of over 95%. Applying the new suite to both in-house and publicly available ChIP-exo datasets obtained from bacteria, eukaryotes and archaea revealed an accurate prediction of peaks containing canonical motifs, highlighting the versatility and efficiency of DEOCSU. Furthermore, DEOCSU can be executed on a cloud computing platform or the local environment. With visualization software included in the suite, adjustable options such as the threshold of peak probability, and iterable updating of the pre-trained model, DEOCSU can be optimized for users' specific needs.}, } @article {pmid36696392, year = {2023}, author = {Kim, J and Karyadi, DM and Hartley, SW and Zhu, B and Wang, M and Wu, D and Song, L and Armstrong, GT and Bhatia, S and Robison, LL and Yasui, Y and Carter, B and Sampson, JN and Freedman, ND and Goldstein, AM and Mirabello, L and Chanock, SJ and Morton, LM and Savage, SA and Stewart, DR}, title = {Inflated expectations: Rare-variant association analysis using public controls.}, journal = {PloS one}, volume = {18}, number = {1}, pages = {e0280951}, pmid = {36696392}, issn = {1932-6203}, support = {U24 CA055727/CA/NCI NIH HHS/United States ; }, mesh = {*Motivation ; *High-Throughput Nucleotide Sequencing/methods ; Polymorphism, Single Nucleotide ; Software ; }, abstract = {The use of publicly available sequencing datasets as controls (hereafter, "public controls") in studies of rare variant disease associations has great promise but can increase the risk of false-positive discovery. The specific factors that could contribute to inflated distribution of test statistics have not been systematically examined. 
Here, we leveraged both public controls, gnomAD v2.1 and several datasets sequenced in our laboratory to systematically investigate factors that could contribute to the false-positive discovery, as measured by λΔ95, a measure to quantify the degree of inflation in statistical significance. Analyses of datasets in this investigation found that 1) the significantly inflated distribution of test statistics decreased substantially when the same variant caller and filtering pipelines were employed, 2) differences in library prep kits and sequencers did not affect the false-positive discovery rate and, 3) joint vs. separate variant-calling of cases and controls did not contribute to the inflation of test statistics. Currently available methods do not adequately adjust for the high false-positive discovery. These results, especially if replicated, emphasize the risks of using public controls for rare-variant association tests in which individual-level data and the computational pipeline are not readily accessible, which prevents the use of the same variant-calling and filtering pipelines on both cases and controls. A plausible solution exists with the emergence of cloud-based computing, which can make it possible to bring containerized analytical pipelines to the data (rather than the data to the pipeline) and could avert or minimize these issues. 
It is suggested that future reports account for this issue and provide this as a limitation in reporting new findings based on studies that cannot practically analyze all data on a single pipeline.}, } @article {pmid36695636, year = {2023}, author = {Wang, J and Zheng, J and Lee, EE and Aguilar, B and Phan, J and Abdilleh, K and Taylor, RC and Longabaugh, W and Johansson, B and Mertens, F and Mitelman, F and Pot, D and LaFramboise, T}, title = {A cloud-based resource for genome coordinate-based exploration and large-scale analysis of chromosome aberrations and gene fusions in cancer.}, journal = {Genes, chromosomes & cancer}, volume = {62}, number = {8}, pages = {441-448}, pmid = {36695636}, issn = {1098-2264}, support = {R21 CA249138/CA/NCI NIH HHS/United States ; /CA/NCI NIH HHS/United States ; /HH/HHS/United States ; HHSN261201400008C/CA/NCI NIH HHS/United States ; R01 CA217992/CA/NCI NIH HHS/United States ; HHSN261201500003I/CA/NCI NIH HHS/United States ; R01LM013067/NH/NIH HHS/United States ; R21CA249138/NH/NIH HHS/United States ; R01 LM013067/LM/NLM NIH HHS/United States ; R01CA217992/NH/NIH HHS/United States ; }, mesh = {Humans ; *Cloud Computing ; Chromosome Aberrations ; Karyotyping ; *Neoplasms/genetics ; Gene Fusion ; }, abstract = {Cytogenetic analysis provides important information on the genetic mechanisms of cancer. The Mitelman Database of Chromosome Aberrations and Gene Fusions in Cancer (Mitelman DB) is the largest catalog of acquired chromosome aberrations, presently comprising >70 000 cases across multiple cancer types. Although this resource has enabled the identification of chromosome abnormalities leading to specific cancers and cancer mechanisms, a large-scale, systematic analysis of these aberrations and their downstream implications has been difficult due to the lack of a standard, automated mapping from aberrations to genomic coordinates. We previously introduced CytoConverter as a tool that automates such conversions. 
CytoConverter has now been updated with improved interpretation of karyotypes and has been integrated with the Mitelman DB, providing a comprehensive mapping of the 70 000+ cases to genomic coordinates, as well as visualization of the frequencies of chromosomal gains and losses. Importantly, all CytoConverter-generated genomic coordinates are publicly available in Google BigQuery, a cloud-based data warehouse, facilitating data exploration and integration with other datasets hosted by the Institute for Systems Biology Cancer Gateway in the Cloud (ISB-CGC) Resource. We demonstrate the use of BigQuery for integrative analysis of Mitelman DB with other cancer datasets, including a comparison of the frequency of imbalances identified in Mitelman DB cases with those found in The Cancer Genome Atlas (TCGA) copy number datasets. This solution provides opportunities to leverage the power of cloud computing for low-cost, scalable, and integrated analysis of chromosome aberrations and gene fusions in cancer.}, } @article {pmid36694127, year = {2023}, author = {Digby, B and Finn, SP and Ó Broin, P}, title = {nf-core/circrna: a portable workflow for the quantification, miRNA target prediction and differential expression analysis of circular RNAs.}, journal = {BMC bioinformatics}, volume = {24}, number = {1}, pages = {27}, pmid = {36694127}, issn = {1471-2105}, support = {18/CRT/6214/SFI_/Science Foundation Ireland/Ireland ; }, mesh = {*MicroRNAs/genetics/metabolism ; RNA, Circular ; Workflow ; Software ; Sequence Analysis, RNA ; }, abstract = {BACKGROUND: Circular RNAs (circRNAs) are a class of covalently closed non-coding RNAs that have garnered increased attention from the research community due to their stability, tissue-specific expression and role as transcriptional modulators via sequestration of miRNAs. 
Currently, multiple quantification tools capable of detecting circRNAs exist, yet none delineate circRNA-miRNA interactions, and only one employs differential expression analysis. Efforts have been made to bridge this gap by way of circRNA workflows, however these workflows are limited by both the types of analyses available and computational skills required to run them.

RESULTS: We present nf-core/circrna, a multi-functional, automated high-throughput pipeline implemented in nextflow that allows users to characterise the role of circRNAs in RNA Sequencing datasets via three analysis modules: (1) circRNA quantification, robust filtering and annotation (2) miRNA target prediction of the mature spliced sequence and (3) differential expression analysis. nf-core/circrna has been developed within the nf-core framework, ensuring robust portability across computing environments via containerisation, parallel deployment on cluster/cloud-based infrastructures, comprehensive documentation and maintenance support.

CONCLUSION: nf-core/circrna reduces the barrier to entry for researchers by providing an easy-to-use, platform-independent and scalable workflow for circRNA analyses. Source code, documentation and installation instructions are freely available at https://nf-co.re/circrna and https://github.com/nf-core/circrna .}, } @article {pmid36691672, year = {2023}, author = {Ørka, HO and Gailis, J and Vege, M and Gobakken, T and Hauglund, K}, title = {Analysis-ready satellite data mosaics from Landsat and Sentinel-2 imagery.}, journal = {MethodsX}, volume = {10}, number = {}, pages = {101995}, pmid = {36691672}, issn = {2215-0161}, abstract = {Today's enormous amounts of freely available high-resolution satellite imagery provide the demand for effective preprocessing methods. One such preprocessing method needed in many applications utilizing optical satellite imagery from the Landsat and Sentinel-2 archives is mosaicking. Merging hundreds of single scenes into a single satellite data mosaic before conducting analysis such as land cover classification, change detection, or modelling is often a prerequisite. Maintaining the original data structure and preserving metadata for further modelling or classification would be advantageous for many applications. Furthermore, in other applications, e.g., connected to land cover classification creating the mosaic for a specific period matching the phenological state of the phenomena in nature would be beneficial. In addition, supporting in-house and computing centers not directly connected to a specific cloud provider could be a requirement for some institutions or companies. 
In the current work, we present a method called Geomosaic that meets these criteria and produces analysis-ready satellite data mosaics from Landsat and Sentinel-2 imagery.•The method described produces analysis-ready satellite data mosaics.•The satellite data mosaics contain pixel metadata usable for further analysis.•The algorithm is available as an open-source tool coded in Python and can be used on multiple platforms.}, } @article {pmid36691530, year = {2023}, author = {Oñate, W and Sanz, R}, title = {Analysis of architectures implemented for IIoT.}, journal = {Heliyon}, volume = {9}, number = {1}, pages = {e12868}, pmid = {36691530}, issn = {2405-8440}, abstract = {Several technological blocks are being developed to provide solutions to the requirements necessary for the implementation of industrial IoT. However, this is feasible with the resources offered by the Cloud, such as processing, applications and services. Despite this, there are negative aspects such as bandwidth, Internet service variability, latency, lack of filtering of junk data transmitted to the cloud and security. From another perspective, these situations emerge as challenges that are being studied to meet the needs of this new industrial era, which means that the important contribution of academia, companies and consortiums, are achieving a change of course, by taking advantage of the potential of the Cloud but now from the vicinity or perimeter of a production plant. To achieve this task, some pillars of IoT technology are being used as a basis, such as the designs of Fog Computing Platforms (FCP), Edge Computing (EC) and considering the need for cooperation between IT and operation technologies (IT and OT), with which it is intended to accelerate the paradigm shift that this situation has generated. 
The objective of this study is to show a systematic literature review (SLR) of recent studies on hierarchical and flat peer-to-peer (P2P) architectures implemented for manufacturing IIoT, analyzing those successes and weaknesses derived from them such as latency, security, computing methodologies, virtualization architectures, Fog Computing (FC) in Manufacturing Execution Systems (MES), Quality of Service (QoS) and connectivity, with the aim of motivating possible research points when implementing IIoT with these new technologies.}, } @article {pmid36690091, year = {2023}, author = {Li, Z and Demir, I}, title = {U-net-based semantic classification for flood extent extraction using SAR imagery and GEE platform: A case study for 2019 central US flooding.}, journal = {The Science of the total environment}, volume = {869}, number = {}, pages = {161757}, doi = {10.1016/j.scitotenv.2023.161757}, pmid = {36690091}, issn = {1879-1026}, abstract = {Data-driven models for water body extraction have experienced accelerated growth in recent years, thanks to advances in processing techniques and computational resources, as well as improved data availability. In this study, we modified the standard U-Net, a convolutional neural network (CNN) method, to extract water bodies from scenes captured from Sentinel-1 satellites of selected areas during the 2019 Central US flooding. We compared the results to several benchmark models, including the standard U-Net and ResNet50, an advanced thresholding method, Bmax Otsu, and a recently introduced flood inundation map archive. Then, we looked at how data input types, input resolution, and using pre-trained weights affect the model performance. We adopted a three-category classification frame to test whether and how permanent water and flood pixels behave differently. Most of the data in this study were gathered and pre-processed utilizing the open access Google Earth Engine (GEE) cloud platform. 
According to the results, the adjusted U-Net outperformed all other benchmark models and datasets. Adding a slope layer enhances model performance with the 30 m input data compared to training the model on only VV and VH bands of SAR images. Adding DEM and Height Above Nearest Drainage (HAND) model data layer improved performance for models trained on 10 m datasets. The results also suggested that CNN-based semantic segmentation may fail to correctly classify pixels around narrow river channels. Furthermore, our findings revealed that it is necessary to differentiate permanent water and flood pixels because they behave differently. Finally, the results indicated that using pre-trained weights from a coarse dataset can significantly minimize initial training loss on finer datasets and speed up convergence.}, } @article {pmid36687286, year = {2023}, author = {Ali, O and AlAhmad, A and Kahtan, H}, title = {A review of advanced technologies available to improve the healthcare performance during COVID-19 pandemic.}, journal = {Procedia computer science}, volume = {217}, number = {}, pages = {205-216}, pmid = {36687286}, issn = {1877-0509}, abstract = {Information technology (IT) has enabled the initiation of an innovative healthcare system. An innovative healthcare system integrates new technologies such as cloud computing, the internet of things, and artificial intelligence (AI), to transform the healthcare to be more efficient, more convenient and more personalized. This review aims to identify the key technologies that will help to support an innovative healthcare system. A case study approach was used in this research analysis to enable a researcher to closely analyze the data in a particular context. It presents a case study of the coronavirus (COVID-19) as a means of exploring the use of advanced technologies in an innovative healthcare system to help address a worldwide health crisis. 
An innovative healthcare system can help to promote better patient self-management, reduce costs, relieve staff pressures, help with resource and knowledge management, and improve the patient experience. An innovative healthcare system can reduce the expense and time for research, and increase the overall efficacy of the research. Overall, this research identifies how innovative technologies can improve the performance of the healthcare system. Advanced technologies can assist with pandemic control and can help in the recognition of the virus, clinical treatment, medical protection, intelligent diagnosis, and outbreak analysis. The review provides an analysis of the future prospects of an innovative healthcare system.}, } @article {pmid36686545, year = {2023}, author = {Wang, SH and Satapathy, SC and Xie, MX and Zhang, YD}, title = {ELUCNN for explainable COVID-19 diagnosis.}, journal = {Soft computing}, volume = {}, number = {}, pages = {1-17}, pmid = {36686545}, issn = {1432-7643}, abstract = {COVID-19 is a positive-sense single-stranded RNA virus caused by a strain of coronavirus, severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2). Several noteworthy variants of SARS-CoV-2 were declared by WHO as Alpha, Beta, Gamma, Delta, and Omicron. Till 13/Dec/2022, it has caused 6.65 million death tolls, and over 649 million confirmed positive cases. Based on the convolutional neural network (CNN), this study first proposes a ten-layer CNN as the backbone model. Then, the exponential linear unit (ELU) is introduced to replace ReLU, and the traditional convolutional block is now transformed into conv-ELU. Finally, an ELU-based CNN (ELUCNN) model is proposed for COVID-19 diagnosis. Besides, the MDA strategy is used to enhance the size of the training set. We develop a mobile app integrating ELUCNN, and this web app is run on a client-server modeled structure. 
Ten runs of the tenfold cross-validation experiment show our model yields a sensitivity of 94.41 ± 0.98, a specificity of 94.84 ± 1.21, an accuracy of 94.62 ± 0.96, and an F1 score of 94.61 ± 0.95. The ELUCNN model and mobile app are effective in COVID-19 diagnosis and give better results than 14 state-of-the-art COVID-19 diagnosis models concerning accuracy.}, } @article {pmid36685273, year = {2023}, author = {Yilmaz, OS and Acar, U and Sanli, FB and Gulgen, F and Ates, AM}, title = {Mapping burn severity and monitoring CO content in Türkiye's 2021 Wildfires, using Sentinel-2 and Sentinel-5P satellite data on the GEE platform.}, journal = {Earth science informatics}, volume = {16}, number = {1}, pages = {221-240}, pmid = {36685273}, issn = {1865-0473}, abstract = {This study investigated forest fires in the Mediterranean of Türkiye between July 28, 2021, and August 11, 2021. Burn severity maps were produced with the difference normalised burned ratio index (dNBR) and difference normalised difference vegetation index (dNDVI) using Sentinel-2 images on the Google Earth Engine (GEE) cloud platform. The burned areas were estimated based on the determined burning severity degrees. Vegetation density losses in burned areas were analysed using the normalised difference vegetation index (NDVI) time series. At the same time, the post-fire Carbon Monoxide (CO) column number densities were determined using the Sentinel-5P satellite data. According to the burn severity maps obtained with dNBR, the sum of high and moderate severity areas constitutes 34.64%, 20.57%, 46.43%, 51.50% and 18.88% of the entire area in Manavgat, Gündoğmuş, Marmaris, Bodrum and Köyceğiz districts, respectively. Likewise, according to the burn severity maps obtained with dNDVI, the sum of the areas of very high severity and high severity constitutes 41.17%, 30.16%, 30.50%, 42.35%, and 10.40% of the entire region, respectively. 
In post-fire NDVI time series analyses, sharp decreases were observed in NDVI values from 0.8 to 0.1 in all burned areas. While the Tropospheric CO column number density was 0.03 mol/m[2] in all regions burned before the fire, it was observed that this value increased to 0.14 mol/m[2] after the fire. Moreover, when the area was examined more broadly with Sentinel 5P data, it was observed that the amount of CO increased up to a maximum value of 0.333 mol/m[2]. The results of this study present significant information in terms of determining the severity of forest fires in the Mediterranean region in 2021 and the determination of the CO column number density after the fire. In addition, monitoring polluting gases with RS techniques after forest fires is essential in understanding the extent of the damage they can cause to the environment.}, } @article {pmid36679810, year = {2023}, author = {Yang, H and Zhou, H and Liu, Z and Deng, X}, title = {Energy Optimization of Wireless Sensor Embedded Cloud Computing Data Monitoring System in 6G Environment.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {2}, pages = {}, pmid = {36679810}, issn = {1424-8220}, support = {110822150//Southwest Forestry University/ ; 194309//Southwest Forestry University/ ; 111022001//Southwest Forestry University/ ; }, mesh = {Humans ; *Cloud Computing ; Reproducibility of Results ; *Wireless Technology ; Algorithms ; Physical Phenomena ; }, abstract = {With the construction and development of modern and smart cities, people's lives are becoming more intelligent and diversified. Surveillance systems increasingly play an active role in target tracking, vehicle identification, traffic management, etc. In the 6G network environment, facing the massive and large-scale data information in the monitoring system, it is difficult for the ordinary processing platform to meet this computing demand. This paper provides a data governance solution based on a 6G environment. 
The shortcomings of critical technologies in wireless sensor networks are addressed through ZigBee energy optimization to address the shortage of energy supply and high energy consumption in the practical application of wireless sensor networks. At the same time, this improved routing algorithm is combined with embedded cloud computing to optimize the monitoring system and achieve efficient data processing. The ZigBee-optimized wireless sensor network consumes less energy in practice and also increases the service life of the network, as proven by research and experiments. This optimized data monitoring system ensures data security and reliability.}, } @article {pmid36679800, year = {2023}, author = {Oztoprak, K and Tuncel, YK and Butun, I}, title = {Technological Transformation of Telco Operators towards Seamless IoT Edge-Cloud Continuum.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {2}, pages = {}, pmid = {36679800}, issn = {1424-8220}, mesh = {Humans ; *Technology ; Automation ; *Cloud Computing ; Industry ; Information Technology ; }, abstract = {This article investigates and discusses challenges in the telecommunication field from multiple perspectives, both academic and industry sides are catered for, surveying the main points of technological transformation toward edge-cloud continuum from the view of a telco operator to show the complete picture, including the evolution of cloud-native computing, Software-Defined Networking (SDN), and network automation platforms. The cultural shift in software development and management with DevOps enabled the development of significant technologies in the telecommunication world, including network equipment, application development, and system orchestration. The effect of the aforementioned cultural shift to the application area, especially from the IoT point of view, is investigated. The enormous change in service diversity and delivery capabilities to mass devices are also discussed. 
During the last two decades, desktop and server virtualization has played an active role in the Information Technology (IT) world. With the use of OpenFlow, SDN, and Network Functions Virtualization (NFV), the network revolution has got underway. The shift from monolithic application development and deployment to micro-services changed the whole picture. On the other hand, the data centers evolved in several generations where the control plane cannot cope with all the networks without an intelligent decision-making process, benefiting from the AI/ML techniques. AI also enables operators to forecast demand more accurately, anticipate network load, and adjust capacity and throughput automatically. Going one step further, zero-touch networking and service management (ZSM) is proposed to get high-level human intents to generate a low-level configuration for network elements with validated results, minimizing the ratio of faults caused by human intervention. Harmonizing all signs of progress in different communication technologies enabled the use of edge computing successfully. 
Low-powered (from both energy and processing perspectives) IoT networks have disrupted the customer and end-point demands within the sector, as such paved the path towards devising the edge computing concept, which finalized the whole picture of the edge-cloud continuum.}, } @article {pmid36679795, year = {2023}, author = {Yin, HC and Lien, JJ}, title = {Cascaded Segmentation U-Net for Quality Evaluation of Scraping Workpiece.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {2}, pages = {}, pmid = {36679795}, issn = {1424-8220}, mesh = {*Algorithms ; Cloud Computing ; *Data Compression ; Industry ; Judgment ; Image Processing, Computer-Assisted ; }, abstract = {In the terms of industry, the hand-scraping method is a key technology for achieving high precision in machine tools, and the quality of scraping workpieces directly affects the accuracy and service life of the machine tool. However, most of the quality evaluation of the scraping workpieces is carried out by the scraping worker's subjective judgment, which results in differences in the quality of the scraping workpieces and is time-consuming. Hence, in this research, an edge-cloud computing system was developed to obtain the relevant parameters, which are the percentage of point (POP) and the peak point per square inch (PPI), for evaluating the quality of scraping workpieces. On the cloud computing server-side, a novel network called cascaded segmentation U-Net is proposed to high-quality segment the height of points (HOP) (around 40 μm height) in favor of small datasets training and then carries out a post-processing algorithm that automatically calculates POP and PPI. This research emphasizes the architecture of the network itself instead. 
The design of the components of our network is based on the basic idea of identity function, which not only solves the problem of the misjudgment of the oil ditch and the residual pigment but also allows the network to be end-to-end trained effectively. At the head of the network, a cascaded multi-stage pixel-wise classification is designed for obtaining more accurate HOP borders. Furthermore, the "Cross-dimension Compression" stage is used to fuse high-dimensional semantic feature maps across the depth of the feature maps into low-dimensional feature maps, producing decipherable content for final pixel-wise classification. Our system can achieve an error rate of 3.7% and 0.9 points for POP and PPI. The novel network achieves an Intersection over Union (IoU) of 90.2%.}, } @article {pmid36679792, year = {2023}, author = {Kopras, B and Idzikowski, F and Bossy, B and Kryszkiewicz, P and Bogucka, H}, title = {Communication and Computing Task Allocation for Energy-Efficient Fog Networks.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {2}, pages = {}, pmid = {36679792}, issn = {1424-8220}, support = {Bailout supporting development of young scientists in 2021/22 within task "Optimization 402 of the operation of wireless networks and compression of test data"//Polish Ministry of Education and Science/ ; 2021/41/N/ST7/03941 on "Fresh and Green Cellular IoT Edge Computing Networks - FitNets"//National Science Centre, Poland/ ; }, mesh = {*Communication ; *Algorithms ; Cloud Computing ; Heuristics ; }, abstract = {The well known cloud computing is being extended by the idea of fog with the computing nodes placed closer to end users to allow for task processing with tighter latency requirements. However, offloading of tasks (from end devices to either the cloud or to the fog nodes) should be designed taking energy consumption for both transmission and computation into account. 
The task allocation procedure can be challenging considering the high number of arriving tasks with various computational, communication and delay requirements, and the high number of computing nodes with various communication and computing capabilities. In this paper, we propose an optimal task allocation procedure, minimizing consumed energy for a set of users connected wirelessly to a network composed of FN located at AP and CN. We optimize the assignment of AP and computing nodes to offloaded tasks as well as the operating frequencies of FN. The considered problem is formulated as a Mixed-Integer Nonlinear Programming problem. The utilized energy consumption and delay models as well as their parameters, related to both the computation and communication costs, reflect the characteristics of real devices. The obtained results show that it is profitable to split the processing of tasks between multiple FNs and the cloud, often choosing different nodes for transmission and computation. The proposed algorithm manages to find the optimal allocations and outperforms all the considered alternative allocation strategies resulting in the lowest energy consumption and task rejection rate. Moreover, a heuristic algorithm that decouples the optimization of wireless transmission from implemented computations and wired transmission is proposed. 
It finds the optimal or close-to-optimal solutions for all of the studied scenarios.}, } @article {pmid36679619, year = {2023}, author = {Mirza, IB and Georgakopoulos, D and Yavari, A}, title = {Cyber-Physical-Social Awareness Platform for Comprehensive Situation Awareness.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {2}, pages = {}, pmid = {36679619}, issn = {1424-8220}, mesh = {Humans ; *Awareness ; Cities ; *Disasters ; Information Sources ; Intelligence ; }, abstract = {Cyber-physical-social computing system integrates the interactions between cyber, physical, and social spaces by fusing information from these spaces. The result of this fusion can be used to drive many applications in areas such as intelligent transportation, smart cities, and healthcare. Situation Awareness was initially used in military services to provide knowledge of what is happening in a combat zone but has been used in many other areas such as disaster mitigation. Various applications have been developed to provide situation awareness using either IoT sensors or social media information spaces and, more recently, using both IoT sensors and social media information spaces. The information from these spaces is heterogeneous and, at their intersection, is sparse. In this paper, we propose a highly scalable, novel Cyber-physical-social Awareness (CPSA) platform that provides situation awareness by using and intersecting information from both IoT sensors and social media. By combining and fusing information from both social media and IoT sensors, the CPSA platform provides more comprehensive and accurate situation awareness than any other existing solutions that rely only on data from social media and IoT sensors. The CPSA platform achieves that by semantically describing and integrating the information extracted from sensors and social media spaces and intersects this information for enriching situation awareness. 
The CPSA platform uses user-provided situation models to refine and intersect cyber, physical, and social information. The CPSA platform analyses social media and IoT data using pretrained machine learning models deployed in the cloud, and provides coordination between information sources and fault tolerance. The paper describes the implementation and evaluation of the CPSA platform. The evaluation of the CPSA platform is measured in terms of capabilities such as the ability to semantically describe and integrate heterogenous information, fault tolerance, and time constraints such as processing time and throughput when performing real-world experiments. The evaluation shows that the CPSA platform can reliably process and intersect with large volumes of IoT sensor and social media data to provide enhanced situation awareness.}, } @article {pmid36679524, year = {2023}, author = {Chen, J and Zhou, J and Liu, L and Shu, C and Shen, M and Yao, W}, title = {Sow Farrowing Early Warning and Supervision for Embedded Board Implementations.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {2}, pages = {}, pmid = {36679524}, issn = {1424-8220}, support = {32272929//National Natural Science Foundation of China/ ; KYCYXT2022019//Fundamental Research Funds for the Central Universities/ ; }, mesh = {Animals ; Swine ; Humans ; Animals, Newborn ; *Breeding ; }, abstract = {Sow farrowing is an important part of pig breeding. The accurate and effective early warning of sow behaviors in farrowing helps breeders determine whether it is necessary to intervene with the farrowing process in a timely manner and is thus essential for increasing the survival rate of piglets and the profits of pig farms. For large pig farms, human resources and costs are important considerations in farrowing supervision. 
The existing method, which uses cloud computing-based deep learning to supervise sow farrowing, has a high equipment cost and requires uploading all data to a cloud data center, requiring a large network bandwidth. Thus, this paper proposes an approach for the early warning and supervision of farrowing behaviors based on the embedded artificial-intelligence computing platform (NVIDIA Jetson Nano). This lightweight deep learning method allows the rapid processing of sow farrowing video data at edge nodes, reducing the bandwidth requirement and ensuring data security in the network transmission. Experiments indicated that after the model was migrated to the Jetson Nano, its precision of sow postures and newborn piglets detection was 93.5%, with a recall rate of 92.2%, and the detection speed was increased by a factor larger than 8. The early warning of 18 approaching farrowing (5 h) sows was tested. The mean error of warning was 1.02 h.}, } @article {pmid36679463, year = {2023}, author = {Hussain, MM and Azar, AT and Ahmed, R and Umar Amin, S and Qureshi, B and Dinesh Reddy, V and Alam, I and Khan, ZI}, title = {SONG: A Multi-Objective Evolutionary Algorithm for Delay and Energy Aware Facility Location in Vehicular Fog Networks.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {2}, pages = {}, pmid = {36679463}, issn = {1424-8220}, support = {TBA//Prince Sultan University/ ; }, mesh = {*Algorithms ; *Transportation ; Physical Phenomena ; Biological Evolution ; }, abstract = {With the emergence of delay- and energy-critical vehicular applications, forwarding sense-actuate data from vehicles to the cloud became practically infeasible. Therefore, a new computational model called Vehicular Fog Computing (VFC) was proposed. It offloads the computation workload from passenger devices (PDs) to transportation infrastructures such as roadside units (RSUs) and base stations (BSs), called static fog nodes. 
It can also exploit the underutilized computation resources of nearby vehicles that can act as vehicular fog nodes (VFNs) and provide delay- and energy-aware computing services. However, the capacity planning and dimensioning of VFC, which come under a class of facility location problems (FLPs), is a challenging issue. The complexity arises from the spatio-temporal dynamics of vehicular traffic, varying resource demand from PD applications, and the mobility of VFNs. This paper proposes a multi-objective optimization model to investigate the facility location in VFC networks. The solutions to this model generate optimal VFC topologies pertaining to an optimized trade-off (Pareto front) between the service delay and energy consumption. Thus, to solve this model, we propose a hybrid Evolutionary Multi-Objective (EMO) algorithm called Swarm Optimized Non-dominated sorting Genetic algorithm (SONG). It combines the convergence and search efficiency of two popular EMO algorithms: the Non-dominated Sorting Genetic Algorithm (NSGA-II) and Speed-constrained Particle Swarm Optimization (SMPSO). First, we solve an example problem using the SONG algorithm to illustrate the delay-energy solution frontiers and plotted the corresponding layout topology. Subsequently, we evaluate the evolutionary performance of the SONG algorithm on real-world vehicular traces against three quality indicators: Hyper-Volume (HV), Inverted Generational Distance (IGD) and CPU delay gap. 
The empirical results show that SONG exhibits improved solution quality over the NSGA-II and SMPSO algorithms and hence can be utilized as a potential tool by the service providers for the planning and design of VFC networks.}, } @article {pmid36679436, year = {2023}, author = {Gec, S and Stankovski, V and Lavbič, D and Kochovski, P}, title = {A Recommender System for Robust Smart Contract Template Classification.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {2}, pages = {}, pmid = {36679436}, issn = {1424-8220}, support = {957338//European Commission/ ; P2-0426//Research Agency of the Republic of Slovenia/ ; }, mesh = {*Ecosystem ; *Algorithms ; Cloud Computing ; Documentation ; Trust ; }, abstract = {IoT environments are becoming increasingly heterogeneous in terms of their distributions and included entities by collaboratively involving not only data centers known from Cloud computing but also the different types of third-party entities that can provide computing resources. To transparently provide such resources and facilitate trust between the involved entities, it is necessary to develop and implement smart contracts. However, when developing smart contracts, developers face many challenges and concerns, such as security, contracts' correctness, a lack of documentation and/or design patterns, and others. To address this problem, we propose a new recommender system to facilitate the development and implementation of low-cost EVM-enabled smart contracts. The recommender system's algorithm provides the smart contract developer with smart contract templates that match their requirements and that are relevant to the typology of the fog architecture. It mainly relies on OpenZeppelin, a modular, reusable, and secure smart contract library that we use when classifying the smart contracts. The evaluation results indicate that by using our solution, the smart contracts' development times are overall reduced. 
Moreover, such smart contracts are sustainable for fog-computing IoT environments and applications in low-cost EVM-based ledgers. The recommender system has been successfully implemented in the ONTOCHAIN ecosystem, thus presenting its applicability.}, } @article {pmid36679409, year = {2023}, author = {Sakaguchi, Y and Bakibillah, ASM and Kamal, MAS and Yamada, K}, title = {A Cyber-Physical Framework for Optimal Coordination of Connected and Automated Vehicles on Multi-Lane Freeways.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {2}, pages = {}, pmid = {36679409}, issn = {1424-8220}, support = {Grant-in-Aids for Scientific Research (C) 20K04531//Japan Society for the Promotion of Science/ ; }, mesh = {Humans ; *Accidents, Traffic ; Autonomous Vehicles ; *Automobile Driving ; Acceleration ; Travel ; }, abstract = {Uncoordinated driving behavior is one of the main reasons for bottlenecks on freeways. This paper presents a novel cyber-physical framework for optimal coordination of connected and automated vehicles (CAVs) on multi-lane freeways. We consider that all vehicles are connected to a cloud-based computing framework, where a traffic coordination system optimizes the target trajectories of individual vehicles for smooth and safe lane changing or merging. In the proposed framework, the vehicles are coordinated into groups or platoons, and their trajectories are successively optimized in a receding horizon control (RHC) approach. Optimization of the traffic coordination system aims to provide sufficient gaps when a lane change is necessary while minimizing the speed deviation and acceleration of all vehicles. The coordination information is then provided to individual vehicles equipped with local controllers, and each vehicle decides its control acceleration to follow the target trajectories while ensuring a safe distance. Our proposed method guarantees fast optimization and can be used in real-time. 
The proposed coordination system was evaluated using microscopic traffic simulations and benchmarked with the traditional driving (human-based) system. The results show significant improvement in fuel economy, average velocity, and travel time for various traffic volumes.}, } @article {pmid36679360, year = {2023}, author = {Khan, AQ and Nikolov, N and Matskin, M and Prodan, R and Roman, D and Sahin, B and Bussler, C and Soylu, A}, title = {Smart Data Placement Using Storage-as-a-Service Model for Big Data Pipelines.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {2}, pages = {}, pmid = {36679360}, issn = {1424-8220}, mesh = {*Algorithms ; *Big Data ; Software ; Computers ; Computer Security ; }, abstract = {Big data pipelines are developed to process data characterized by one or more of the three big data features, commonly known as the three Vs (volume, velocity, and variety), through a series of steps (e.g., extract, transform, and move), making the ground work for the use of advanced analytics and ML/AI techniques. Computing continuum (i.e., cloud/fog/edge) allows access to virtually infinite amount of resources, where data pipelines could be executed at scale; however, the implementation of data pipelines on the continuum is a complex task that needs to take computing resources, data transmission channels, triggers, data transfer methods, integration of message queues, etc., into account. The task becomes even more challenging when data storage is considered as part of the data pipelines. Local storage is expensive, hard to maintain, and comes with several challenges (e.g., data availability, data security, and backup). The use of cloud storage, i.e., storage-as-a-service (StaaS), instead of local storage has the potential of providing more flexibility in terms of scalability, fault tolerance, and availability. 
In this article, we propose a generic approach to integrate StaaS with data pipelines, i.e., computation on an on-premise server or on a specific cloud, but integration with StaaS, and develop a ranking method for available storage options based on five key parameters: cost, proximity, network performance, server-side encryption, and user weights/preferences. The evaluation carried out demonstrates the effectiveness of the proposed approach in terms of data transfer performance, utility of the individual parameters, and feasibility of dynamic selection of a storage option based on four primary user scenarios.}, } @article {pmid36673212, year = {2022}, author = {Gavreev, MA and Kiktenko, EO and Mastiukova, AS and Fedorov, AK}, title = {Suppressing Decoherence in Quantum State Transfer with Unitary Operations.}, journal = {Entropy (Basel, Switzerland)}, volume = {25}, number = {1}, pages = {}, pmid = {36673212}, issn = {1099-4300}, abstract = {Decoherence is the fundamental obstacle limiting the performance of quantum information processing devices. The problem of transmitting a quantum state (known or unknown) from one place to another is of great interest in this context. In this work, by following the recent theoretical proposal, we study an application of quantum state-dependent pre- and post-processing unitary operations for protecting the given (multi-qubit) quantum state against the effect of decoherence acting on all qubits. We observe the increase in the fidelity of the output quantum state both in a quantum emulation experiment, where all protecting unitaries are perfect, and in a real experiment with a cloud-accessible quantum processor, where protecting unitaries themselves are affected by the noise. We expect the considered approach to be useful for analyzing capabilities of quantum information processing devices in transmitting known quantum states. 
We also demonstrate the applicability of the developed approach for suppressing decoherence in the process of distributing a two-qubit state over remote physical qubits of a quantum processor.}, } @article {pmid36670240, year = {2023}, author = {Yıldırım, E and Cicioğlu, M and Çalhan, A}, title = {Fog-cloud architecture-driven Internet of Medical Things framework for healthcare monitoring.}, journal = {Medical & biological engineering & computing}, volume = {61}, number = {5}, pages = {1133-1147}, pmid = {36670240}, issn = {1741-0444}, mesh = {Humans ; *COVID-19 ; Internet ; Algorithms ; Cloud Computing ; Communication ; }, abstract = {The new coronavirus disease (COVID-19) has increased the need for new technologies such as the Internet of Medical Things (IoMT), Wireless Body Area Networks (WBANs), and cloud computing in the health sector as well as in many areas. These technologies have also made it possible for billions of devices to connect to the internet and communicate with each other. In this study, an Internet of Medical Things (IoMT) framework consisting of Wireless Body Area Networks (WBANs) has been designed and the health big data from WBANs have been analyzed using fog and cloud computing technologies. Fog computing is used for fast and easy analysis, and cloud computing is used for time-consuming and complex analysis. The proposed IoMT framework is presented with a diabetes prediction scenario. The diabetes prediction process is carried out on fog with fuzzy logic decision-making and is achieved on cloud with support vector machine (SVM), random forest (RF), and artificial neural network (ANN) as machine learning algorithms. The dataset produced in WBANs is used for big data analysis in the scenario for both fuzzy logic and machine learning algorithm. The fuzzy logic gives 64% accuracy performance in fog and SVM, RF, and ANN have 89.5%, 88.4%, and 87.2% accuracy performance respectively in the cloud for diabetes prediction. 
In addition, the throughput and delay results of heterogeneous nodes with different priorities in the WBAN scenario created using the IEEE 802.15.6 standard and AODV routing protocol have been also analyzed. Fog-Cloud architecture-driven for IoMT networks • An IoMT framework is designed with important components and functions such as fog and cloud node capabilities. •Real-time data has been obtained from WBANs in Riverbed Modeler for a more realistic performance analysis of IoMT. •Fuzzy logic and machine learning algorithms (RF, SVM, and ANN) are used for diabetes predictions. •Intra and Inter-WBAN communications (IEEE 802.15.6 standard) are modeled as essential components of the IoMT framework with all functions.}, } @article {pmid36658205, year = {2023}, author = {Kazemi Garajeh, M and Salmani, B and Zare Naghadehi, S and Valipoori Goodarzi, H and Khasraei, A}, title = {An integrated approach of remote sensing and geospatial analysis for modeling and predicting the impacts of climate change on food security.}, journal = {Scientific reports}, volume = {13}, number = {1}, pages = {1057}, pmid = {36658205}, issn = {2045-2322}, mesh = {Humans ; *Climate Change ; *Remote Sensing Technology ; Soil ; Agriculture/methods ; Food Security ; }, abstract = {The agriculture sector provides the majority of food supplies, ensures food security, and promotes sustainable development. Due to recent climate changes as well as trends in human population growth and environmental degradation, the need for timely agricultural information continues to rise. This study analyzes and predicts the impacts of climate change on food security (FS). For 2002-2021, Landsat, MODIS satellite images and predisposing variables (land surface temperature (LST), evapotranspiration, precipitation, sunny days, cloud ratio, soil salinity, soil moisture, groundwater quality, soil types, digital elevation model, slope, and aspect) were used. 
First, we used a deep learning convolutional neural network (DL-CNN) based on the Google Earth Engine (GEE) to detect agricultural land (AL). A remote sensing-based approach combined with the analytical network process (ANP) model was used to identify frost-affected areas. We then analyzed the relationship between climatic, geospatial, and topographical variables and AL and frost-affected areas. We found negative correlations of - 0.80, - 0.58, - 0.43, and - 0.45 between AL and LST, evapotranspiration, cloud ratio, and soil salinity, respectively. There is a positive correlation between AL and precipitation, sunny days, soil moisture, and groundwater quality of 0.39, 0.25, 0.21, and 0.77, respectively. The correlation between frost-affected areas and LST, evapotranspiration, cloud ratio, elevation, slope, and aspect are 0.55, 0.40, 0.52, 0.35, 0.45, and 0.39. Frost-affected areas have negative correlations with precipitation, sunny day, and soil moisture of - 0.68, - 0.23, and - 0.38, respectively. Our findings show that the increase in LST, evapotranspiration, cloud ratio, and soil salinity is associated with the decrease in AL. Additionally, AL decreases with a decreasing in precipitation, sunny days, soil moisture, and groundwater quality. It was also found that as LST, evapotranspiration, cloud ratio, elevation, slope, and aspect increase, frost-affected areas increase as well. Furthermore, frost-affected areas increase when precipitation, sunny days, and soil moisture decrease. Finally, we predicted the FS threat for 2030, 2040, 2050, and 2060 using the CA-Markov method. According to the results, the AL will decrease by 0.36% from 2030 to 2060. Between 2030 and 2060, however, the area with very high frost-affected will increase by about 10.64%. In sum, this study accentuates the critical impacts of climate change on the FS in the region. 
Our findings and proposed methods could be helpful for researchers to model and quantify the climate change impacts on the FS in different regions and periods.}, } @article {pmid36658166, year = {2023}, author = {Tsakanikas, V and Dagiuklas, T and Iqbal, M and Wang, X and Mumtaz, S}, title = {An intelligent model for supporting edge migration for virtual function chains in next generation internet of things.}, journal = {Scientific reports}, volume = {13}, number = {1}, pages = {1063}, pmid = {36658166}, issn = {2045-2322}, abstract = {The developments on next generation IoT sensing devices, with the advances on their low power computational capabilities and high speed networking has led to the introduction of the edge computing paradigm. Within an edge cloud environment, services may generate and consume data locally, without involving cloud computing infrastructures. Aiming to tackle the low computational resources of the IoT nodes, Virtual-Function-Chain has been proposed as an intelligent distribution model for exploiting the maximum of the computational power at the edge, thus enabling the support of demanding services. An intelligent migration model with the capacity to support Virtual-Function-Chains is introduced in this work. According to this model, migration at the edge can support individual features of a Virtual-Function-Chain. First, auto-healing can be implemented with cold migrations, if a Virtual Function fails unexpectedly. Second, a Quality of Service monitoring model can trigger live migrations, aiming to avoid edge devices overload. The evaluation studies of the proposed model revealed that it has the capacity to increase the robustness of an edge-based service on low-powered IoT devices. 
Finally, comparison with similar frameworks, like Kubernetes, showed that the migration model can effectively react to edge network fluctuations.}, } @article {pmid36654019, year = {2022}, author = {Yin, Y and Wang, Z and Zhou, W and Gan, Y and Zhang, Y}, title = {Group key agreement protocol for edge computing in industrial internet.}, journal = {Mathematical biosciences and engineering : MBE}, volume = {19}, number = {12}, pages = {12730--12743}, doi = {10.3934/mbe.2022594}, pmid = {36654019}, issn = {1551-0018}, mesh = {*Computer Security ; Cloud Computing ; Internet ; *Outsourced Services ; Communication ; }, abstract = {Industrial internet security is a critical component of cyberspace safety. Furthermore, the encryption protocol is a critical component of cyberspace security. Due to the rapid development of industrial internet and edge computing, increasingly more devices are outsourcing their data to cloud servers to save costs. Edge devices should have a secure session key to reduce communication costs and share information. However, most key generation and storage are completed by a centralized third-party organization, which carries some security risks. In this context, this paper will propose a lightweight multi-dimensional virtual iteration of the group key agreement protocol. Group key agreement protocol allows for one-at-a-time encryption and timely key updates without the involvement of a trusted third party, and each device in the network can agree on a large number of keys. 
According to the analysis of this protocol, it has high security, rapid computation speed, and little storage space.}, } @article {pmid36648445, year = {2023}, author = {Deutsch, EW and Mendoza, L and Shteynberg, DD and Hoopmann, MR and Sun, Z and Eng, JK and Moritz, RL}, title = {Trans-Proteomic Pipeline: Robust Mass Spectrometry-Based Proteomics Data Analysis Suite.}, journal = {Journal of proteome research}, volume = {22}, number = {2}, pages = {615-624}, pmid = {36648445}, issn = {1535-3907}, support = {R24 GM127667/GM/NIGMS NIH HHS/United States ; U19 AG023122/AG/NIA NIH HHS/United States ; R01 HL133135/HL/NHLBI NIH HHS/United States ; R01 GM087221/GM/NIGMS NIH HHS/United States ; S10 OD026936/OD/NIH HHS/United States ; }, mesh = {*Proteomics/methods ; *Software ; Mass Spectrometry ; Probability ; Data Analysis ; }, abstract = {The Trans-Proteomic Pipeline (TPP) mass spectrometry data analysis suite has been in continual development and refinement since its first tools, PeptideProphet and ProteinProphet, were published 20 years ago. The current release provides a large complement of tools for spectrum processing, spectrum searching, search validation, abundance computation, protein inference, and more. Many of the tools include machine-learning modeling to extract the most information from data sets and build robust statistical models to compute the probabilities that derived information is correct. Here we present the latest information on the many TPP tools, and how TPP can be deployed on various platforms from personal Windows laptops to Linux clusters and expansive cloud computing environments. We describe tutorials on how to use TPP in a variety of ways and describe synergistic projects that leverage TPP. 
We conclude with plans for continued development of TPP.}, } @article {pmid36645733, year = {2023}, author = {Yazdani, A and Dashti, SF and Safdari, Y}, title = {A fog-assisted information model based on priority queue and clinical decision support systems.}, journal = {Health informatics journal}, volume = {29}, number = {1}, pages = {14604582231152792}, doi = {10.1177/14604582231152792}, pmid = {36645733}, issn = {1741-2811}, mesh = {Humans ; Cloud Computing ; *Decision Support Systems, Clinical ; *Telemedicine ; }, abstract = {OBJECTIVES: Telehealth monitoring applications are latency-sensitive. The current fog-based telehealth monitoring models are mainly focused on the role of the fog computing in improving response time and latency. In this paper, we have introduced a new service called "priority queue" in fog layer, which is programmed to prioritize the events sent by different sources in different environments to assist the cloud layer with reducing response time and latency.

MATERIAL AND METHODS: We analyzed the performance of the proposed model in a fog-enabled cloud environment with the IFogSim toolkit. To provide a comparison of cloud and fog computing environments, three parameters, namely response time, latency, and network usage, were used. We used the Pima Indian diabetes dataset to evaluate the model.

RESULT: The fog layer proved to be very effective in improving the response time while handling emergencies using priority queues. The proposed model reduces response time by 25.8%, latency by 36.18%, bandwidth by 28.17%, and network usage time by 41.4% as compared to the cloud.

CONCLUSION: By combining priority queues and fog computing in this study, the network usage, latency time, bandwidth, and response time were significantly reduced as compared to cloud computing.}, } @article {pmid36642685, year = {2023}, author = {Akgün, FA and Fındık, Y and Solak, S and Uçar, MHB and Büyükçavuş, MH and Baykul, T}, title = {Face comparison analysis of patients with orthognathic surgery treatment using cloud computing-based face recognition application programming interfaces.}, journal = {American journal of orthodontics and dentofacial orthopedics : official publication of the American Association of Orthodontists, its constituent societies, and the American Board of Orthodontics}, volume = {163}, number = {5}, pages = {710--719}, doi = {10.1016/j.ajodo.2022.05.023}, pmid = {36642685}, issn = {1097-6752}, mesh = {Humans ; *Orthognathic Surgery ; Face ; *Facial Recognition ; Cloud Computing ; *Orthognathic Surgical Procedures ; Software ; }, abstract = {INTRODUCTION: This study aimed to investigate whether the postoperative change in patients after orthognathic surgery, whose facial aesthetics was affected, led to detectable differences using Microsoft Azure, Amazon Web Services Rekognition, and Face[++], which were commercially available face recognition systems.

METHODS: Photographs of 35 patients after orthognathic surgery were analyzed using 3 well-known cloud computing-based facial recognition application programming interfaces to compute similarity scores between preoperative and postoperative photographs. The preoperative, relaxed, smiling, profile, and semiprofile photographs of the patients were compared separately to validate the relevant application programming interfaces. Patient characteristics and type of surgery were recorded for statistical analysis. Kruskal-Wallis rank sum tests were performed to analyze the relationship between patient characteristics and similarity scores. Multiple-comparison Wilcoxon rank sum tests were performed on the statistically significant characteristics.

RESULTS: The similarity scores in the Face[++] program were lower than those in the Microsoft Azure and Amazon Web Services Rekognition. In addition, the similarity scores were higher in smiling photographs. A statistically significant difference was found in similarity scores between relaxed and smiling photographs according to different programs (P <0.05). For all 3 facial recognition programs, comparable similarity scores were found in all photographs taken before and after surgery across sex, type of surgery, and type of surgical approach. The type of surgery and surgical approach, sex, and amount of surgical movement did not significantly affect similarity scores in any facial recognition programs (P >0.05).

CONCLUSIONS: The similarity scores between the photographs before and after orthognathic surgery were high, suggesting that the software algorithms might value measurements on the basis of upper-face landmarks more than lower-face measurements.}, } @article {pmid36641699, year = {2023}, author = {Koch, M and Arlandini, C and Antonopoulos, G and Baretta, A and Beaujean, P and Bex, GJ and Biancolini, ME and Celi, S and Costa, E and Drescher, L and Eleftheriadis, V and Fadel, NA and Fink, A and Galbiati, F and Hatzakis, I and Hompis, G and Lewandowski, N and Memmolo, A and Mensch, C and Obrist, D and Paneta, V and Papadimitroulas, P and Petropoulos, K and Porziani, S and Savvidis, G and Sethia, K and Strakos, P and Svobodova, P and Vignali, E}, title = {HPC+ in the medical field: Overview and current examples.}, journal = {Technology and health care : official journal of the European Society for Engineering and Medicine}, volume = {31}, number = {4}, pages = {1509--1523}, pmid = {36641699}, issn = {1878-7401}, mesh = {Child ; Humans ; *Computing Methodologies ; Image Processing, Computer-Assisted ; *Software ; }, abstract = {BACKGROUND: To say data is revolutionising the medical sector would be a vast understatement. The amount of medical data available today is unprecedented and has the potential to enable to date unseen forms of healthcare. To process this huge amount of data, an equally huge amount of computing power is required, which cannot be provided by regular desktop computers. These areas can be (and already are) supported by High-Performance-Computing (HPC), High-Performance Data Analytics (HPDA), and AI (together "HPC+").

OBJECTIVE: This overview article aims to show state-of-the-art examples of studies supported by the National Competence Centres (NCCs) in HPC+ within the EuroCC project, employing HPC, HPDA and AI for medical applications.

METHOD: The included studies on different applications of HPC in the medical sector were sourced from the National Competence Centres in HPC and compiled into an overview article. Methods include the application of HPC+ for medical image processing, high-performance medical and pharmaceutical data analytics, an application for pediatric dosimetry, and a cloud-based HPC platform to support systemic pulmonary shunting procedures.

RESULTS: This article showcases state-of-the-art applications and large-scale data analytics in the medical sector employing HPC+ within surgery, medical image processing in diagnostics, nutritional support of patients in hospitals, treating congenital heart diseases in children, and within basic research.

CONCLUSION: HPC+ support scientific fields from research to industrial applications in the medical area, enabling researchers to run faster and more complex calculations, simulations and data analyses for the direct benefit of patients, doctors, clinicians and as an accelerator for medical research.}, } @article {pmid36637558, year = {2022}, author = {Ye, W and Wang, J and Tian, H and Quan, H}, title = {Public auditing for real-time medical sensor data in cloud-assisted HealthIIoT system.}, journal = {Frontiers of optoelectronics}, volume = {15}, number = {1}, pages = {29}, pmid = {36637558}, issn = {2095-2767}, abstract = {With the advancement of industrial internet of things (IIoT), wireless medical sensor networks (WMSNs) have been widely introduced in modern healthcare systems to collect real-time medical data from patients, which is known as HealthIIoT. Considering the limited computing and storage capabilities of lightweight HealthIIoT devices, it is necessary to upload these data to remote cloud servers for storage and maintenance. However, there are still some serious security issues within outsourcing medical sensor data to the cloud. One of the most significant challenges is how to ensure the integrity of these data, which is a prerequisite for providing precise medical diagnosis and treatment. To meet this challenge, we propose a novel and efficient public auditing scheme, which is suitable for cloud-assisted HealthIIoT system. Specifically, to address the contradiction between the high real-time requirement of medical sensor data and the limited computing power of HealthIIoT devices, a new online/offline tag generation algorithm is designed to improve preprocessing efficiency; to protect medical data privacy, a secure hash function is employed to blind the data proof. We formally prove the security of the presented scheme, and evaluate the performance through detailed experimental comparisons with the state-of-the-art ones. 
The results show that the presented scheme can greatly improve the efficiency of tag generation, while achieving better auditing performance than previous schemes.}, } @article {pmid36636525, year = {2023}, author = {Wang, SH and Khan, MA and Zhu, Z and Zhang, YD}, title = {WACPN: A Neural Network for Pneumonia Diagnosis.}, journal = {Computer systems science and engineering}, volume = {45}, number = {1}, pages = {21--34}, pmid = {36636525}, issn = {2766-483X}, support = {AA/18/3/34220/BHF_/British Heart Foundation/United Kingdom ; MC_PC_17171/MRC_/Medical Research Council/United Kingdom ; }, abstract = {Community-acquired pneumonia (CAP) is considered a sort of pneumonia developed outside hospitals and clinics. To diagnose community-acquired pneumonia (CAP) more efficiently, we proposed a novel neural network model. We introduce the 2-dimensional wavelet entropy (2d-WE) layer and an adaptive chaotic particle swarm optimization (ACP) algorithm to train the feed-forward neural network. The ACP uses adaptive inertia weight factor (AIWF) and Rossler attractor (RA) to improve the performance of standard particle swarm optimization. The final combined model is named WE-layer ACP-based network (WACPN), which attains a sensitivity of 91.87±1.37%, a specificity of 90.70±1.19%, a precision of 91.01±1.12%, an accuracy of 91.29±1.09%, F1 score of 91.43±1.09%, an MCC of 82.59±2.19%, and an FMI of 91.44±1.09%. The AUC of this WACPN model is 0.9577. We find that the maximum deposition level chosen as four can obtain the best result. Experiments demonstrate the effectiveness of both AIWF and RA. Finally, this proposed WACPN is efficient in diagnosing CAP and superior to six state-of-the-art models. 
Our model will be distributed to the cloud computing environment.}, } @article {pmid36627353, year = {2023}, author = {Saxena, D and Singh, AK and Lee, CN and Buyya, R}, title = {A sustainable and secure load management model for green cloud data centres.}, journal = {Scientific reports}, volume = {13}, number = {1}, pages = {491}, pmid = {36627353}, issn = {2045-2322}, mesh = {*Algorithms ; *Neural Networks, Computer ; Cloud Computing ; }, abstract = {The massive upsurge in cloud resource demand and inefficient load management stave off the sustainability of Cloud Data Centres (CDCs) resulting in high energy consumption, resource contention, excessive carbon emission, and security threats. In this context, a novel Sustainable and Secure Load Management (SaS-LM) Model is proposed to enhance the security for users with sustainability for CDCs. The model estimates and reserves the required resources viz., compute, network, and storage and dynamically adjust the load subject to maximum security and sustainability. An evolutionary optimization algorithm named Dual-Phase Black Hole Optimization (DPBHO) is proposed for optimizing a multi-layered feed-forward neural network and allowing the model to estimate resource usage and detect probable congestion. Further, DPBHO is extended to a Multi-objective DPBHO algorithm for a secure and sustainable VM allocation and management to minimize the number of active server machines, carbon emission, and resource wastage for greener CDCs. SaS-LM is implemented and evaluated using benchmark real-world Google Cluster VM traces. 
The proposed model is compared with state-of-the-arts which reveals its efficacy in terms of reduced carbon emission and energy consumption up to 46.9% and 43.9%, respectively with improved resource utilization up to 16.5%.}, } @article {pmid36624887, year = {2023}, author = {Saba, T and Rehman, A and Haseeb, K and Alam, T and Jeon, G}, title = {Cloud-edge load balancing distributed protocol for IoE services using swarm intelligence.}, journal = {Cluster computing}, volume = {}, number = {}, pages = {1--11}, pmid = {36624887}, issn = {1386-7857}, abstract = {Rapid development of the Internet of Everything (IoE) and cloud services offer a vital role in the growth of smart applications. It provides scalability with the collaboration of cloud servers and copes with a big amount of collected data for network systems. Although, edge computing supports efficient utilization of communication bandwidth, and latency requirements to facilitate smart embedded systems. However, it faces significant research issues regarding data aggregation among heterogeneous network services and objects. Moreover, distributed systems are more precise for data access and storage, thus machine-to-machine is needed to be secured from unpredictable events. As a result, this research proposed secured data management with distributed load balancing protocol using particle swarm optimization, which aims to decrease the response time for cloud users and effectively maintain the integrity of network communication. It combines distributed computing and shift high cost computations closer to the requesting node to reduce latency and transmission overhead. Moreover, the proposed work also protects the communicating machines from malicious devices by evaluating the trust in a controlled manner. 
Simulation results revealed a significant performance of the proposed protocol in comparison to other solutions in terms of energy consumption by 20%, success rate by 17%, end-to-end delay by 14%, and network cost by 19% as average in the light of various performance metrics.}, } @article {pmid36624868, year = {2023}, author = {Liu, X and Gao, A and Chen, C and Moghimi, MM}, title = {Lightweight similarity checking for English literatures in mobile edge computing.}, journal = {Journal of cloud computing (Heidelberg, Germany)}, volume = {12}, number = {1}, pages = {3}, pmid = {36624868}, issn = {2192-113X}, abstract = {With the advent of information age, mobile devices have become one of the major convenient equipment that aids people's daily office activities such as academic research, one of whose major tasks is to check the repetition rate or similarity among different English literatures. Traditional literature similarity checking solutions in cloud paradigm often call for intensive computational cost and long waiting time. To tackle this issue, in this paper, we modify the traditional literature similarity checking solution in cloud paradigm to make it suitable for the light-weight mobile edge environment. Furthermore, we put forward a lightweight similarity checking approach SC MEC for English literatures in mobile edge computing environment. To validate the advantages of SC MEC , we have designed massive experiments on a dataset. 
The reported experimental results show that SC MEC can deliver a satisfactory similarity checking result of literatures compared to other existing approaches.}, } @article {pmid36620727, year = {2022}, author = {Wegner, T and Lassnig, M and Ueberholz, P and Zeitnitz, C}, title = {Simulation and Evaluation of Cloud Storage Caching for Data Intensive Science.}, journal = {Computing and software for big science}, volume = {6}, number = {1}, pages = {5}, pmid = {36620727}, issn = {2510-2044}, abstract = {A common task in scientific computing is the data reduction. This workflow extracts the most important information from large input data and stores it in smaller derived data objects. The derived data objects can then be used for further analysis. Typically, these workflows use distributed storage and computing resources. A straightforward setup of storage media would be low-cost tape storage and higher-cost disk storage. The large, infrequently accessed input data are stored on tape storage. The smaller, frequently accessed derived data is stored on disk storage. In a best-case scenario, the large input data is only accessed very infrequently and in a well-planned pattern. However, practice shows that often the data has to be processed continuously and unpredictably. This can significantly reduce tape storage performance. A common approach to counter this is storing copies of the large input data on disk storage. This contribution evaluates an approach that uses cloud storage resources to serve as a flexible cache or buffer, depending on the computational workflow. The proposed model is explored for the case of continuously processed data. For the evaluation, a simulation tool was developed, which can be used to analyse models related to storage and network resources. We show that using commercial cloud storage can reduce on-premises disk storage requirements, while maintaining an equal throughput of jobs. 
Moreover, the key metrics of the model are discussed, and an approach is described, which uses the simulation to assist with the decision process of using commercial cloud storage. The goal is to investigate approaches and propose new evaluation methods to overcome future data challenges.}, } @article {pmid36617078, year = {2023}, author = {Harach, T and Simonik, P and Vrtkova, A and Mrovec, T and Klein, T and Ligori, JJ and Koreny, M}, title = {Novel Method for Determining Internal Combustion Engine Dysfunctions on Platform as a Service.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {1}, pages = {}, pmid = {36617078}, issn = {1424-8220}, support = {CZ.02.1.01/0.0/0.0/17_049/0008425//A Research Platform focused on Industry 4.0 and Robotics in Ostrava Agglomeration/ ; CZ.02.1.01/0.0/0.0/16_019/0000867//European Regional Development Fund in the Research Centre of Advanced Mechatronic Systems/ ; SP2022/9//VSB - Technical University of Ostrava, Czech Republic/ ; }, mesh = {*Vehicle Emissions/analysis ; *Machine Learning ; Cloud Computing ; Gasoline/analysis ; }, abstract = {This article deals with a unique, new powertrain diagnostics platform at the level of a large number of EU25 inspection stations. Implemented method uses emission measurement data and additional data from significant sample of vehicles. An original technique using machine learning that uses 9 static testing points (defined by constant engine load and constant engine speed), volume of engine combustion chamber, EURO emission standard category, engine condition state coefficient and actual mileage is applied. An example for dysfunction detection using exhaust emission analyses is described in detail. The test setup is also described, along with the procedure for data collection using a Mindsphere cloud data processing platform. Mindsphere is a core of the new Platform as a Service (Paas) for data processing from multiple testing facilities. 
An evaluation on a fleet level which used quantile regression method is implemented. In this phase of the research, real data was used, as well as data defined on the basis of knowledge of the manifestation of internal combustion engine defects. As a result of the application of the platform and the evaluation method, it is possible to classify combustion engine dysfunctions. These are defects that cannot be detected by self-diagnostic procedures for cars up to the EURO 6 level.}, } @article {pmid36616922, year = {2022}, author = {Martínez-Otzeta, JM and Rodríguez-Moreno, I and Mendialdua, I and Sierra, B}, title = {RANSAC for Robotic Applications: A Survey.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {1}, pages = {}, pmid = {36616922}, issn = {1424-8220}, support = {IT1427-22//Basque Government/ ; KK-2022/00065//Basque Government/ ; FPU18/04737//Spanish Ministry of Science, Innovation and Universities/ ; PID2021-122402OB-C21//Spanish Ministry of Science, Innovation and Universities/ ; PID2021-122402OB-C21//Spanish State Research Agency/ ; PID2021-122402OB-C21//European Regional Development Fund/ ; }, mesh = {*Algorithms ; *Robotics ; Research Design ; }, abstract = {Random Sample Consensus, most commonly abbreviated as RANSAC, is a robust estimation method for the parameters of a model contaminated by a sizable percentage of outliers. In its simplest form, the process starts with a sampling of the minimum data needed to perform an estimation, followed by an evaluation of its adequacy, and further repetitions of this process until some stopping criterion is met. Multiple variants have been proposed in which this workflow is modified, typically tweaking one or several of these steps for improvements in computing time or the quality of the estimation of the parameters. RANSAC is widely applied in the field of robotics, for example, for finding geometric shapes (planes, cylinders, spheres, etc.) 
in cloud points or for estimating the best transformation between different camera views. In this paper, we present a review of the current state of the art of RANSAC family methods with a special interest in applications in robotics.}, } @article {pmid36616830, year = {2022}, author = {Abolhassani Khajeh, S and Saberikamarposhti, M and Rahmani, AM}, title = {Real-Time Scheduling in IoT Applications: A Systematic Review.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {1}, pages = {}, pmid = {36616830}, issn = {1424-8220}, mesh = {Communication ; Internet ; *Internet of Things ; }, abstract = {The Internet of Things (IoT) is a telecommunication network in the next generation of applications with the rapid progress of wireless sensor network techniques that have touched many spheres of life today. Hardware, telephony, communications, storage, secure platforms, software and services, and data processing platforms are all part of the IoT environment. IoT sensors collect data from their environment and share it by connecting to the Internet gateway. These sensors often perform tasks without human intervention. This article aims to review real-time scheduling in the IoT to fully understand the issues raised in this area published from 2018 to 2022. A classification for IoT applications based on practical application is provided for selected studies. Selected studies include healthcare, infrastructure, industrial applications, smart city, commercial applications, environmental protection, and general IoT applications. Studies are sorted into groups based on related applications and compared based on indicators such as performance time, energy consumption, makespan, and assessment environments depending on the provided classification. 
Finally, this paper discusses all reviewed studies' main concepts, disadvantages, advantages, and future work.}, } @article {pmid36616797, year = {2022}, author = {Bhatia, J and Italiya, K and Jadeja, K and Kumhar, M and Chauhan, U and Tanwar, S and Bhavsar, M and Sharma, R and Manea, DL and Verdes, M and Raboaca, MS}, title = {An Overview of Fog Data Analytics for IoT Applications.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {1}, pages = {}, pmid = {36616797}, issn = {1424-8220}, abstract = {With the rapid growth in the data and processing over the cloud, it has become easier to access those data. On the other hand, it poses many technical and security challenges to the users of those provisions. Fog computing makes these technical issues manageable to some extent. Fog computing is one of the promising solutions for handling the big data produced by the IoT, which are often security-critical and time-sensitive. Massive IoT data analytics by a fog computing structure is emerging and requires extensive research for more proficient knowledge and smart decisions. Though an advancement in big data analytics is taking place, it does not consider fog data analytics. However, there are many challenges, including heterogeneity, security, accessibility, resource sharing, network communication overhead, the real-time data processing of complex data, etc. This paper explores various research challenges and their solution using the next-generation fog data analytics and IoT networks. We also performed an experimental analysis based on fog computing and cloud architecture. The result shows that fog computing outperforms the cloud in terms of network utilization and latency. 
Finally, the paper is concluded with future trends.}, } @article {pmid36616774, year = {2022}, author = {Condon, F and Martínez, JM and Eltamaly, AM and Kim, YC and Ahmed, MA}, title = {Design and Implementation of a Cloud-IoT-Based Home Energy Management System.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {1}, pages = {}, pmid = {36616774}, issn = {1424-8220}, support = {ID11200178//Agencia Nacional de Investigación y Desarrollo/ ; 2021R1I1A305872911//National Research Foundation of Korea/ ; }, abstract = {The advances in the Internet of Things (IoT) and cloud computing opened new opportunities for developing various smart grid applications and services. The rapidly increasing adoption of IoT devices has enabled the development of applications and solutions to manage energy consumption efficiently. This work presents the design and implementation of a home energy management system (HEMS), which allows collecting and storing energy consumption data from appliances and the main load of the home. Two scenarios are designed and implemented: a local HEMS isolated from the Internet and relies on its processing and storage duties using an edge device and a Cloud HEMS using AWS IoT Core to manage incoming data messages and provide data-driven services and applications. A testbed was carried out in a real house in the city of Valparaiso, Chile, over a one-year period, where four appliances were used to collect energy consumption using smart plugs, as well as collecting the main energy load of the house through a data logger acting as a smart meter. To the best of our knowledge, this is the first electrical energy dataset with a 10-second sampling rate from a real household in Valparaiso, Chile. Results show that both implementations perform the baseline tasks (collecting, storing, and controlling) for a HEMS. 
This work contributes by providing a detailed technical implementation of HEMS that enables researchers and engineers to develop and implement HEMS solutions to support different smart home applications.}, } @article {pmid36616737, year = {2022}, author = {Zheng, Y and Luo, J and Chen, W and Zhang, Y and Sun, H and Pan, Z}, title = {Unsupervised 3D Reconstruction with Multi-Measure and High-Resolution Loss.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {1}, pages = {}, pmid = {36616737}, issn = {1424-8220}, support = {62076251//National Natural Science Foundation of China/ ; }, abstract = {Multi-view 3D reconstruction technology based on deep learning is developing rapidly. Unsupervised learning has become a research hotspot because it does not need ground truth labels. The current unsupervised method mainly uses 3DCNN to regularize the cost volume to regression image depth. This approach results in high memory requirements and long computing time. In this paper, we propose an end-to-end unsupervised multi-view 3D reconstruction network framework based on PatchMatch, Unsup_patchmatchnet. It dramatically reduces memory requirements and computing time. We propose a feature point consistency loss function. We incorporate various self-supervised signals such as photometric consistency loss and semantic consistency loss into the loss function. At the same time, we propose a high-resolution loss method. This improves the reconstruction of high-resolution images. The experiment proves that the memory usage of the network is reduced by 80% and the running time is reduced by more than 50% compared with the network using 3DCNN method. The overall error of reconstructed 3D point cloud is only 0.501 mm. It is superior to most current unsupervised multi-view 3D reconstruction networks. 
Then, we test on different data sets and verify that the network has good generalization.}, } @article {pmid36616717, year = {2022}, author = {Passian, A and Buchs, G and Seck, CM and Marino, AM and Peters, NA}, title = {The Concept of a Quantum Edge Simulator: Edge Computing and Sensing in the Quantum Era.}, journal = {Sensors (Basel, Switzerland)}, volume = {23}, number = {1}, pages = {}, pmid = {36616717}, issn = {1424-8220}, abstract = {Sensors, enabling observations across vast spatial, spectral, and temporal scales, are major data generators for information technology (IT). Processing, storing, and communicating this ever-growing amount of data pose challenges for the current IT infrastructure. Edge computing-an emerging paradigm to overcome the shortcomings of cloud-based computing-could address these challenges. Furthermore, emerging technologies such as quantum computing, quantum sensing, and quantum communications have the potential to fill the performance gaps left by their classical counterparts. Here, we present the concept of an edge quantum computing (EQC) simulator-a platform for designing the next generation of edge computing applications. An EQC simulator is envisioned to integrate elements from both quantum technologies and edge computing to allow studies of quantum edge applications. The presented concept is motivated by the increasing demand for more sensitive and precise sensors that can operate faster at lower power consumption, generating both larger and denser datasets. These demands may be fulfilled with edge quantum sensor networks. Envisioning the EQC era, we present our view on how such a scenario may be amenable to quantification and design. Given the cost and complexity of quantum systems, constructing physical prototypes to explore design and optimization spaces is not sustainable, necessitating EQC infrastructure and component simulators to aid in co-design. 
We discuss what such a simulator may entail and possible use cases that invoke quantum computing at the edge integrated with new sensor infrastructures.}, } @article {pmid36610429, year = {2023}, author = {Krumm, N}, title = {Organizational and Technical Security Considerations for Laboratory Cloud Computing.}, journal = {The journal of applied laboratory medicine}, volume = {8}, number = {1}, pages = {180--193}, doi = {10.1093/jalm/jfac118}, pmid = {36610429}, issn = {2576-9456}, mesh = {Humans ; *Cloud Computing ; Reproducibility of Results ; *Privacy ; Delivery of Health Care ; }, abstract = {BACKGROUND: Clinical and anatomical pathology services are increasingly utilizing cloud information technology (IT) solutions to meet growing requirements for storage, computation, and other IT services. Cloud IT solutions are often considered on the promise of low cost of entry, durability and reliability, scalability, and features that are typically out of reach for small- or mid-sized IT organizations. However, use of cloud-based IT infrastructure also brings additional security and privacy risks to organizations, as unfamiliarity, public networks, and complex feature sets contribute to an increased surface area for attacks.

CONTENT: In this best-practices guide, we aim to help both managers and IT professionals in healthcare environments understand the requirements and risks when using cloud-based IT infrastructure within the laboratory environment. We will describe how technical, operational, and organizational best practices can help mitigate security, privacy, and other risks associated with the use of cloud infrastructure; furthermore, we identify how these best practices fit into healthcare regulatory frameworks. Among organizational best practices, we identify the need for specific hiring requirements, relationships with parent IT groups, mechanisms for reviewing and auditing security practices, and sound practices for onboarding and offboarding employees. Then, we highlight selected specific operational security, account security, and auditing/logging best practices. Finally, we describe how individual cloud technologies have specific resource-level security features.

SUMMARY: We emphasize that laboratory directors, managers, and IT professionals must ensure that the fundamental organizational and process-based requirements are addressed first, to establish the groundwork for technical security solutions and successful implementation of cloud infrastructure.}, } @article {pmid36597385, year = {2022}, author = {Zhang, J and Liu, T and Yu, Y}, title = {[Research on Comprehensive Safety Monitoring System for Elderly Care Based on Artificial Intelligence and Information Fusion].}, journal = {Zhongguo yi liao qi xie za zhi = Chinese journal of medical instrumentation}, volume = {46}, number = {6}, pages = {611--614}, doi = {10.3969/j.issn.1671-7104.2022.06.005}, pmid = {36597385}, issn = {1671-7104}, mesh = {Humans ; Aged ; *Artificial Intelligence ; *Algorithms ; Monitoring, Physiologic ; Machine Learning ; China ; }, abstract = {Nowadays, China has entered into an aging society; how to ensure safety in elderly care has drawn social attention. Through artificial intelligence and multi-information fusion research, combined with the applications of machine learning algorithms, internet of things devices and cloud computing, this paper presents a comprehensive, intelligent safety monitoring system for the elderly in the community and at home. The system collects the daily life data of the elderly through a series of sensors in an all-round, all-time, and non-intrusive manner, and realizes intelligent alarms for high-risk states such as falls, acute illness, abnormal personnel, and gas smoke for the elderly. 
Through the innovative research of human pose estimation and behavior recognition, and application of multi-sensor information fusion, the system can greatly reduce the occurrence or injury caused by safety incidents in senior care, bringing safe and healthy living environment for the elderly at homes and communities.}, } @article {pmid36590844, year = {2022}, author = {Gudla, SPK and Bhoi, SK and Nayak, SR and Singh, KK and Verma, A and Izonin, I}, title = {A Deep Intelligent Attack Detection Framework for Fog-Based IoT Systems.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {6967938}, pmid = {36590844}, issn = {1687-5273}, mesh = {Humans ; *Benchmarking ; *Communication ; Computer Simulation ; Data Collection ; Intelligence ; }, abstract = {Fog computing provides a multitude of end-based IoT system services. End IoT devices exchange information with fog nodes and the cloud to handle client undertakings. During the process of data collection between the layer of fog and the cloud, there are more chances of crucial attacks or assaults like DDoS and many more security attacks being compromised by IoT end devices. These network (NW) threats must be spotted early. Deep learning (DL) assumes an unmistakable part in foreseeing the end client behavior by extricating highlights and grouping the foe in the network. Yet, because of IoT devices' compelled nature in calculation and storage spaces, DL cannot be managed on those. Here, a framework for fog-based attack detection is proffered, and different attacks are prognosticated utilizing long short-term memory (LSTM). The end IoT gadget behaviour can be prognosticated by installing a trained LSTMDL model at the fog node computation module. 
The simulations are performed using Python by comparing LSTMDL model with deep neural multilayer perceptron (DNMLP), bidirectional LSTM (Bi-LSTM), gated recurrent units (GRU), hybrid ensemble model (HEM), and hybrid deep learning model (CNN + LSTM) comprising convolutional neural network (CNN) and LSTM on DDoS-SDN (Mendeley Dataset), NSLKDD, UNSW-NB15, and IoTID20 datasets. To evaluate the performance of the binary classifier, metrics like accuracy, precision, recall, f1-score, and ROC-AUC curves are considered on these datasets. The LSTMDL model shows outperforming nature in binary classification with 99.70%, 99.12%, 94.11%, and 99.88% performance accuracies on experimentation with respective datasets. The network simulation further shows how different DL models present fog layer communication behaviour detection time (CBDT). DNMLP detects communication behaviour (CB) faster than other models, but LSTMDL predicts assaults better.}, } @article {pmid36590152, year = {2022}, author = {Farhadi, F and Barnes, MR and Sugito, HR and Sin, JM and Henderson, ER and Levy, JJ}, title = {Applications of artificial intelligence in orthopaedic surgery.}, journal = {Frontiers in medical technology}, volume = {4}, number = {}, pages = {995526}, pmid = {36590152}, issn = {2673-3129}, support = {K23 EB026507/EB/NIBIB NIH HHS/United States ; }, abstract = {The practice of medicine is rapidly transforming as a result of technological breakthroughs. Artificial intelligence (AI) systems are becoming more and more relevant in medicine and orthopaedic surgery as a result of the nearly exponential growth in computer processing power, cloud based computing, and development, and refining of medical-task specific software algorithms. 
Because of the extensive role of technologies such as medical imaging that bring high sensitivity, specificity, and positive/negative prognostic value to management of orthopaedic disorders, the field is particularly ripe for the application of machine-based integration of imaging studies, among other applications. Through this review, we seek to promote awareness in the orthopaedics community of the current accomplishments and projected uses of AI and ML as described in the literature. We summarize the current state of the art in the use of ML and AI in five key orthopaedic disciplines: joint reconstruction, spine, orthopaedic oncology, trauma, and sports medicine.}, } @article {pmid36589280, year = {2023}, author = {Panja, S and Chattopadhyay, AK and Nag, A and Singh, JP}, title = {Fuzzy-logic-based IoMT framework for COVID19 patient monitoring.}, journal = {Computers & industrial engineering}, volume = {176}, number = {}, pages = {108941}, pmid = {36589280}, issn = {1879-0550}, abstract = {Smart healthcare is an integral part of a smart city, which provides real time and intelligent remote monitoring and tracking services to patients and elderly persons. In the era of an extraordinary public health crisis due to the spread of the novel coronavirus (2019-nCoV), which caused the deaths of millions and affected a multitude of people worldwide in different ways, the role of smart healthcare has become indispensable. Any modern method that allows for speedy and efficient monitoring of COVID19-affected patients could be highly beneficial to medical staff. Several smart-healthcare systems based on the Internet of Medical Things (IoMT) have attracted worldwide interest in their growing technical assistance in health services, notably in predicting, identifying and preventing, and their remote surveillance of most infectious diseases. In this paper, a real time health monitoring system for COVID19 patients based on edge computing and fuzzy logic technique is proposed. 
The proposed model makes use of the IoMT architecture to collect real time biological data (or health information) from the patients to monitor and analyze the health conditions of the infected patients and generates alert messages that are transmitted to the concerned parties such as relatives, medical staff and doctors to provide appropriate treatment in a timely fashion. The health data are collected through sensors attached to the patients and transmitted to the edge devices and cloud storage for further processing. The collected data are analyzed through fuzzy logic in edge devices to efficiently identify the risk status (such as low risk, moderate risk and high risk) of the COVID19 patients in real time. The proposed system is also associated with a mobile app that enables the continuous monitoring of the health status of the patients. Moreover, once alerted by the system about the high risk status of a patient, a doctor can fetch all the health records of the patient for a specified period, which can be utilized for a detailed clinical diagnosis.}, } @article {pmid36588663, year = {2023}, author = {Gezimati, M and Singh, G}, title = {Advances in terahertz technology for cancer detection applications.}, journal = {Optical and quantum electronics}, volume = {55}, number = {2}, pages = {151}, pmid = {36588663}, issn = {0306-8919}, abstract = {Currently, there is an increasing demand for the diagnostic techniques that provide functional and morphological information with early cancer detection capability. Novel modern medical imaging systems driven by the recent advancements in technology such as terahertz (THz) and infrared radiation-based imaging technologies which are complementary to conventional modalities are being developed, investigated, and validated. The THz cancer imaging techniques offer novel opportunities for label free, non-ionizing, non-invasive and early cancer detection. 
The observed image contrast in THz cancer imaging studies has been mostly attributed to higher refractive index, absorption coefficient and dielectric properties in cancer tissue than that in the normal tissue due to the local increase of the water molecule content in tissue and increased blood supply to the cancer affected tissue. Additional image contrast parameters and cancer biomarkers that have been reported to contribute to THz image contrast include cell structural changes, molecular density, interactions between agents (e.g., contrast agents and embedding agents) and biological tissue as well as tissue substances like proteins, fiber and fat etc. In this paper, we have presented a systematic and comprehensive review of the advancements in the technological development of THz technology for cancer imaging applications. Initially, the fundamental principles and techniques for THz radiation generation and detection, imaging and spectroscopy are introduced. Further, the application of THz imaging for detection of various cancers tissues are presented, with more focus on the in vivo imaging of skin cancer. The data processing techniques for THz data are briefly discussed. Also, we identify the advantages and existing challenges in THz based cancer detection and report the performance improvement techniques. The recent advancements towards THz systems which are optimized and miniaturized are also reported. Finally, the integration of THz systems with artificial intelligent (AI), internet of things (IoT), cloud computing, big data analytics, robotics etc. for more sophisticated systems is proposed. 
This will facilitate the large-scale clinical applications of THz for smart and connected next generation healthcare systems and provide a roadmap for future research.}, } @article {pmid36584089, year = {2022}, author = {Yang, D and Yu, J and Du, X and He, Z and Li, P}, title = {Energy saving strategy of cloud data computing based on convolutional neural network and policy gradient algorithm.}, journal = {PloS one}, volume = {17}, number = {12}, pages = {e0279649}, pmid = {36584089}, issn = {1932-6203}, mesh = {*Algorithms ; *Neural Networks, Computer ; Computer Simulation ; Cloud Computing ; Physical Phenomena ; }, abstract = {Cloud Data Computing (CDC) is conducive to precise energy-saving management of user data centers based on the real-time energy consumption monitoring of Information Technology equipment. This work aims to obtain the most suitable energy-saving strategies to achieve safe, intelligent, and visualized energy management. First, the theory of Convolutional Neural Network (CNN) is discussed. Besides, an intelligent energy-saving model based on CNN is designed to ameliorate the variable energy consumption, load, and power consumption of the CDC data center. Then, the core idea of the policy gradient (PG) algorithm is introduced. In addition, a CDC task scheduling model is designed based on the PG algorithm, aiming at the uncertainty and volatility of the CDC scheduling tasks. Finally, the performance of different neural network models in the training process is analyzed from the perspective of total energy consumption and load optimization of the CDC center. At the same time, simulation is performed on the CDC task scheduling model based on the PG algorithm to analyze the task scheduling demand. The results demonstrate that the energy consumption of the CNN algorithm in the CDC energy-saving model is better than that of the Elman algorithm and the ecoCloud algorithm. 
Besides, the CNN algorithm reduces the number of virtual machine migrations in the CDC energy-saving model by 9.30% compared with the Elman algorithm. The Deep Deterministic Policy Gradient (DDPG) algorithm performs the best in task scheduling of the cloud data center, and the average response time of the DDPG algorithm is 141. In contrast, the Deep Q Network algorithm performs poorly. This paper proves that Deep Reinforcement Learning (DRL) and neural networks can reduce the energy consumption of CDC and improve the completion time of CDC tasks, offering a research reference for CDC resource scheduling.}, } @article {pmid36575310, year = {2022}, author = {Wang, J and Li, X and Wang, X and Zhou, S and Luo, Y}, title = {Farmland quality assessment using deep fully convolutional neural networks.}, journal = {Environmental monitoring and assessment}, volume = {195}, number = {1}, pages = {239}, pmid = {36575310}, issn = {1573-2959}, support = {21KJB170010//Natural Science Foundation of the Higher Education Institutions of Jiangsu Province, China/ ; 42201282//Young Scientists Fund of the National Natural Science Foundation of China/ ; 42271271//National Natural Science Foundation of China/ ; }, mesh = {*Environmental Monitoring ; *Farms ; Image Processing, Computer-Assisted/methods ; Machine Learning ; *Neural Networks, Computer ; *Agriculture/methods ; }, abstract = {Farmland is the cornerstone of agriculture and is important for food security and social production. Farmland assessment is essential but traditional methods are usually expensive and slow. Deep learning methods have been developed and widely applied recently in image recognition, semantic understanding, and many other application domains. In this research, we used fully convolutional networks (FCN) as the deep learning model to evaluate farmland grades. 
Normalized difference vegetation index (NDVI) derived from Landsat images was used as the input data, and the China National Cultivated Land Grade Database within Jiangsu Province was used to train the model on cloud computing. We also applied an image segmentation method to improve the original results from the FCN and compared the results with classical machine learning (ML) methods. Our research found that the FCN can predict farmland grades with an overall F1 score (the harmonic mean of precision and recall) of 0.719 and F1 score of 0.909, 0.590, 0.740, 0.642, and 0.023 for non-farmland, level I, II, III, and IV farmland, respectively. Combining the FCN and image segmentation method can further improve prediction accuracy with results of fewer noise pixels and more realistic edges. Compared with conventional ML, at least in farmland evaluation, FCN provides better results with higher precision, recall, and F1 score. Our research indicates that by using remote sensing NDVI data, the deep learning method can provide acceptable farmland assessment without fieldwork and can be used as a novel supplement to traditional methods. The method used in this research will save a lot of time and cost compared with traditional means.}, } @article {pmid36575255, year = {2023}, author = {Niyazi, M and Behnamian, J}, title = {Application of cloud computing and big data in three-stage dynamic modeling of disaster relief logistics and wounded transportation: a case study.}, journal = {Environmental science and pollution research international}, volume = {30}, number = {13}, pages = {38121-38140}, doi = {10.1007/s11356-022-24770-3}, pmid = {36575255}, issn = {1614-7499}, mesh = {Humans ; Cloud Computing ; Big Data ; *Disaster Planning ; *Disasters ; *Earthquakes ; }, abstract = {Collecting and sharing information about affected areas is an important activity for optimal decision-making in relief processes. 
Defects such as over-sending some items to affected areas and mistakes in transferring injured people to medical centers in accidents are due to improper management of this information. Because cloud computing as a processing and storage platform for big data is independent of the device and location and can also perform high-speed processing, its use in disasters has been highly regarded by researchers. In this environment, a three-stage dynamic procedure for evacuation operations and logistics issues is presented. The first stage of the proposed model is image processing and tweet mining in a cloud center in order to determine the disaster parameters. In stage II, a mixed-integer multi-commodity model is presented for the relief commodity delivery, wounded people transportation with capacity constraints, and locating of the possible on-site clinics and local distribution centers near disaster areas. In stage III, by using a system of equations, detailed vehicle load/unload instructions are obtained. Finally, the effectiveness of the proposed model on the data of an earthquake disaster in Iran is investigated. The results of comparing the proposed approach with a two-stage algorithm show that the total number of unsatisfied demand for all types of commodities in the proposed approach was better than the other. Also, the number of survivors in the three-stage model is significantly higher than in the two-stage one. 
The better performance of the proposed algorithm is due to the fact that online data is continuously available and that decisions such as sending relief items and dispatching are made more effectively.}, } @article {pmid36572709, year = {2022}, author = {Khan, S and Khan, HU and Nazir, S}, title = {Systematic analysis of healthcare big data analytics for efficient care and disease diagnosing.}, journal = {Scientific reports}, volume = {12}, number = {1}, pages = {22377}, pmid = {36572709}, issn = {2045-2322}, mesh = {*Data Science ; *Delivery of Health Care ; Big Data ; Information Systems ; Machine Learning ; }, abstract = {Big data has revolutionized the world by providing tremendous opportunities for a variety of applications. It contains a gigantic amount of data, especially a plethora of data types that has been significantly useful in diverse research domains. In healthcare domain, the researchers use computational devices to extract enriched relevant information from this data and develop smart applications to solve real-life problems in a timely fashion. Electronic health (eHealth) and mobile health (mHealth) facilities alongwith the availability of new computational models have enabled the doctors and researchers to extract relevant information and visualize the healthcare big data in a new spectrum. Digital transformation of healthcare systems by using of information system, medical technology, handheld and smart wearable devices has posed many challenges to researchers and caretakers in the form of storage, minimizing treatment cost, and processing time (to extract enriched information, and minimize error rates to make optimum decisions). In this research work, the existing literature is analysed and assessed, to identify gaps that result in affecting the overall performance of the available healthcare applications. Also, it aims to suggest enhanced solutions to address these gaps. 
In this comprehensive systematic research work, the existing literature reported during 2011 to 2021, is thoroughly analysed for identifying the efforts made to facilitate the doctors and practitioners for diagnosing diseases using healthcare big data analytics. A set of research questions are formulated to analyse the relevant articles for identifying the key features and optimum management solutions, and laterally use these analyses to achieve effective outcomes. The results of this systematic mapping conclude that despite hard efforts made in the domains of healthcare big data analytics, the newer hybrid machine learning based systems and cloud computing-based models should be adapted to reduce treatment cost, simulation time and achieve improved quality of care. This systematic mapping will also result in enhancing the capabilities of doctors, practitioners, researchers, and policymakers to use this study as evidence for future research.}, } @article {pmid36570052, year = {2022}, author = {Zahid, MA and Akhtar, A and Shafiq, B and Shamail, S and Afzal, A and Vaidya, J}, title = {An Integrated Framework for Fault Resolution in Business Processes.}, journal = {IEEE International Conference on Web Services : proceedings. IEEE International Conference on Web Services}, volume = {2022}, number = {}, pages = {266-275}, doi = {10.1109/icws55610.2022.00048}, pmid = {36570052}, issn = {2770-8144}, support = {R01 GM118574/GM/NIGMS NIH HHS/United States ; R35 GM134927/GM/NIGMS NIH HHS/United States ; }, abstract = {Cloud and edge-computing based platforms have enabled rapid development of distributed business process (BP) applications in a plug and play manner. However, these platforms do not provide the needed capabilities for identifying or repairing faults in BPs. 
Faults in BP may occur due to errors made by BP designers because of their lack of understanding of the underlying component services, misconfiguration of these services, or incorrect/incomplete BP workflow specifications. Such faults may not be discovered at design or development stage and may occur at runtime. In this paper, we present a unified framework for automated fault resolution in BPs. The proposed framework employs a novel and efficient fault resolution approach that extends the generate-and-validate program repair approach. In addition, we propose a hybrid approach that performs fault resolution by analyzing a faulty BP in isolation as well as by comparing with other BPs using similar services. This hybrid approach results in improved accuracy and broader coverage of fault types. We also perform an extensive experimental evaluation to compare the effectiveness of the proposed approach using a dataset of 208 faulty BPs.}, } @article {pmid36569183, year = {2022}, author = {Mawgoud, AA and Taha, MHN and Abu-Talleb, A and Kotb, A}, title = {A deep learning based steganography integration framework for ad-hoc cloud computing data security augmentation using the V-BOINC system.}, journal = {Journal of cloud computing (Heidelberg, Germany)}, volume = {11}, number = {1}, pages = {97}, pmid = {36569183}, issn = {2192-113X}, abstract = {In the early days of digital transformation, the automation, scalability, and availability of cloud computing made a big difference for business. Nonetheless, significant concerns have been raised regarding the security and privacy levels that cloud systems can provide, as enterprises have accelerated their cloud migration journeys in an effort to provide a remote working environment for their employees, primarily in light of the COVID-19 outbreak. The goal of this study is to come up with a way to improve steganography in ad hoc cloud systems by using deep learning. This research implementation is separated into two sections. 
In Phase 1, the "Ad-hoc Cloud System" idea and deployment plan were set up with the help of V-BOINC. In Phase 2, a modified form of steganography and deep learning were used to study the security of data transmission in ad-hoc cloud networks. In the majority of prior studies, attempts to employ deep learning models to augment or replace data-hiding systems did not achieve a high success rate. The implemented model inserts data images through colored images in the developed ad hoc cloud system. A systematic steganography model conceals from statistics lower message detection rates. Additionally, it may be necessary to incorporate small images beneath huge cover images. The implemented ad-hoc system outperformed Amazon AC2 in terms of performance, while the execution of the proposed deep steganography approach gave a high rate of evaluation for concealing both data and images when evaluated against several attacks in an ad-hoc cloud system environment.}, } @article {pmid36567676, year = {2022}, author = {Barot, V and Patel, DR}, title = {A physiological signal compression approach using optimized Spindle Convolutional Auto-encoder in mHealth applications.}, journal = {Biomedical signal processing and control}, volume = {73}, number = {}, pages = {103436}, pmid = {36567676}, issn = {1746-8094}, abstract = {BACKGROUND AND OBJECTIVES: The COVID-19 pandemic manifested the need of developing robust digital platforms for facilitating healthcare services such as consultancy, clinical therapies, real time remote monitoring, early diagnosis and future predictions. Innovations made using technologies such as Internet of Things (IoT), edge computing, cloud computing and artificial intelligence are helping address this crisis. The urge for remote monitoring, symptom analysis and early detection of diseases lead to tremendous increase in the deployment of wearable sensor devices. 
They facilitate seamless gathering of physiological data such as electrocardiogram (ECG) signals, respiration traces (RESP), galvanic skin response (GSR), pulse rate, body temperature, photoplethysmograms (PPG), oxygen saturation (SpO2) etc. For diagnosis and analysis purpose, the gathered data needs to be stored. Wearable devices operate on batteries and have a memory constraint. In mHealth application architectures, this gathered data is hence stored on cloud based servers. While transmitting data from wearable devices to cloud servers via edge devices, a lot of energy is consumed. This paper proposes a deep learning based compression model SCAElite that reduces the data volume, enabling energy efficient transmission.

RESULTS: Stress Recognition in Automobile Drivers dataset and MIT-BIH dataset from PhysioNet are used for validation of algorithm performance. The model achieves a compression ratio of up to 300 fold with reconstruction errors within 8% over the stress recognition dataset and 106.34-fold with reconstruction errors within 8% over the MIT-BIH dataset. The computational complexity of SCAElite is 51.65% less compared to state-of-the-art deep compressive model.

CONCLUSION: It is experimentally validated that SCAElite guarantees a high compression ratio with good quality restoration capabilities for physiological signal compression in mHealth applications. It has a compact architecture and is computationally more efficient compared to state-of-the-art deep compressive model.}, } @article {pmid36563043, year = {2022}, author = {Tuler de Oliveira, M and Amorim Reis, LH and Marquering, H and Zwinderman, AH and Delgado Olabarriaga, S}, title = {Perceptions of a Secure Cloud-Based Solution for Data Sharing During Acute Stroke Care: Qualitative Interview Study.}, journal = {JMIR formative research}, volume = {6}, number = {12}, pages = {e40061}, pmid = {36563043}, issn = {2561-326X}, abstract = {BACKGROUND: Acute stroke care demands fast procedures performed through the collaboration of multiple professionals across multiple organizations. Cloud computing and the wide adoption of electronic medical records (EMRs) enable health care systems to improve data availability and facilitate sharing among professionals. However, designing a secure and privacy-preserving EMR cloud-based application is challenging because it must dynamically control the access to the patient's EMR according to the needs for data during treatment.

OBJECTIVE: We developed a prototype of a secure EMR cloud-based application. The application explores the security features offered by the eHealth cloud-based framework created by the Advanced Secure Cloud Encrypted Platform for Internationally Orchestrated Solutions in Health Care Horizon 2020 project. This study aimed to collect impressions, challenges, and improvements for the prototype when applied to the use case of secure data sharing among acute care teams during emergency treatment in the Netherlands.

METHODS: We conducted 14 semistructured interviews with medical professionals with 4 prominent roles in acute care: emergency call centers, ambulance services, emergency hospitals, and general practitioner clinics. We used in-depth interviews to capture their perspectives about the application's design and functions and its use in a simulated acute care event. We used thematic analysis of interview transcripts. Participants were recruited until the collected data reached thematic saturation.

RESULTS: The participants' perceptions and feedback are presented as 5 themes identified from the interviews: current challenges (theme 1), quality of the shared EMR data (theme 2), integrity and auditability of the EMR data (theme 3), usefulness and functionality of the application (theme 4), and trust and acceptance of the technology (theme 5). The results reinforced the current challenges in patient data sharing during acute stroke care. Moreover, from the user point of view, we expressed the challenges of adopting the Advanced Secure Cloud Encrypted Platform for Internationally Orchestrated Solutions in Health Care Acute Stroke Care application in a real scenario and provided suggestions for improving the proposed technology's acceptability.

CONCLUSIONS: This study has endorsed a system that supports data sharing among acute care professionals with efficiency, but without compromising the security and privacy of the patient. This explorative study identified several significant barriers to and improvement opportunities for the future acceptance and adoption of the proposed system. Moreover, the study results highlight that the desired digital transformation should consider integrating the already existing systems instead of requesting migration to a new centralized system.}, } @article {pmid36561335, year = {2022}, author = {Sethuraman, A}, title = {Teaching computational genomics and bioinformatics on a high performance computing cluster-a primer.}, journal = {Biology methods & protocols}, volume = {7}, number = {1}, pages = {bpac032}, pmid = {36561335}, issn = {2396-8923}, support = {R15 GM143700/GM/NIGMS NIH HHS/United States ; }, abstract = {The burgeoning field of genomics as applied to personalized medicine, epidemiology, conservation, agriculture, forensics, drug development, and other fields comes with large computational and bioinformatics costs, which are often inaccessible to student trainees in classroom settings at universities. However, with increased availability of resources such as NSF XSEDE, Google Cloud, Amazon AWS, and other high-performance computing (HPC) clouds and clusters for educational purposes, a growing community of academicians are working on teaching the utility of HPC resources in genomics and big data analyses. Here, I describe the successful implementation of a semester-long (16 week) upper division undergraduate/graduate level course in Computational Genomics and Bioinformatics taught at San Diego State University in Spring 2022. 
Students were trained in the theory, algorithms and hands-on applications of genomic data quality control, assembly, annotation, multiple sequence alignment, variant calling, phylogenomic analyses, population genomics, genome-wide association studies, and differential gene expression analyses using RNAseq data on their own dedicated 6-CPU NSF XSEDE Jetstream virtual machines. All lesson plans, activities, examinations, tutorials, code, lectures, and notes are publicly available at https://github.com/arunsethuraman/biomi609spring2022.}, } @article {pmid36560272, year = {2022}, author = {Uslu, S and Kaur, D and Durresi, M and Durresi, A}, title = {Trustability for Resilient Internet of Things Services on 5G Multiple Access Edge Cloud Computing.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {24}, pages = {}, pmid = {36560272}, issn = {1424-8220}, support = {1547411//National Science Foundation/ ; 2017-67003-26057//United States Department of Agriculture/ ; }, mesh = {*Internet of Things ; Cloud Computing ; Reproducibility of Results ; Internet ; Trust ; }, abstract = {Billions of Internet of Things (IoT) devices and sensors are expected to be supported by fifth-generation (5G) wireless cellular networks. This highly connected structure is predicted to attract different and unseen types of attacks on devices, sensors, and networks that require advanced mitigation strategies and the active monitoring of the system components. Therefore, a paradigm shift is needed, from traditional prevention and detection approaches toward resilience. This study proposes a trust-based defense framework to ensure resilient IoT services on 5G multi-access edge computing (MEC) systems. This defense framework is based on the trustability metric, which is an extension of the concept of reliability and measures how much a system can be trusted to keep a given level of performance under a specific successful attack vector. 
Furthermore, trustability is used as a trade-off with system cost to measure the net utility of the system. Systems using multiple sensors with different levels of redundancy were tested, and the framework was shown to measure the trustability of the entire system. Furthermore, different types of attacks were simulated on an edge cloud with multiple nodes, and the trustability was compared to the capabilities of dynamic node addition for the redundancy and removal of untrusted nodes. Finally, the defense framework measured the net utility of the service, comparing the two types of edge clouds with and without the node deactivation capability. Overall, the proposed defense framework based on trustability ensures a satisfactory level of resilience for IoT on 5G MEC systems, which serves as a trade-off with an accepted cost of redundant resources under various attacks.}, } @article {pmid36560073, year = {2022}, author = {El-Nahal, F and Xu, T and AlQahtani, D and Leeson, M}, title = {A Bidirectional Wavelength Division Multiplexed (WDM) Free Space Optical Communication (FSO) System for Deployment in Data Center Networks (DCNs).}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {24}, pages = {}, pmid = {36560073}, issn = {1424-8220}, support = {101008280//European Commission/ ; }, abstract = {Data centers are crucial to the growth of cloud computing. Next-generation data center networks (DCNs) will rely heavily on optical technology. Here, we have investigated a bidirectional wavelength-division-multiplexed (WDM) free space optical communication (FSO) system for deployment in optical wireless DCNs. The system was evaluated for symmetric 10 Gbps 16-quadrature amplitude modulation (16-QAM) intensity-modulated orthogonal frequency-division multiplexing (OFDM) downstream signals and 10 Gbps on-off keying (OOK) upstream signals, respectively. The transmission of optical signals over an FSO link is demonstrated using a gamma-gamma channel model. 
According to the bit error rate (BER) results obtained for each WDM signal, the bidirectional WDM-FSO transmission could achieve 320 Gbps over 1000 m free space transmission length. The results show that the proposed FSO topology offers an excellent alternative to fiber-based optical interconnects in DCNs, allowing for high data rate bidirectional transmission.}, } @article {pmid36555731, year = {2022}, author = {Puch-Giner, I and Molina, A and Municoy, M and Pérez, C and Guallar, V}, title = {Recent PELE Developments and Applications in Drug Discovery Campaigns.}, journal = {International journal of molecular sciences}, volume = {23}, number = {24}, pages = {}, pmid = {36555731}, issn = {1422-0067}, mesh = {Computer Simulation ; *Software ; *Drug Discovery/methods ; Models, Molecular ; Monte Carlo Method ; Drug Design ; }, abstract = {Computer simulation techniques are gaining a central role in molecular pharmacology. Due to several factors, including the significant improvements of traditional molecular modelling, the irruption of machine learning methods, the massive data generation, or the unlimited computational resources through cloud computing, the future of pharmacology seems to go hand in hand with in silico predictions. In this review, we summarize our recent efforts in such a direction, centered on the unconventional Monte Carlo PELE software and on its coupling with machine learning techniques. 
We also provide new data on combining two recent new techniques, aquaPELE capable of exhaustive water sampling and fragPELE, for fragment growing.}, } @article {pmid36555493, year = {2022}, author = {Nelson, TM and Ghosh, S and Postler, TS}, title = {L-RAPiT: A Cloud-Based Computing Pipeline for the Analysis of Long-Read RNA Sequencing Data.}, journal = {International journal of molecular sciences}, volume = {23}, number = {24}, pages = {}, pmid = {36555493}, issn = {1422-0067}, support = {R21 AI156616/AI/NIAID NIH HHS/United States ; R21AI156616//National Institute of Allergy and Infectious Diseases/ ; }, mesh = {*RNA/genetics ; *Cloud Computing ; Gene Expression Profiling/methods ; Computational Biology/methods ; Software ; Sequence Analysis, RNA ; High-Throughput Nucleotide Sequencing/methods ; }, abstract = {Long-read sequencing (LRS) has been adopted to meet a wide variety of research needs, ranging from the construction of novel transcriptome annotations to the rapid identification of emerging virus variants. Amongst other advantages, LRS preserves more information about RNA at the transcript level than conventional high-throughput sequencing, including far more accurate and quantitative records of splicing patterns. New studies with LRS datasets are being published at an exponential rate, generating a vast reservoir of information that can be leveraged to address a host of different research questions. However, mining such publicly available data in a tailored fashion is currently not easy, as the available software tools typically require familiarity with the command-line interface, which constitutes a significant obstacle to many researchers. Additionally, different research groups utilize different software packages to perform LRS analysis, which often prevents a direct comparison of published results across different studies. 
To address these challenges, we have developed the Long-Read Analysis Pipeline for Transcriptomics (L-RAPiT), a user-friendly, free pipeline requiring no dedicated computational resources or bioinformatics expertise. L-RAPiT can be implemented directly through Google Colaboratory, a system based on the open-source Jupyter notebook environment, and allows for the direct analysis of transcriptomic reads from Oxford Nanopore and PacBio LRS machines. This new pipeline enables the rapid, convenient, and standardized analysis of publicly available or newly generated LRS datasets.}, } @article {pmid36554175, year = {2022}, author = {Liu, C and Jiao, J and Li, W and Wang, J and Zhang, J}, title = {Tr-Predictior: An Ensemble Transfer Learning Model for Small-Sample Cloud Workload Prediction.}, journal = {Entropy (Basel, Switzerland)}, volume = {24}, number = {12}, pages = {}, pmid = {36554175}, issn = {1099-4300}, support = {61902112//National Natural Science Foundation of China 404 under Grant/ ; 23A520036//Application Research Plan of Key Scientific Research Projects in 405 Henan University/ ; GCIS202115//Guangxi Key Laboratory of Cryptography and Information 406 Security/ ; }, abstract = {Accurate workload prediction plays a key role in intelligent scheduling decisions on cloud platforms. There are massive amounts of short-workload sequences in the cloud platform, and the small amount of data and the presence of outliers make accurate workload sequence prediction a challenge. For the above issues, this paper proposes an ensemble learning method based on sample weight transfer and long short-term memory (LSTM), termed as Tr-Predictor. Specifically, a selection method of similar sequences combining time warp edit distance (TWED) and transfer entropy (TE) is proposed to select a source domain dataset with higher similarity for the target workload sequence. 
Then, we upgrade the basic learner of the ensemble model two-stage TrAdaBoost.R2 to LSTM in the deep model and enhance the ability of the ensemble model to extract sequence features. To optimize the weight adjustment strategy, we adopt a two-stage weight adjustment strategy and select the best weight for the learner according to the sample error and model error. Finally, the above process determines the parameters of the target model and uses the target model to predict the short-task sequences. In the experimental validation, we arbitrarily select nine sets of short-workload data from the Google dataset and three sets of short-workload data from the Alibaba cluster to verify the prediction effectiveness of the proposed algorithm. The experimental results show that compared with the commonly used cloud workload prediction methods Tr-Predictor has higher prediction accuracy on the small-sample workload. The prediction indicators of the ablation experiments show the performance gain of each part in the proposed method.}, } @article {pmid36550311, year = {2023}, author = {Pietris, J and Bacchi, S and Tan, Y and Kovoor, J and Gupta, A and Chan, W}, title = {Safety always: the challenges of cloud computing in medical practice and ophthalmology.}, journal = {Eye (London, England)}, volume = {37}, number = {12}, pages = {2436-2437}, pmid = {36550311}, issn = {1476-5454}, mesh = {Humans ; *Cloud Computing ; *Ophthalmology ; Software ; }, } @article {pmid36547491, year = {2022}, author = {Martin, J and Cantero, D and González, M and Cabrera, A and Larrañaga, M and Maltezos, E and Lioupis, P and Kosyvas, D and Karagiannidis, L and Ouzounoglou, E and Amditis, A}, title = {Embedded Vision Intelligence for the Safety of Smart Cities.}, journal = {Journal of imaging}, volume = {8}, number = {12}, pages = {}, pmid = {36547491}, issn = {2313-433X}, support = {883522//European Commission/ ; }, abstract = {Advances in Artificial intelligence (AI) and embedded systems have resulted 
in a recent increase in use of image processing applications for smart cities' safety. This enables a cost-adequate scale of automated video surveillance, increasing the data available and releasing human intervention. At the same time, although deep learning is a very intensive task in terms of computing resources, hardware and software improvements have emerged, allowing embedded systems to implement sophisticated machine learning algorithms at the edge. Additionally, new lightweight open-source middleware for constrained resource devices, such as EdgeX Foundry, have appeared to facilitate the collection and processing of data at sensor level, with communication capabilities to exchange data with a cloud enterprise application. The objective of this work is to show and describe the development of two Edge Smart Camera Systems for safety of Smart cities within S4AllCities H2020 project. Hence, the work presents hardware and software modules developed within the project, including a custom hardware platform specifically developed for the deployment of deep learning models based on the I.MX8 Plus from NXP, which considerably reduces processing and inference times; a custom Video Analytics Edge Computing (VAEC) system deployed on a commercial NVIDIA Jetson TX2 platform, which provides high level results on person detection processes; and an edge computing framework for the management of those two edge devices, namely Distributed Edge Computing framework, DECIoT. To verify the utility and functionality of the systems, extended experiments were performed. 
The results highlight their potential to provide enhanced situational awareness and demonstrate the suitability for edge machine vision applications for safety in smart cities.}, } @article {pmid36547481, year = {2022}, author = {Saad El Imanni, H and El Harti, A and Hssaisoune, M and Velastegui-Montoya, A and Elbouzidi, A and Addi, M and El Iysaouy, L and El Hachimi, J}, title = {Rapid and Automated Approach for Early Crop Mapping Using Sentinel-1 and Sentinel-2 on Google Earth Engine; A Case of a Highly Heterogeneous and Fragmented Agricultural Region.}, journal = {Journal of imaging}, volume = {8}, number = {12}, pages = {}, pmid = {36547481}, issn = {2313-433X}, abstract = {Accurate and rapid crop type mapping is critical for agricultural sustainability. The growing trend of cloud-based geospatial platforms provides rapid processing tools and cloud storage for remote sensing data. In particular, a variety of remote sensing applications have made use of publicly accessible data from the Sentinel missions of the European Space Agency (ESA). However, few studies have employed these data to evaluate the effectiveness of Sentinel-1, and Sentinel-2 spectral bands and Machine Learning (ML) techniques in challenging highly heterogeneous and fragmented agricultural landscapes using the Google Earth Engine (GEE) cloud computing platform. This work aims to map, accurately and early, the crop types in a highly heterogeneous and fragmented agricultural region of the Tadla Irrigated Perimeter (TIP) as a case study using the high spatiotemporal resolution of Sentinel-1, Sentinel-2, and a Random Forest (RF) classifier implemented on GEE. More specifically, five experiments were performed to assess the optical band reflectance values, vegetation indices, and SAR backscattering coefficients on the accuracy of crop classification. Besides, two scenarios were used to assess the monthly temporal windows on classification accuracy. 
The findings of this study show that the fusion of Sentinel-1 and Sentinel-2 data can accurately produce the early crop mapping of the studied area with an Overall Accuracy (OA) reaching 95.02%. The scenarios prove that the monthly time series perform better in terms of classification accuracy than single monthly windows images. Red-edge and shortwave infrared bands can improve the accuracy of crop classification by 1.72% when compared to only using traditional bands (i.e., visible and near-infrared bands). The inclusion of two common vegetation indices (The Normalized Vegetation Index (NDVI), the Enhanced Vegetation Index (EVI)) and Sentinel-1 backscattering coefficients to the crop classification enhanced the overall classification accuracy by 0.02% and 2.94%, respectively, compared to using the Sentinel-2 reflectance bands alone. The monthly windows analysis indicated that the improvement in the accuracy of crop classification is the greatest when the March images are accessible, with an OA higher than 80%.}, } @article {pmid36544470, year = {2023}, author = {Bang, I and Khanh Nong, L and Young Park, J and Thi Le, H and Mok Lee, S and Kim, D}, title = {ChEAP: ChIP-exo analysis pipeline and the investigation of Escherichia coli RpoN protein-DNA interactions.}, journal = {Computational and structural biotechnology journal}, volume = {21}, number = {}, pages = {99-104}, pmid = {36544470}, issn = {2001-0370}, abstract = {Genome-scale studies of the bacterial regulatory network have been leveraged by declining sequencing cost and advances in ChIP (chromatin immunoprecipitation) methods. Of which, ChIP-exo has proven competent with its near-single base-pair resolution. While several algorithms and programs have been developed for different analytical steps in ChIP-exo data processing, there is a lack of effort in incorporating them into a convenient bioinformatics pipeline that is intuitive and publicly available. 
In this paper, we developed ChIP-exo Analysis Pipeline (ChEAP) that executes the one-step process, starting from trimming and aligning raw sequencing reads to visualization of ChIP-exo results. The pipeline was implemented on the interactive web-based Python development environment - Jupyter Notebook, which is compatible with the Google Colab cloud platform to facilitate the sharing of codes and collaboration among researchers. Additionally, users could exploit the free GPU and CPU resources allocated by Colab to carry out computing tasks regardless of the performance of their local machines. The utility of ChEAP was demonstrated with the ChIP-exo datasets of RpoN sigma factor in E. coli K-12 MG1655. To analyze two raw data files, ChEAP runtime was 2 min and 25 s. Subsequent analyses identified 113 RpoN binding sites showing a conserved RpoN binding pattern in the motif search. ChEAP application in ChIP-exo data analysis is extensive and flexible for the parallel processing of data from various organisms.}, } @article {pmid36541007, year = {2023}, author = {Holko, M and Weber, N and Lunt, C and Brenner, SE}, title = {Biomedical research in the Cloud: considerations for researchers and organizations moving to (or adding) cloud computing resources.}, journal = {Pacific Symposium on Biocomputing. Pacific Symposium on Biocomputing}, volume = {28}, number = {}, pages = {536-540}, pmid = {36541007}, issn = {2335-6936}, mesh = {Humans ; *Computational Biology ; Cloud Computing ; Reproducibility of Results ; *Biomedical Research ; Information Dissemination ; }, abstract = {As biomedical research data grow, researchers need reliable and scalable solutions for storage and compute. There is also a need to build systems that encourage and support collaboration and data sharing, to result in greater reproducibility. This has led many researchers and organizations to use cloud computing [1]. 
The cloud not only enables scalable, on-demand resources for storage and compute, but also collaboration and continuity during virtual work, and can provide superior security and compliance features. Moving to or adding cloud resources, however, is not trivial or without cost, and may not be the best choice in every scenario. The goal of this workshop is to explore the benefits of using the cloud in biomedical and computational research, and considerations (pros and cons) for a range of scenarios including individual researchers, collaborative research teams, consortia research programs, and large biomedical research agencies / organizations.}, } @article {pmid36537002, year = {2023}, author = {Crowley, MA and Stockdale, CA and Johnston, JM and Wulder, MA and Liu, T and McCarty, JL and Rieb, JT and Cardille, JA and White, JC}, title = {Towards a whole-system framework for wildfire monitoring using Earth observations.}, journal = {Global change biology}, volume = {29}, number = {6}, pages = {1423-1436}, doi = {10.1111/gcb.16567}, pmid = {36537002}, issn = {1365-2486}, support = {CGSD2-534128-2019//Natural Sciences and Engineering Research Council of Canada/ ; }, mesh = {*Wildfires ; Ecosystem ; *Fires ; Forests ; }, abstract = {Fire seasons have become increasingly variable and extreme due to changing climatological, ecological, and social conditions. Earth observation data are critical for monitoring fires and their impacts. Herein, we present a whole-system framework for identifying and synthesizing fire monitoring objectives and data needs throughout the life cycle of a fire event. The four stages of fire monitoring using Earth observation data include the following: (1) pre-fire vegetation inventories, (2) active-fire monitoring, (3) post-fire assessment, and (4) multi-scale synthesis. 
We identify the challenges and opportunities associated with current approaches to fire monitoring, highlighting four case studies from North American boreal, montane, and grassland ecosystems. While the case studies are localized to these ecosystems and regional contexts, they provide insights for others experiencing similar monitoring challenges worldwide. The field of remote sensing is experiencing a rapid proliferation of new data sources, providing observations that can inform all aspects of our fire monitoring framework; however, significant challenges for meeting fire monitoring objectives remain. We identify future opportunities for data sharing and rapid co-development of information products using cloud computing that benefits from open-access Earth observation and other geospatial data layers.}, } @article {pmid36536803, year = {2022}, author = {Bao, G and Guo, P}, title = {Federated learning in cloud-edge collaborative architecture: key technologies, applications and challenges.}, journal = {Journal of cloud computing (Heidelberg, Germany)}, volume = {11}, number = {1}, pages = {94}, pmid = {36536803}, issn = {2192-113X}, abstract = {In recent years, with the rapid growth of edge data, the novel cloud-edge collaborative architecture has been proposed to compensate for the lack of data processing power of traditional cloud computing. On the other hand, on account of the increasing demand of the public for data privacy, federated learning has been proposed to compensate for the lack of security of traditional centralized machine learning. Deploying federated learning in cloud-edge collaborative architecture is widely considered to be a promising cyber infrastructure in the future. Although each cloud-edge collaboration and federated learning is hot research topic respectively at present, the discussion of deploying federated learning in cloud-edge collaborative architecture is still in its infancy and little research has been conducted. 
This article aims to fill the gap by providing a detailed description of the critical technologies, challenges, and applications of deploying federated learning in cloud-edge collaborative architecture, and providing guidance on future research directions.}, } @article {pmid36534206, year = {2022}, author = {Ruifeng, L and Kai, Y and Xing, L and Xiaoli, L and Xitao, Z and Xiaocheng, G and Juan, F and Shixin, C}, title = {Extraction and spatiotemporal changes of open-pit mines during 1985-2020 using Google Earth Engine: A case study of Qingzhou City, Shandong Province, China.}, journal = {Environmental monitoring and assessment}, volume = {195}, number = {1}, pages = {209}, pmid = {36534206}, issn = {1573-2959}, support = {2019LY010//Shandong Agricultural Science and Technology Fund (Forestry Science and Technology Innovation)/ ; SJCX21_1126//Postgraduate Research & Practice Innovation Program of Jiangsu Province/ ; KYCX21_2628//Postgraduate Research & Practice Innovation Program of Jiangsu Province/ ; }, mesh = {*Search Engine ; *Environmental Monitoring/methods ; Mining ; Environment ; Cities ; China ; }, abstract = {The global use of mineral resources has increased exponentially for decades and will continue to grow for the foreseeable future, resulting in increasingly negative impacts on the surrounding environment. However, to date, there are a lack of historical and current spatial extent datasets with high accuracy for mining areas in many parts of the world, which has hindered a more comprehensive understanding of the environmental impacts of mining. Using the Google Earth Engine cloud platform and the Landsat normalized difference vegetation index (NDVI) datasets, the spatial extent data of open-pit mining areas for eight years (1985, 1990, 1995, 2000, 2005, 2010, 2015, and 2020) was extracted by the Otsu algorithm. The limestone mining areas in Qingzhou, Shandong Province, China, was selected as a case study. 
The annual maximum NDVI was first derived from the Landsat NDVI datasets, and then the Otsu algorithm was used to segment the annual maximum NDVI images to obtain the extent of the mining areas. Finally, the spatiotemporal characteristics of the mining areas in the study region were analyzed in reference to previous survey data. The results showed that the mining areas were primarily located in Shaozhuang Town, Wangfu Street and the northern part of Miaozi Town, and the proportion of mining areas within these three administrative areas has increased annually from 88% in 1985 to more than 98% in 2010. Moreover, the open-pit mining areas in Qingzhou gradually expanded from a scattered, point-like distribution to a large, contiguous distribution. From 1985 to 2020, the open-pit mining area expanded to more than 10 times its original size at a rate of 0.5 km[2]/year. In 2015, this area reached its maximum size of 19.7 km[2] and slightly decreased in 2020. Furthermore, the expansion of the mining areas in Qingzhou went through three stages: a slow growth period before 1995, a rapid expansion period from 1995 to 2005, and a shutdown and remediation period after 2005. A quantitative accuracy assessment was performed by calculating the Intersection over Union (IoU) of the extraction results and the visual interpretation results from Gaofen-2 images with 1-m spatial resolution. The IoU reached 72%. The results showed that it was feasible to threshold the Landsat annual maximum NDVI data by the Otsu algorithm to extract the annual spatial extent of the open-pit mining areas. 
Our method will be easily transferable to other regions worldwide, enabling the monitoring of mine environments.}, } @article {pmid36530862, year = {2022}, author = {Tsai, CW and Lee, LY and Cheng, YP and Lin, CH and Hung, ML and Lin, JW}, title = {Integrating online meta-cognitive learning strategy and team regulation to develop students' programming skills, academic motivation, and refusal self-efficacy of Internet use in a cloud classroom.}, journal = {Universal access in the information society}, volume = {}, number = {}, pages = {1-16}, pmid = {36530862}, issn = {1615-5297}, abstract = {With the development of technology and demand for online courses, there have been considerable quantities of online, blended, or flipped courses designed and provided. However, in the technology-enhanced learning environments, which are also full of social networking websites, shopping websites, and free online games, it is challenging to focus students' attention and help them achieve satisfactory learning performance. In addition, the instruction of programming courses constantly challenges both teachers and students, particularly in online learning environments. To overcome and solve these problems and to facilitate students' learning, the researchers in this study integrated two teaching approaches, using meta-cognitive learning strategy (MCLS) and team regulation (TR), to develop students' regular learning habits and further contribute to their programming skills, academic motivation, and refusal self-efficacy of Internet use, in a cloud classroom. In this research, a quasi-experiment was conducted to investigate the effects of MCLS and TR adopting the experimental design of a 2 (MCLS vs. non-MCLS) × 2 (TR vs. non-TR) factorial pre-test/post-test. In this research, the participants consisted of four classes of university students from non-information or computer departments enrolled in programming design, a required course. 
The experimental groups comprised three of the classes, labelled as G1, G2, and G3. G1 concurrently received both the online MCLS and TR intervention, while G2 only received the online MCLS intervention, and G3 only received the online TR intervention. Serving as the control group, the fourth class (G4) received traditional teaching. This study investigated the effects of MCLS, TR, and their combination, on improving students' programming skills, academic motivation, and refusal self-efficacy of Internet use in an online computing course. According to the results, students who received online TR significantly enhanced their programming design skills and their refusal self-efficacy of Internet use a cloud classroom. However, the expected effects of MCLS on developing students' programming skills, academic motivation, and refusal self-efficacy of Internet use were not found in this study. The teaching strategy of integrating MCLS and TR in an online programming course in this study can serve as a reference for educators when conducting online, blended, or flipped courses during the COVID-19 pandemic.}, } @article {pmid36523099, year = {2022}, author = {Wang, S and Chen, B and Liang, R and Liu, L and Chen, H and Gao, M and Wu, J and Ju, W and Ho, PH}, title = {Energy-efficient workload allocation in edge-cloud fiber-wireless networks.}, journal = {Optics express}, volume = {30}, number = {24}, pages = {44186-44200}, doi = {10.1364/OE.472978}, pmid = {36523099}, issn = {1094-4087}, abstract = {In order to realize the green computing of the edge-cloud fiber-wireless networks, the cooperation between the edge servers and the cloud servers is particularly important to reduce the network energy consumption. Therefore, this paper proposes an energy-efficient workload allocation (EEWA) scheme to improve the energy efficiency by using the architecture of edge-cloud fiber-wireless networks. The feasibility of the proposed EEWA scheme was verified on our SDN testbed. 
We also performed simulations to obtain the optimal results for a given set of task requests. Simulation results show that our proposed EEWA scheme greatly reduces the blocking probability and the average energy consumption of task requests in edge-cloud fiber-wireless networks.}, } @article {pmid36517473, year = {2022}, author = {Ogasawara, O}, title = {Building cloud computing environments for genome analysis in Japan.}, journal = {Human genome variation}, volume = {9}, number = {1}, pages = {46}, pmid = {36517473}, issn = {2054-345X}, support = {JP19km0405501//Japan Agency for Medical Research and Development (AMED)/ ; }, abstract = {This review article describes the current status of data archiving and computational infrastructure in the field of genomic medicine, focusing primarily on the situation in Japan. I begin by introducing the status of supercomputer operations in Japan, where a high-performance computing infrastructure (HPCI) is operated to meet the diverse computational needs of science in general. Since this HPCI consists of supercomputers of various architectures located across the nation connected via a high-speed network, including supercomputers specialized in genome science, the status of its response to the explosive increase in genomic data, including the International Nucleotide Sequence Database Collaboration (INSDC) data archive, is explored. Separately, since it is clear that the use of commercial cloud computing environments needs to be promoted, both in light of the rapid increase in computing demands and to support international data sharing and international data analysis projects, I explain how the Japanese government has established a series of guidelines for the use of cloud computing based on its cybersecurity strategy and has begun to build a government cloud for government agencies. I will also carefully consider several other issues of user concern. 
Finally, I will show how Japan's major cloud computing infrastructure is currently evolving toward a multicloud and hybrid cloud configuration.}, } @article {pmid36516515, year = {2023}, author = {Zhou, Y and Luo, B and Sang, J and Li, C and Zhu, M and Zhu, Z and Dai, J and Wang, J and Chen, H and Zhai, S and Lu, L and Liu, H and Yu, G and Ye, J and Zhang, Z and Huan, J}, title = {A cloud-based consultation and collaboration system for radiotherapy: Remote decision support services for community radiotherapy centers.}, journal = {Computer methods and programs in biomedicine}, volume = {229}, number = {}, pages = {107270}, doi = {10.1016/j.cmpb.2022.107270}, pmid = {36516515}, issn = {1872-7565}, mesh = {Humans ; *Radiotherapy, Intensity-Modulated/methods ; Radiotherapy Planning, Computer-Assisted/methods ; Cloud Computing ; Radiometry ; Computer Simulation ; Radiotherapy Dosage ; }, abstract = {PURPOSE: This study aimed to establish a cloud-based radiotherapy consultation and collaboration system, then investigated the practicability of remote decision support for community radiotherapy centers using the system.

METHODS AND MATERIALS: A cloud-based consultation and collaboration system for radiotherapy, OncoEvidance®, was developed to provide remote services of LINAC modeling, simulation CT data import/export, target volume and organ-at-risk delineation, prescription, and treatment planning. The system was deployed on a hybrid cloud. A federation of public nodes, each corresponding to a medical institution, is managed by a central node where a group of consultants have registered. Users can access the system through the network using computing devices. The system has been tested at three community radiotherapy centers. One accelerator was modeled. Twelve consultants participated in the remote radiotherapy decision support and 77 radiation treatment plans had been evaluated remotely.

RESULTS: All the passing rates of per-beam dose verification are > 94% and all the passing rates of composite beam dose verification are > 99%. The average downloading time for one set of simulation CT data for one patient from the Internet was within 1 min under a cloud download bandwidth of 8 Mbps and a local network bandwidth of 100 Mbps. The average response time for one consultant to contour target volumes and make a prescription was about 24 h, and that for one consultant to design and optimize an IMRT treatment plan was about 36 h. 100% of the remote plans passed the dosimetric criteria and could be imported into the local TPS for further verification.

CONCLUSION: The cloud-based consultation and collaboration system saved the travel time for consultants and provided high quality radiotherapy to patients in community centers. The under-staffed community radiotherapy centers could benefit from the remote system with lower cost and better treatment quality control.}, } @article {pmid36515465, year = {2023}, author = {Wiewiórka, M and Szmurło, A and Stankiewicz, P and Gambin, T}, title = {Cloud-native distributed genomic pileup operations.}, journal = {Bioinformatics (Oxford, England)}, volume = {39}, number = {1}, pages = {}, pmid = {36515465}, issn = {1367-4811}, support = {//Research University/ ; }, mesh = {*Software ; *Genomics/methods ; Algorithms ; Genome ; Computational Biology/methods ; }, abstract = {MOTIVATION: Pileup analysis is a building block of many bioinformatics pipelines, including variant calling and genotyping. This step tends to become a bottleneck of the entire assay since the straightforward pileup implementations involve processing of all base calls from all alignments sequentially. On the other hand, a distributed version of the algorithm faces the intrinsic challenge of splitting reads-oriented file formats into self-contained partitions to avoid costly data exchange between computational nodes.

RESULTS: Here, we present a scalable, distributed and efficient implementation of a pileup algorithm that is suitable for deploying in cloud computing environments. In particular, we implemented: (i) our custom data-partitioning algorithm optimized to work with the alignment reads, (ii) a novel and unique approach to process alignment events from sequencing reads using the MD tags, (iii) the source code micro-optimizations for recurrent operations, and (iv) a modular structure of the algorithm. We have proven that our novel approach consistently and significantly outperforms other state-of-the-art distributed tools in terms of execution time (up to 6.5× faster) and memory usage (up to 2× less), resulting in a substantial cloud cost reduction. SeQuiLa is a cloud-native solution that can be easily deployed using any managed Kubernetes and Hadoop services available in public clouds, like Microsoft Azure Cloud, Google Cloud Platform, or Amazon Web Services. Together with the already implemented distributed range join and coverage calculations, our package provides end-users with a unified SQL interface for convenient analyses of population-scale genomic data in an interactive way.

https://biodatageeks.github.io/sequila/.}, } @article {pmid36512073, year = {2022}, author = {Paul, A and K S, V and Sood, A and Bhaumik, S and Singh, KA and Sethupathi, S and Chanda, A}, title = {Suspended Particulate Matter Analysis of Pre and During Covid Lockdown Using Google Earth Engine Cloud Computing: A Case Study of Ukai Reservoir.}, journal = {Bulletin of environmental contamination and toxicology}, volume = {110}, number = {1}, pages = {7}, pmid = {36512073}, issn = {1432-0800}, mesh = {Humans ; *Particulate Matter/analysis ; Cloud Computing ; Search Engine ; *COVID-19 ; Communicable Disease Control ; }, abstract = {Presence of suspended particulate matter (SPM) in a waterbody or a river can be caused by multiple parameters such as other pollutants by the discharge of poorly maintained sewage, siltation, sedimentation, flood and even bacteria. In this study, remote sensing techniques were used to understand the effects of pandemic-induced lockdown on the SPM concentration in the lower Tapi reservoir or Ukai reservoir. The estimation was done using Landsat-8 OLI (Operational Land Imager) having radiometric resolution (12-bit) and a spatial resolution of 30 m. The Google Earth Engine (GEE) cloud computing platform was used in this study to generate the products. The GEE is a semi-automated workflow system using a robust approach designed for scientific analysis and visualization of geospatial datasets. An algorithm was deployed, and a time-series (2013-2020) analysis was done for the study area. 
It was found that the average mean value of SPM in Tapi River during 2020 was the lowest of the last seven years for the same period.}, } @article {pmid36508783, year = {2023}, author = {Xu, X and Li, L and Zhou, H and Fan, M and Wang, H and Wang, L and Hu, Q and Cai, Q and Zhu, Y and Ji, S}, title = {MRTCM: A comprehensive dataset for probabilistic risk assessment of metals and metalloids in traditional Chinese medicine.}, journal = {Ecotoxicology and environmental safety}, volume = {249}, number = {}, pages = {114395}, doi = {10.1016/j.ecoenv.2022.114395}, pmid = {36508783}, issn = {1090-2414}, mesh = {Animals ; *Metals, Heavy/toxicity/analysis ; Medicine, Chinese Traditional ; *Metalloids/analysis ; *Mercury/analysis ; Risk Assessment ; Carcinogens/analysis ; Environmental Monitoring/methods ; }, abstract = {Traditional Chinese medicine (TCM) is still considered a global complementary or alternative medical system, but exogenous hazardous contaminants remain in TCM even after decocting. Besides, it is time-consuming to conduct a risk assessment of trace elements in TCMs with a non-automatic approach due to the wide variety of TCMs. Here, we present MRTCM, a cloud-computing infrastructure for automating the probabilistic risk assessment of metals and metalloids in TCM. MRTCM includes a consumption database and a pollutant database involving forty million rows of consumption data and fourteen types of TCM potentially toxic elements concentrations. The algorithm of probabilistic risk assessment was also packaged in MRTCM to assess the risks of eight elements with Monte Carlo simulation. The results demonstrated that 96.64% and 99.46% had no non-carcinogenic risk (hazard indices (HI) were < 1.0) for animal and herbal medicines consumers, respectively. After twenty years of exposure, less than 1% of the total carcinogenic risk (CRt) was > 10[-4] for TCM consumers, indicating that they are at potential risk for carcinogenicity. 
Sensitivity analysis revealed that annual consumption and concentration were the main variables affecting the assessment results. Ultimately, a priority management list of TCMs was also generated, indicating that more attention should be paid to the non-carcinogenic risks of As, Mn, and Hg and the carcinogenic risks of As and Cr in Pheretima and Cr in Arcae Conch. In general, MRTCM could significantly enhance the efficiency of risk assessment in TCM and provide reasonable guidance for policymakers to optimize risk management.}, } @article {pmid36506615, year = {2022}, author = {Zahid, MA and Shafiq, B and Shamail, S and Afzal, A and Vaidya, J}, title = {BP-DEBUG: A Fault Debugging and Resolution Tool for Business Processes.}, journal = {Proceedings. International Conference on Distributed Computing Systems}, volume = {2022}, number = {}, pages = {1306-1309}, doi = {10.1109/icdcs54860.2022.00143}, pmid = {36506615}, issn = {2575-8411}, support = {R01 GM118574/GM/NIGMS NIH HHS/United States ; R35 GM134927/GM/NIGMS NIH HHS/United States ; }, abstract = {Cloud computing and Internet-ware software paradigm have enabled rapid development of distributed business process (BP) applications. Several tools are available to facilitate automated/ semi-automated development and deployment of such distributed BPs by orchestrating relevant service components in a plug-and-play fashion. However, the BPs developed using such tools are not guaranteed to be fault-free. In this demonstration, we present a tool called BP-DEBUG for debugging and automated repair of faulty BPs. BP-DEBUG implements our Collaborative Fault Resolution (CFR) approach that utilizes the knowledge of existing BPs with a similar set of web services fault detection and resolution in a given user BP. Essentially, CFR attempts to determine any semantic and structural differences between a faulty BP and related BPs and computes a minimum set of transformations which can be used to repair the faulty BP. 
Demo url: https://youtu.be/mf49oSekLOA.}, } @article {pmid36506593, year = {2022}, author = {Silversmith, W and Zlateski, A and Bae, JA and Tartavull, I and Kemnitz, N and Wu, J and Seung, HS}, title = {Igneous: Distributed dense 3D segmentation meshing, neuron skeletonization, and hierarchical downsampling.}, journal = {Frontiers in neural circuits}, volume = {16}, number = {}, pages = {977700}, pmid = {36506593}, issn = {1662-5110}, support = {R01 NS104926/NS/NINDS NIH HHS/United States ; U01 MH117072/MH/NIMH NIH HHS/United States ; U19 NS104648/NS/NINDS NIH HHS/United States ; RF1 MH117815/MH/NIMH NIH HHS/United States ; U01 MH114824/MH/NIMH NIH HHS/United States ; R01 EY027036/EY/NEI NIH HHS/United States ; }, mesh = {*Imaging, Three-Dimensional/methods ; Microscopy, Electron ; *Neurons ; Information Storage and Retrieval ; Image Processing, Computer-Assisted/methods ; }, abstract = {Three-dimensional electron microscopy images of brain tissue and their dense segmentations are now petascale and growing. These volumes require the mass production of dense segmentation-derived neuron skeletons, multi-resolution meshes, image hierarchies (for both modalities) for visualization and analysis, and tools to manage the large amount of data. However, open tools for large-scale meshing, skeletonization, and data management have been missing. Igneous is a Python-based distributed computing framework that enables economical meshing, skeletonization, image hierarchy creation, and data management using cloud or cluster computing that has been proven to scale horizontally. 
We sketch Igneous's computing framework, show how to use it, and characterize its performance and data storage.}, } @article {pmid36502208, year = {2022}, author = {Buriboev, A and Muminov, A}, title = {Computer State Evaluation Using Adaptive Neuro-Fuzzy Inference Systems.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {23}, pages = {}, pmid = {36502208}, issn = {1424-8220}, mesh = {Humans ; *Fuzzy Logic ; *Neural Networks, Computer ; Algorithms ; Computers ; }, abstract = {Several crucial system design and deployment decisions, including workload management, sizing, capacity planning, and dynamic rule generation in dynamic systems such as computers, depend on predictive analysis of resource consumption. An analysis of the computer components' utilizations and their workloads is the best way to assess the performance of the computer's state. Especially, analyzing the particular or whole influence of components on another component gives more reliable information about the state of computer systems. There are many evaluation techniques proposed by researchers. The bulk of them have complicated metrics and parameters such as utilization, time, throughput, latency, delay, speed, frequency, and the percentage which are difficult to understand and use in the assessing process. According to these, we proposed a simplified evaluation method using components' utilization in percentage scale and its linguistic values. The use of the adaptive neuro-fuzzy inference system (ANFIS) model and fuzzy set theory offers fantastic prospects to realize use impact analyses. The purpose of the study is to examine the usage impact of memory, cache, storage, and bus on CPU performance using the Sugeno type and Mamdani type ANFIS models to determine the state of the computer system. The suggested method is founded on keeping an eye on how computer parts behave. 
The developed method can be applied for all kinds of computing system, such as personal computers, mainframes, and supercomputers by considering that the inference engine of the proposed ANFIS model requires only its own behavior data of computers' components and the number of inputs can be enriched according to the type of computer, for instance, in cloud computers' case the added number of clients and network quality can be used as the input parameters. The models present linguistic and quantity results which are convenient to understand performance issues regarding specific bottlenecks and determining the relationship of components.}, } @article {pmid36502177, year = {2022}, author = {Mei, P and Karimi, HR and Chen, F and Yang, S and Huang, C and Qiu, S}, title = {A Learning-Based Vehicle-Cloud Collaboration Approach for Joint Estimation of State-of-Energy and State-of-Health.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {23}, pages = {}, pmid = {36502177}, issn = {1424-8220}, mesh = {United States ; Bayes Theorem ; Physical Phenomena ; *Electric Power Supplies ; *Electricity ; Neural Networks, Computer ; }, abstract = {The state-of-energy (SOE) and state-of-health (SOH) are two crucial quotas in the battery management systems, whose accurate estimation is facing challenges by electric vehicles' (EVs) complexity and changeable external environment. Although the machine learning algorithm can significantly improve the accuracy of battery estimation, it cannot be performed on the vehicle control unit as it requires a large amount of data and computing power. This paper proposes a joint SOE and SOH prediction algorithm, which combines long short-term memory (LSTM), Bi-directional LSTM (Bi-LSTM), and convolutional neural networks (CNNs) for EVs based on vehicle-cloud collaboration. 
Firstly, the indicator of battery performance degradation is extracted for SOH prediction according to the historical data; the Bayesian optimization approach is applied to the SOH prediction combined with Bi-LSTM. Then, the CNN-LSTM is implemented to provide direct and nonlinear mapping models for SOE. These direct mapping models avoid parameter identification and updating, which are applicable in cases with complex operating conditions. Finally, the SOH correction in SOE estimation achieves the joint estimation with different time scales. With the validation of the National Aeronautics and Space Administration battery data set, as well as the established battery platform, the error of the proposed method is kept within 3%. The proposed vehicle-cloud approach performs high-precision joint estimation of battery SOE and SOH. It can not only use the battery historical data of the cloud platform to predict the SOH but also correct the SOE according to the predicted value of the SOH. The feasibility of vehicle-cloud collaboration is promising in future battery management systems.}, } @article {pmid36502107, year = {2022}, author = {Jing, X and Tian, X and Du, C}, title = {LPAI-A Complete AIoT Framework Based on LPWAN Applicable to Acoustic Scene Classification Scenarios.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {23}, pages = {}, pmid = {36502107}, issn = {1424-8220}, support = {XDC02070800//Chinese Academy of Sciences(CAS)/ ; 22511100600//The Science and Technology Commission of Shanghai Municipality (STCSM)/ ; }, mesh = {Animals ; *Artificial Intelligence ; Acoustics ; Computer Simulation ; Reaction Time ; Recognition, Psychology ; *Ursidae ; }, abstract = {Deploying artificial intelligence on edge nodes of Low-Power Wide Area Networks can significantly reduce network transmission volumes, event response latency, and overall network power consumption. 
However, the edge nodes in LPWAN bear limited computing power and storage space, and researchers have found it challenging to improve the recognition capability of the nodes using sensor data from the environment. In particular, the domain-shift problem in LPWAN is challenging to overcome. In this paper, a complete AIoT system framework referred to as LPAI is presented. It is the first generic framework for implementing AIoT technology based on LPWAN applicable to acoustic scene classification scenarios. LPAI overcomes the domain-shift problem, which enables resource-constrained edge nodes to continuously improve their performance using real data to become more adaptive to the environment. For efficient use of limited resources, the edge nodes independently select representative data and transmit it back to the cloud. Moreover, the model is iteratively retrained on the cloud using the few-shot uploaded data. Finally, the feasibility of LPAI is analyzed, and simulation experiments on the public ASC dataset provide validation that our proposed framework can improve the recognition accuracy by as little as 5% using 85 actual sensor data points.}, } @article {pmid36501960, year = {2022}, author = {Wan, S and Zhao, K and Lu, Z and Li, J and Lu, T and Wang, H}, title = {A Modularized IoT Monitoring System with Edge-Computing for Aquaponics.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {23}, pages = {}, pmid = {36501960}, issn = {1424-8220}, support = {61871380//National Natural Science Foundation of China/ ; 21327401D-1//Key Common Technologies for High-quality Agricultural Development/ ; }, mesh = {Animals ; *Plant Breeding ; *Electrocardiography ; Aquaculture/methods ; Algorithms ; }, abstract = {Aquaponics is a green and efficient agricultural production model that combines aquaculture and vegetable cultivation. It is worth looking into optimizing the proportion of fish and plants to improve the quality and yield. 
However, there is little non-destructive monitoring of plant growth in aquaponics monitoring systems currently. In this paper, based on the Internet of Things technologies, a monitoring system is designed with miniaturization, modularization, and low-cost features for cultivation-breeding ratio research. The system can realize remote monitoring and intelligent control of parameters needed to keep fish and plants under optimal conditions. First, a 32-bit chip is used as the Microcontroller Unit to develop the intelligent sensing unit, which can realize 16 different data acquisitions as stand-alone extensible modules. Second, to achieve plant data acquisition and upload, the Raspberry Pi embedded with image processing algorithms is introduced to realize edge-computing. Finally, all the collected data is stored in the Ali-cloud through Wi-Fi and a WeChat Mini Program is designed to display data and control devices. The results show that there is no packet loss within 90 m for wireless transmission, and the error rate of environment parameters is limited to 5%. It was proven that the system is intelligent, flexible, low-cost, and stable which is suitable for small-scale aquaponics well.}, } @article {pmid36501875, year = {2022}, author = {Wu, TY and Kong, F and Wang, L and Chen, YC and Kumari, S and Pan, JS}, title = {Toward Smart Home Authentication Using PUF and Edge-Computing Paradigm.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {23}, pages = {}, pmid = {36501875}, issn = {1424-8220}, mesh = {*Cloud Computing ; *Communication ; Internet ; Nonoxynol ; Privacy ; }, abstract = {The smart home is a crucial embodiment of the internet of things (IoT), which can facilitate users to access smart home services anytime and anywhere. Due to the limited resources of cloud computing, it cannot meet users' real-time needs. Therefore, edge computing emerges as the times require, providing users with better real-time access and storage. 
The application of edge computing in the smart home environment can enable users to enjoy smart home services. However, users and smart devices communicate through public channels, and malicious attackers may intercept information transmitted through public channels, resulting in user privacy disclosure. Therefore, it is a critical issue to protect the secure communication between users and smart devices in the smart home environment. Furthermore, authentication protocols in smart home environments also have some security challenges. In this paper, we propose an anonymous authentication protocol that applies edge computing to the smart home environment to protect communication security between entities. To protect the security of smart devices, we embed physical unclonable functions (PUF) into each smart device. Real-or-random model, informal security analysis, and ProVerif are adopted to verify the security of our protocol. Finally, we compare our protocol with existing protocols regarding security and performance. The comparison results demonstrate that our protocol has higher security and slightly better performance.}, } @article {pmid36501855, year = {2022}, author = {Li, P and Cao, J}, title = {A Virtual Machine Consolidation Algorithm Based on Dynamic Load Mean and Multi-Objective Optimization in Cloud Computing.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {23}, pages = {}, pmid = {36501855}, issn = {1424-8220}, support = {62172089//National Natural Science Foundation of China/ ; }, abstract = {High energy consumption and low resource utilization have become increasingly prominent problems in cloud data centers. Virtual machine (VM) consolidation is the key technology to solve the problems. However, excessive VM consolidation may lead to service level agreement violations (SLAv). Most studies have focused on optimizing energy consumption and ignored other factors. 
An effective VM consolidation should comprehensively consider multiple factors, including the quality of service (QoS), energy consumption, resource utilization, migration overhead and network communication overhead, which is a multi-objective optimization problem. To solve the problems above, we propose a VM consolidation approach based on dynamic load mean and multi-objective optimization (DLMM-VMC), which aims to minimize power consumption, resources waste, migration overhead and network communication overhead while ensuring QoS. First, based on multi-dimensional resources consideration, the host load status is objectively evaluated by using the proposed host load detection algorithm based on the dynamic load mean to avoid an excessive VM consolidation. Then, the best solution is obtained based on the proposed multi-objective optimization model and optimized ant colony algorithm, so as to ensure the common interests of cloud service providers and users. Finally, the experimental results show that compared with the existing VM consolidation methods, our proposed algorithm has a significant improvement in the energy consumption, QoS, resources waste, SLAv, migration and network overhead.}, } @article {pmid36501828, year = {2022}, author = {Marcillo, P and Tamayo-Urgilés, D and Valdivieso Caraguay, ÁL and Hernández-Álvarez, M}, title = {Security in V2I Communications: A Systematic Literature Review.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {23}, pages = {}, pmid = {36501828}, issn = {1424-8220}, support = {PIS 20-02//National Polytechnic School/ ; }, mesh = {*Computer Security ; *Confidentiality ; Cloud Computing ; Computer Communication Networks ; Communication ; }, abstract = {Recently, the number of vehicles equipped with wireless connections has increased considerably. The impact of that growth in areas such as telecommunications, infotainment, and automatic driving is enormous. 
More and more drivers want to be part of a vehicular network, despite the implications or risks that, for instance, the openness of wireless communications, its dynamic topology, and its considerable size may bring. Undoubtedly, this trend is because of the benefits the vehicular network can offer. Generally, a vehicular network has two modes of communication (V2I and V2V). The advantage of V2I over V2V is roadside units' high computational and transmission power, which assures the functioning of early warning and driving guidance services. This paper aims to discover the principal vulnerabilities and challenges in V2I communications, the tools and methods to mitigate those vulnerabilities, the evaluation metrics to measure the effectiveness of those tools and methods, and based on those metrics, the methods or tools that provide the best results. Researchers have identified the non-resistance to attacks, the regular updating and exposure of keys, and the high dependence on certification authorities as main vulnerabilities. Thus, the authors found schemes resistant to attacks, authentication schemes, privacy protection models, and intrusion detection and prevention systems. Of the solutions for providing security analyzed in this review, the authors determined that most of them use metrics such as computational cost and communication overhead to measure their performance. Additionally, they determined that the solutions that use emerging technologies such as fog/edge/cloud computing present better results than the rest. 
Finally, they established that the principal challenge in V2I communication is to protect and dispose of a safe and reliable communication channel to avoid adversaries taking control of the medium.}, } @article {pmid36501767, year = {2022}, author = {Hung, YH}, title = {Developing an Improved Ensemble Learning Approach for Predictive Maintenance in the Textile Manufacturing Process.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {23}, pages = {}, pmid = {36501767}, issn = {1424-8220}, support = {MOST 110-2221-E-224 -047,MOST 111-2221-E-224 -033 -MY2.//Ministry of Science and Technology/ ; }, mesh = {*Machine Learning ; *Algorithms ; Data Science ; Cloud Computing ; Automation ; }, abstract = {With the rapid development of digital transformation, paper forms are digitalized as electronic forms (e-Forms). Existing data can be applied in predictive maintenance (PdM) for the enabling of intelligentization and automation manufacturing. This study aims to enhance the utilization of collected e-Form data though machine learning approaches and cloud computing to predict and provide maintenance actions. The ensemble learning approach (ELA) requires less computation time and has a simple hardware requirement; it is suitable for processing e-form data with specific attributes. This study proposed an improved ELA to predict the defective class of product data from a manufacturing site's work order form. This study proposed the resource dispatching approach to arrange data with the corresponding emailing resource for automatic notification. This study's novelty is the integration of cloud computing and an improved ELA for PdM to assist the textile product manufacturing process. The data analytics results show that the improved ensemble learning algorithm has over 98% accuracy and precision for defective product prediction. 
The validation results of the dispatching approach show that data can be correctly transmitted in a timely manner to the corresponding resource, along with a notification being sent to users.}, } @article {pmid36501737, year = {2022}, author = {Gul, OM}, title = {Heuristic Resource Reservation Policies for Public Clouds in the IoT Era.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {23}, pages = {}, pmid = {36501737}, issn = {1424-8220}, mesh = {*Cloud Computing ; *Algorithms ; Policy ; }, abstract = {With the advances in the IoT era, the number of wireless sensor devices has been growing rapidly. This increasing number gives rise to more complex networks where more complex tasks can be executed by utilizing more computational resources from the public clouds. Cloud service providers use various pricing models for their offered services. Some models are appropriate for the cloud service user's short-term requirements whereas the other models are appropriate for the long-term requirements of cloud service users. Reservation-based price models are suitable for long-term requirements of cloud service users. We used the pricing schemes with spot and reserved instances. Reserved instances support a hybrid cost model with fixed reservation costs that vary with contract duration and an hourly usage charge which is lower than the charge of the spot instances. Optimizing resources to be reserved requires sufficient research effort. Recent algorithms proposed for this problem are generally based on integer programming problems, so they do not have polynomial time complexity. In this work, heuristic-based polynomial time policies are proposed for this problem. 
It is exhibited that the cost for the cloud service user which uses our approach is comparable to optimal solutions, i.e., it is near-optimal.}, } @article {pmid36500810, year = {2022}, author = {Malik, S and Dhasmana, A and Preetam, S and Mishra, YK and Chaudhary, V and Bera, SP and Ranjan, A and Bora, J and Kaushik, A and Minkina, T and Jatav, HS and Singh, RK and Rajput, VD}, title = {Exploring Microbial-Based Green Nanobiotechnology for Wastewater Remediation: A Sustainable Strategy.}, journal = {Nanomaterials (Basel, Switzerland)}, volume = {12}, number = {23}, pages = {}, pmid = {36500810}, issn = {2079-4991}, abstract = {Water scarcity due to contamination of water resources with different inorganic and organic contaminants is one of the foremost global concerns. It is due to rapid industrialization, fast urbanization, and the low efficiency of traditional wastewater treatment strategies. Conventional water treatment strategies, including chemical precipitation, membrane filtration, coagulation, ion exchange, solvent extraction, adsorption, and photolysis, are based on adopting various nanomaterials (NMs) with a high surface area, including carbon NMs, polymers, metals-based, and metal oxides. However, significant bottlenecks are toxicity, cost, secondary contamination, size and space constraints, energy efficiency, prolonged time consumption, output efficiency, and scalability. On the contrary, green NMs fabricated using microorganisms emerge as cost-effective, eco-friendly, sustainable, safe, and efficient substitutes for these traditional strategies. This review summarizes the state-of-the-art microbial-assisted green NMs and strategies including microbial cells, magnetotactic bacteria (MTB), bio-augmentation and integrated bioreactors for removing an extensive range of water contaminants addressing the challenges associated with traditional strategies. 
Furthermore, a comparative analysis of the efficacies of microbe-assisted green NM-based water remediation strategy with the traditional practices in light of crucial factors like reusability, regeneration, removal efficiency, and adsorption capacity has been presented. The associated challenges, their alternate solutions, and the cutting-edge prospects of microbial-assisted green nanobiotechnology with the integration of advanced tools including internet-of-nano-things, cloud computing, and artificial intelligence have been discussed. This review opens a new window to assist future research dedicated to sustainable and green nanobiotechnology-based strategies for environmental remediation applications.}, } @article {pmid36497649, year = {2022}, author = {Vărzaru, AA}, title = {Assessing Digital Transformation of Cost Accounting Tools in Healthcare.}, journal = {International journal of environmental research and public health}, volume = {19}, number = {23}, pages = {}, pmid = {36497649}, issn = {1660-4601}, mesh = {Artificial Intelligence ; Delivery of Health Care ; *Accounting ; *Blockchain ; Big Data ; }, abstract = {The expansion of digital technologies has significantly changed most economic activities and professions. Digital technologies penetrated managerial accounting and have a vast potential to transform this profession. Implementing emerging digital technologies, such as artificial intelligence, blockchain, the Internet of Things, big data, and cloud computing, can trigger a crucial leap forward, leading to a paradigm-shifting in healthcare organizations' accounting management. The paper's main objective is to investigate the perception of Romanian accountants on implementing digital technologies in healthcare organizations' accounting management. The paper implies a study based on a questionnaire among Romanian accountants who use various digital technologies implemented in traditional and innovative cost accounting tools. 
Based on structural equation modeling, the results emphasize the prevalence of innovative tools over traditional cost accounting tools improved through digital transformation, digital technologies assuming the most complex and time-consuming tasks. Moreover, the influence of cost accounting tools improved through digital transformation on healthcare organizations' performance is much more robust in the case of innovative tools than in the case of traditional cost accounting tools. The proposed model provides managers in healthcare organizations with information on the most effective methods in the context of digital transformation.}, } @article {pmid36495459, year = {2023}, author = {Contaldo, SG and Alessandri, L and Colonnelli, I and Beccuti, M and Aldinucci, M}, title = {Bringing Cell Subpopulation Discovery on a Cloud-HPC Using rCASC and StreamFlow.}, journal = {Methods in molecular biology (Clifton, N.J.)}, volume = {2584}, number = {}, pages = {337-345}, pmid = {36495459}, issn = {1940-6029}, mesh = {*Software ; *Algorithms ; Workflow ; High-Throughput Nucleotide Sequencing ; Single-Cell Analysis ; Sequence Analysis, RNA ; }, abstract = {The idea behind novel single-cell RNA sequencing (scRNA-seq) pipelines is to isolate single cells through microfluidic approaches and generate sequencing libraries in which the transcripts are tagged to track their cell of origin. Modern scRNA-seq platforms are capable of analyzing up to many thousands of cells in each run. Then, combined with massive high-throughput sequencing producing billions of reads, scRNA-seq allows the assessment of fundamental biological properties of cell populations and biological systems at unprecedented resolution.In this chapter, we describe how cell subpopulation discovery algorithms, integrated into rCASC, could be efficiently executed on cloud-HPC infrastructure. 
To achieve this task, we focus on the StreamFlow framework which provides container-native runtime support for scientific workflows in cloud/HPC environments.}, } @article {pmid36472895, year = {2022}, author = {Barbaric, A and Munteanu, C and Ross, H and Cafazzo, JA}, title = {Design of a Patient Voice App Experience for Heart Failure Management: Usability Study.}, journal = {JMIR formative research}, volume = {6}, number = {12}, pages = {e41628}, pmid = {36472895}, issn = {2561-326X}, abstract = {BACKGROUND: The use of digital therapeutics (DTx) in the prevention and management of medical conditions has increased through the years, with an estimated 44 million people using one as part of their treatment plan in 2021, nearly double the number from the previous year. DTx are commonly accessed through smartphone apps, but offering these treatments through additional platforms can improve the accessibility of these interventions. Voice apps are an emerging technology in the digital health field; not only do they have the potential to improve DTx adherence, but they can also create a better user experience for some user groups.

OBJECTIVE: This research aimed to identify the acceptability and feasibility of offering a voice app for a chronic disease self-management program. The objective of this project was to design, develop, and evaluate a voice app of an already-existing smartphone-based heart failure self-management program, Medly, to be used as a case study.

METHODS: A voice app version of Medly was designed and developed through a user-centered design process. We conducted a usability study and semistructured interviews with patients with heart failure (N=8) at the Peter Munk Cardiac Clinic in Toronto General Hospital to better understand the user experience. A Medly voice app prototype was built using a software development kit in tandem with a cloud computing platform and was verified and validated before the usability study. Data collection and analysis were guided by a mixed methods triangulation convergence design.

RESULTS: Common themes were identified in the results of the usability study, which involved 8 participants with heart failure. Almost all participants (7/8, 88%) were satisfied with the voice app and felt confident using it, although half of the participants (4/8, 50%) were unsure about using it in the future. Six main themes were identified: changes in physical behavior, preference between voice app and smartphone, importance of music during voice app interaction, lack of privacy concerns, desired reassurances during voice app interaction, and helpful aids during voice app interaction. These findings were triangulated with the quantitative data, and it concluded that the main area for improvement was related to the ease of use; design changes were then implemented to better improve the user experience.

CONCLUSIONS: This work offered preliminary insight into the acceptability and feasibility of a Medly voice app. Given the recent emergence of voice apps in health care, we believe that this research offered invaluable insight into successfully deploying DTx for chronic disease self-management using this technology.}, } @article {pmid36470948, year = {2022}, author = {Zhao, S and Guo, X and Qu, Z and Zhang, Z and Yu, T}, title = {Intelligent retrieval method for power grid operation data based on improved SimHash and multi-attribute decision making.}, journal = {Scientific reports}, volume = {12}, number = {1}, pages = {20994}, pmid = {36470948}, issn = {2045-2322}, abstract = {In the trend of energy revolution, power data becomes one of the key elements of the power grid. And an advanced power system with "electric power + computing power" as the core has become an inevitable choice. However, the traditional search approach based on directory query is commonly used for power grid operation data in domestic and international. The approach fails to effectively meet the user's need for fast, accurate and personalized retrieval of useful information from the vast amount of power grid data. It seriously affects the real-time availability of data and the efficiency of business-critical analytical decisions. For this reason, an intelligent retrieval approach for power grid operation data based on improved SimHash and multi-attribute decision making is proposed in this paper. This method elaborates the properties of SimHash and multi-attribute decision making algorithms. And an intelligent parallel retrieval algorithm MR-ST based on MapReduce model is designed. Finally, real time grid operation data from multiple sources are analyzed on the cloud platform for example. The experimental results show the effectiveness and precision of the method. Compared with traditional methods, the search accuracy rate, search completion rate and search time are significantly improved. 
Experiments show that the method can be applied to intelligent retrieval of power grid operation data.}, } @article {pmid36470698, year = {2023}, author = {Lee, P and Tahmasebi, A and Dave, JK and Parekh, MR and Kumaran, M and Wang, S and Eisenbrey, JR and Donuru, A}, title = {Comparison of Gray-scale Inversion to Improve Detection of Pulmonary Nodules on Chest X-rays Between Radiologists and a Deep Convolutional Neural Network.}, journal = {Current problems in diagnostic radiology}, volume = {52}, number = {3}, pages = {180-186}, doi = {10.1067/j.cpradiol.2022.11.004}, pmid = {36470698}, issn = {1535-6302}, mesh = {Humans ; X-Rays ; *Radiography, Thoracic/methods ; Retrospective Studies ; *Multiple Pulmonary Nodules/diagnostic imaging ; Neural Networks, Computer ; Radiologists ; }, abstract = {Detection of pulmonary nodules on chest x-rays is an important task for radiologists. Previous studies have shown improved detection rates using gray-scale inversion. The purpose of our study was to compare the efficacy of gray-scale inversion in improving the detection of pulmonary nodules on chest x-rays for radiologists and machine learning models (ML). We created a mixed dataset consisting of 60, 2-view (posteroanterior view - PA and lateral view) chest x-rays with computed tomography confirmed nodule(s) and 62 normal chest x-rays. Twenty percent of the cases were separated for a testing dataset (24 total images). Data augmentation through mirroring and transfer learning was used for the remaining cases (784 total images) for supervised training of 4 ML models (grayscale PA, grayscale lateral, gray-scale inversion PA, and gray-scale inversion lateral) on Google's cloud-based AutoML platform. Three cardiothoracic radiologists analyzed the complete 2-view dataset (n=120) and, for comparison to the ML, the single-view testing subsets (12 images each). 
Gray-scale inversion (area under the curve (AUC) 0.80, 95% confidence interval (CI) 0.75-0.85) did not improve diagnostic performance for radiologists compared to grayscale (AUC 0.84, 95% CI 0.79-0.88). Gray-scale inversion also did not improve diagnostic performance for the ML. The ML did demonstrate higher sensitivity and negative predictive value for grayscale PA (72.7% and 75.0%), grayscale lateral (63.6% and 66.6%), and gray-scale inversion lateral views (72.7% and 76.9%), comparing favorably to the radiologists (63.9% and 72.3%, 27.8% and 58.3%, 19.5% and 50.5% respectively). In the limited testing dataset, the ML did demonstrate higher sensitivity and negative predictive value for grayscale PA (72.7% and 75.0%), grayscale lateral (63.6% and 66.6%), and gray-scale inversion lateral views (72.7% and 76.9%), comparing favorably to the radiologists (63.9% and 72.3%, 27.8% and 58.3%, 19.5% and 50.5%, respectively). Further investigation of other post-processing algorithms to improve diagnostic performance of ML is warranted.}, } @article {pmid36467434, year = {2022}, author = {Lanjewar, MG and Shaikh, AY and Parab, J}, title = {Cloud-based COVID-19 disease prediction system from X-Ray images using convolutional neural network on smartphone.}, journal = {Multimedia tools and applications}, volume = {}, number = {}, pages = {1-30}, pmid = {36467434}, issn = {1380-7501}, abstract = {COVID-19 has engulfed over 200 nations through human-to-human transmission, either directly or indirectly. Reverse Transcription-polymerase Chain Reaction (RT-PCR) has been endorsed as a standard COVID-19 diagnostic procedure but has caveats such as low sensitivity, the need for a skilled workforce, and is time-consuming. Coronaviruses show significant manifestation in Chest X-Ray (CX-Ray) images and, thus, can be a viable option for an alternate COVID-19 diagnostic strategy. 
An automatic COVID-19 detection system can be developed to detect the disease, thus reducing strain on the healthcare system. This paper discusses a real-time Convolutional Neural Network (CNN) based system for COVID-19 illness prediction from CX-Ray images on the cloud. The implemented CNN model displays exemplary results, with training accuracy being 99.94% and validation accuracy reaching 98.81%. The confusion matrix was utilized to assess the models' outcome and achieved 99% precision, 98% recall, 99% F1 score, 100% training area under the curve (AUC) and 98.3% validation AUC. The same CX-Ray dataset was also employed to predict the COVID-19 disease with deep Convolution Neural Networks (DCNN), such as ResNet50, VGG19, InceptonV3, and Xception. The prediction outcome demonstrated that the present CNN was more capable than the DCNN models. The efficient CNN model was deployed to the Platform as a Service (PaaS) cloud.}, } @article {pmid36465713, year = {2023}, author = {Magotra, B and Malhotra, D and Dogra, AK}, title = {Adaptive Computational Solutions to Energy Efficiency in Cloud Computing Environment Using VM Consolidation.}, journal = {Archives of computational methods in engineering : state of the art reviews}, volume = {30}, number = {3}, pages = {1789-1818}, pmid = {36465713}, issn = {1886-1784}, abstract = {Cloud Computing has emerged as a computing paradigm where services are provided through the internet in recent years. Offering on-demand services has transformed the IT companies' working environment, leading to a linearly increasing trend of its usage. The provisioning of the Computing infrastructure is achieved with the help of virtual machines. A great figure of physical devices is required to satisfy the users' resource requirements. To meet the requirements of the submitted workloads that are usually dynamic, the cloud data centers cause the over-provisioning of cloud resources. 
The result of this over-provisioning is the resource wastage with an increase in the levels of energy consumption, causing a raised operational cost. High CO2 emissions result from this huge energy consumption by data centers, posing a threat to environmental stability. The environmental concern demands for the controlled energy consumption, which can be attained by optimal usage of resources to achieve in the server load, by minimizing the number of active nodes, and by minimizing the frequency of switching between active and de-active server mode in the data center. Motivated by these actualities, we discuss numerous statistical, deterministic, probabilistic, machine learning and optimization based computational solutions for the cloud computing environment. A comparative analysis of the computational methods, on the basis of architecture, consolidation step involved, objectives achieved, simulators involved and resources utilized, has also been presented. A taxonomy for virtual machine (VM) consolidation has also been derived in this research article followed by emerging challenges and research gaps in the field of VM consolidation in cloud computing environment.}, } @article {pmid36465318, year = {2022}, author = {Ilyas, A and Alatawi, MN and Hamid, Y and Mahfooz, S and Zada, I and Gohar, N and Shah, MA}, title = {Software architecture for pervasive critical health monitoring system using fog computing.}, journal = {Journal of cloud computing (Heidelberg, Germany)}, volume = {11}, number = {1}, pages = {84}, pmid = {36465318}, issn = {2192-113X}, abstract = {Because of the existence of Covid-19 and its variants, health monitoring systems have become mandatory, particularly for critical patients such as neonates. However, the massive volume of real-time data generated by monitoring devices necessitates the use of efficient methods and approaches to respond promptly. 
A fog-based architecture for IoT healthcare systems tends to provide better services, but it also produces some issues that must be addressed. We present a bidirectional approach to improving real-time data transmission for health monitors by minimizing network latency and usage in this paper. To that end, a simplified approach for large-scale IoT health monitoring systems is devised, which provides a solution for IoT device selection of optimal fog nodes to reduce both communication and processing delays. Additionally, an improved dynamic approach for load balancing and task assignment is also suggested. Embedding the best practices from the IoT, Fog, and Cloud planes, our aim in this work is to offer software architecture for IoT-based healthcare systems to fulfill non-functional needs. 4 + 1 views are used to illustrate the proposed architecture.}, } @article {pmid36462891, year = {2022}, author = {Motwani, A and Shukla, PK and Pawar, M}, title = {Ubiquitous and smart healthcare monitoring frameworks based on machine learning: A comprehensive review.}, journal = {Artificial intelligence in medicine}, volume = {134}, number = {}, pages = {102431}, pmid = {36462891}, issn = {1873-2860}, mesh = {Aged ; Humans ; *COVID-19/epidemiology ; Delivery of Health Care ; Machine Learning ; Pandemics ; }, abstract = {During the COVID-19 pandemic, the patient care delivery paradigm rapidly shifted to remote technological solutions. Rising rates of life expectancy of older people, and deaths due to chronic diseases (CDs) such as cancer, diabetes and respiratory disease pose many challenges to healthcare. While the feasibility of Remote Patient Monitoring (RPM) with a Smart Healthcare Monitoring (SHM) framework was somewhat questionable before the COVID-19 pandemic, it is now a proven commodity and is on its way to becoming ubiquitous. More health organizations are adopting RPM to enable CD management in the absence of individual monitoring. 
The current studies on SHM have reviewed the applications of IoT and/or Machine Learning (ML) in the domain, their architecture, security, privacy and other network related issues. However, no study has analyzed the AI and ubiquitous computing advances in SHM frameworks. The objective of this research is to identify and map key technical concepts in the SHM framework. In this context an interesting and meaningful classification of the research articles surveyed for this work is presented. The comprehensive and systematic review is based on the "Preferred Reporting Items for Systematic Review and Meta-Analysis" (PRISMA) approach. A total of 2540 papers were screened from leading research archives from 2016 to March 2021, and finally, 50 articles were selected for review. The major advantages, developments, distinctive architectural structure, components, technical challenges and possibilities in SHM are briefly discussed. A review of various recent cloud and fog computing based architectures, major ML implementation challenges, prospects and future trends is also presented. The survey primarily encourages the data driven predictive analytics aspects of healthcare and the development of ML models for health empowerment.}, } @article {pmid36459531, year = {2022}, author = {Truong, L and Ayora, F and D'Orsogna, L and Martinez, P and De Santis, D}, title = {Nanopore sequencing data analysis using Microsoft Azure cloud computing service.}, journal = {PloS one}, volume = {17}, number = {12}, pages = {e0278609}, pmid = {36459531}, issn = {1932-6203}, mesh = {Animals ; Cloud Computing ; *Nanopore Sequencing ; Data Analysis ; Data Accuracy ; *Mammoths ; }, abstract = {Genetic information provides insights into the exome, genome, epigenetics and structural organisation of the organism. 
Given the enormous amount of genetic information, scientists are able to perform mammoth tasks to improve the standard of health care such as determining genetic influences on outcome of allogeneic transplantation. Cloud based computing has increasingly become a key choice for many scientists, engineers and institutions as it offers on-demand network access and users can conveniently rent rather than buy all required computing resources. With the positive advancements of cloud computing and nanopore sequencing data output, we were motivated to develop an automated and scalable analysis pipeline utilizing cloud infrastructure in Microsoft Azure to accelerate HLA genotyping service and improve the efficiency of the workflow at lower cost. In this study, we describe (i) the selection process for suitable virtual machine sizes for computing resources to balance between the best performance versus cost effectiveness; (ii) the building of Docker containers to include all tools in the cloud computational environment; (iii) the comparison of HLA genotype concordance between the in-house manual method and the automated cloud-based pipeline to assess data accuracy. In conclusion, the Microsoft Azure cloud based data analysis pipeline was shown to meet all the key imperatives for performance, cost, usability, simplicity and accuracy. Importantly, the pipeline allows for the on-going maintenance and testing of version changes before implementation. 
This pipeline is suitable for the data analysis from MinION sequencing platform and could be adopted for other data analysis application processes.}, } @article {pmid36443470, year = {2022}, author = {Jang, H and Koh, H and Gu, W and Kang, B}, title = {Integrative web cloud computing and analytics using MiPair for design-based comparative analysis with paired microbiome data.}, journal = {Scientific reports}, volume = {12}, number = {1}, pages = {20465}, pmid = {36443470}, issn = {2045-2322}, mesh = {Humans ; Cloud Computing ; *Microbiota ; *Gastrointestinal Microbiome ; Mouth ; Skin ; }, abstract = {Pairing (or blocking) is a design technique that is widely used in comparative microbiome studies to efficiently control for the effects of potential confounders (e.g., genetic, environmental, or behavioral factors). Some typical paired (block) designs for human microbiome studies are repeated measures designs that profile each subject's microbiome twice (or more than twice) (1) for pre and post treatments to see the effects of a treatment on microbiome, or (2) for different organs of the body (e.g., gut, mouth, skin) to see the disparity in microbiome between (or across) body sites. Researchers have developed a sheer number of web-based tools for user-friendly microbiome data processing and analytics, though there is no web-based tool currently available for such paired microbiome studies. In this paper, we thus introduce an integrative web-based tool, named MiPair, for design-based comparative analysis with paired microbiome data. MiPair is a user-friendly web cloud service that is built with step-by-step data processing and analytic procedures for comparative analysis between (or across) groups or between baseline and other groups. 
MiPair employs parametric and non-parametric tests for complete or incomplete block designs to perform comparative analyses with respect to microbial ecology (alpha- and beta-diversity) and taxonomy (e.g., phylum, class, order, family, genus, species). We demonstrate its usage through an example clinical trial on the effects of antibiotics on gut microbiome. MiPair is an open-source software that can be run on our web server (http://mipair.micloud.kr) or on user's computer (https://github.com/yj7599/mipairgit).}, } @article {pmid36439763, year = {2022}, author = {Fouotsa Manfouo, NC and Von Fintel, D}, title = {Investigating the effects of drought and lockdowns on smallholder and commercial agricultural production in KwaZulu-Natal using remotely sensed data.}, journal = {Heliyon}, volume = {8}, number = {11}, pages = {e11637}, pmid = {36439763}, issn = {2405-8440}, abstract = {Not many efforts have been made so far to understand the effects of both the 2015-2016 drought and the 2020 lockdown measures on the agricultural production of smallholder vis-a-vis commercial farmers in Kwazulu-Natal. Google Earth Engine, and random forest algorithm, are used to generate a dataset that helps to investigate this question. A regression is performed on double differenced data to investigate the effects of interest. A k-means cluster analysis is also used to determine whether the distribution patterns of crop production changed with drought and disruption of agricultural production input. Results show that: (1) droughts affected the agricultural production of both areas similarly. Crop cover declined in both areas for one season after droughts were broken. Then recovery was driven by greener, more productive crops rather than the expansion of crop area. (2) The response of both areas to the COVID-19 lockdown was also similar. 
Both smallholder and commercial areas' Normalised Difference Vegetation Index - a proxy for crop vitality - improved in response to regulations favourable to the sector and improved rainfall. No significant adjustments in crop cover were observed. Production therefore changed primarily at the intensive margin (improved productivity of existing croplands) rather than the extensive (changing the extent of land under cultivation). (3) Cluster analysis allows for a more granular view, showing that the positive impact of lockdowns on agriculture was concentrated in areas with high rainfall and close proximity to metropolitan markets. Both smallholder and commercial farmers therefore are reliant on market access together with favourable environmental conditions for improved production.}, } @article {pmid36438442, year = {2022}, author = {Alzoubi, YI and Gill, A and Mishra, A}, title = {A systematic review of the purposes of Blockchain and fog computing integration: classification and open issues.}, journal = {Journal of cloud computing (Heidelberg, Germany)}, volume = {11}, number = {1}, pages = {80}, pmid = {36438442}, issn = {2192-113X}, abstract = {The fog computing concept was proposed to help cloud computing for the data processing of Internet of Things (IoT) applications. However, fog computing faces several challenges such as security, privacy, and storage. One way to address these challenges is to integrate blockchain with fog computing. There are several applications of blockchain-fog computing integration that have been proposed, recently, due to their lucrative benefits such as enhancing security and privacy. There is a need to systematically review and synthesize the literature on this topic of blockchain-fog computing integration. The purposes of integrating blockchain and fog computing were determined using a systematic literature review approach and tailored search criteria established from the research questions. 
In this research, 181 relevant papers were found and reviewed. The results showed that the authors proposed the combination of blockchain and fog computing for several purposes such as security, privacy, access control, and trust management. A lack of standards and laws may make it difficult for blockchain and fog computing to be integrated in the future, particularly in light of newly developed technologies like quantum computing and artificial intelligence. The findings of this paper serve as a resource for researchers and practitioners of blockchain-fog computing integration for future research and designs.}, } @article {pmid36433599, year = {2022}, author = {Trakadas, P and Masip-Bruin, X and Facca, FM and Spantideas, ST and Giannopoulos, AE and Kapsalis, NC and Martins, R and Bosani, E and Ramon, J and Prats, RG and Ntroulias, G and Lyridis, DV}, title = {A Reference Architecture for Cloud-Edge Meta-Operating Systems Enabling Cross-Domain, Data-Intensive, ML-Assisted Applications: Architectural Overview and Key Concepts.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {22}, pages = {}, pmid = {36433599}, issn = {1424-8220}, support = {PID2021-124463OB-100//Spanish Ministry of Science, Innovation and Universities and FEDER/ ; }, mesh = {*Ecosystem ; *Software ; }, abstract = {Future data-intensive intelligent applications are required to traverse across the cloud-to-edge-to-IoT continuum, where cloud and edge resources elegantly coordinate, alongside sensor networks and data. However, current technical solutions can only partially handle the data outburst associated with the IoT proliferation experienced in recent years, mainly due to their hierarchical architectures. In this context, this paper presents a reference architecture of a meta-operating system (RAMOS), targeted to enable a dynamic, distributed and trusted continuum which will be capable of facilitating the next-generation smart applications at the edge. 
RAMOS is domain-agnostic, capable of supporting heterogeneous devices in various network environments. Furthermore, the proposed architecture possesses the ability to place the data at the origin in a secure and trusted manner. Based on a layered structure, the building blocks of RAMOS are thoroughly described, and the interconnection and coordination between them is fully presented. Furthermore, illustration of how the proposed reference architecture and its characteristics could fit in potential key industrial and societal applications, which in the future will require more power at the edge, is provided in five practical scenarios, focusing on the distributed intelligence and privacy preservation principles promoted by RAMOS, as well as the concept of environmental footprint minimization. Finally, the business potential of an open edge ecosystem and the societal impacts of climate net neutrality are also illustrated.}, } @article {pmid36433575, year = {2022}, author = {Bin Mofidul, R and Alam, MM and Rahman, MH and Jang, YM}, title = {Real-Time Energy Data Acquisition, Anomaly Detection, and Monitoring System: Implementation of a Secured, Robust, and Integrated Global IIoT Infrastructure with Edge and Cloud AI.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {22}, pages = {}, pmid = {36433575}, issn = {1424-8220}, mesh = {*Internet of Things ; Artificial Intelligence ; Reproducibility of Results ; Computers ; Electrocardiography ; }, abstract = {The industrial internet of things (IIoT), a leading technology to digitize industrial sectors and applications, requires the integration of edge and cloud computing, cyber security, and artificial intelligence to enhance its efficiency, reliability, and sustainability. However, the collection of heterogeneous data from individual sensors as well as monitoring and managing large databases with sufficient security has become a concerning issue for the IIoT framework. 
The development of a smart and integrated IIoT infrastructure can be a possible solution that can efficiently handle the aforementioned issues. This paper proposes an AI-integrated, secured IIoT infrastructure incorporating heterogeneous data collection and storing capability, global inter-communication, and a real-time anomaly detection model. To this end, smart data acquisition devices are designed and developed through which energy data are transferred to the edge IIoT servers. Hash encoding credentials and transport layer security protocol are applied to the servers. Furthermore, these servers can exchange data through a secured message queuing telemetry transport protocol. Edge and cloud databases are exploited to handle big data. For detecting the anomalies of individual electrical appliances in real-time, an algorithm based on a group of isolation forest models is developed and implemented on edge and cloud servers as well. In addition, remote-accessible online dashboards are implemented, enabling users to monitor the system. Overall, this study covers hardware design; the development of open-source IIoT servers and databases; the implementation of an interconnected global networking system; the deployment of edge and cloud artificial intelligence; and the development of real-time monitoring dashboards. 
Necessary performance results are measured, and they elaborately demonstrate the feasibility of the proposed IIoT framework.}, } @article {pmid36433564, year = {2022}, author = {Umoren, O and Singh, R and Awan, S and Pervez, Z and Dahal, K}, title = {Blockchain-Based Secure Authentication with Improved Performance for Fog Computing.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {22}, pages = {}, pmid = {36433564}, issn = {1424-8220}, mesh = {*Blockchain ; Computer Security ; Cloud Computing ; *Internet of Things ; Algorithms ; }, abstract = {Advancement in the Internet of Things (IoT) and cloud computing has escalated the number of connected edge devices in a smart city environment. Having billions more devices has contributed to security concerns, and an attack-proof authentication mechanism is the need of the hour to sustain the IoT environment. Securing all devices could be a huge task and require lots of computational power, and can be a bottleneck for devices with fewer computational resources. To improve the authentication mechanism, many researchers have proposed decentralized applications such as blockchain technology for securing fog and IoT environments. Ethereum is considered a popular blockchain platform and is used by researchers to implement the authentication mechanism due to its programmable smart contract. In this research, we proposed a secure authentication mechanism with improved performance. Neo blockchain is a platform that has properties that can provide improved security and faster execution. The research utilizes the intrinsic properties of Neo blockchain to develop a secure authentication mechanism. 
The proposed authentication mechanism is compared with the existing algorithms and shows that the proposed mechanism is 20 to 90 per cent faster in execution time and has over 30 to 70 per cent decrease in registration and authentication when compared to existing methods.}, } @article {pmid36433381, year = {2022}, author = {Yang, J and Lee, TY and Lee, WT and Xu, L}, title = {A Design and Application of Municipal Service Platform Based on Cloud-Edge Collaboration for Smart Cities.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {22}, pages = {}, pmid = {36433381}, issn = {1424-8220}, support = {ZZ2021J23//the Zhangzhou Municipal Natural Science Foundation/ ; 2020J01813//the Fujian Province Nature Science Foundation/ ; NSCL-KF2021-07//the Opening Foundation of Fujian Provincial Key Laboratory of Network Security and Cryptology Research Fund, Fujian Normal University/ ; FBJG20210070//the Research Project on Education and Teaching Reform of Undergraduate Colleges and Universities in Fujian Province/ ; }, mesh = {Cities ; *Artificial Intelligence ; *Cloud Computing ; Computers ; Game Theory ; }, abstract = {Information and Communication Technology (ICT) makes cities "smart", capable of providing advanced municipal services to citizens more efficiently. In the literature, many applications of municipal service platform based on cloud computing and edge computing have been proposed, but the reference model and application instance based on cloud-edge collaboration specially for municipal service platform is rarely studied. In this context, this paper first develops a reference model, including resource collaboration, application collaboration, service collaboration, and security collaboration, and discusses the main contents and challenges of each part. Then, aiming at the problem of computing and communication resources allocation in the cloud-edge collaboration, a game-theory-based dynamic resource allocation model is introduced. 
Finally, an e-government self-service system based on the cloud-edge collaboration is designed and implemented. The cloud side is a cloud computing server, and the edge side are the self-service terminals integrating various edge computing devices with Artificial Intelligence (AI) embedded. The experimental results show that the designed system combines the advantages of cloud computing and edge computing, and provides a better user experience with lower processing latency, larger bandwidth, and more concurrent tasks. Meanwhile, the findings show that the evolutionary equilibrium and the Nash equilibrium are the optimal solutions, respectively.}, } @article {pmid36433374, year = {2022}, author = {Mir, TS and Liaqat, HB and Kiren, T and Sana, MU and Alvarez, RM and Miró, Y and Pascual Barrera, AE and Ashraf, I}, title = {Antifragile and Resilient Geographical Information System Service Delivery in Fog Computing.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {22}, pages = {}, pmid = {36433374}, issn = {1424-8220}, support = {N/A//European University of the Atlantic/ ; }, mesh = {*Geographic Information Systems ; *Cloud Computing ; }, abstract = {The demand for cloud computing has drastically increased recently, but this paradigm has several issues due to its inherent complications, such as non-reliability, latency, lesser mobility support, and location-aware services. Fog computing can resolve these issues to some extent, yet it is still in its infancy. Despite several existing works, these works lack fault-tolerant fog computing, which necessitates further research. Fault tolerance enables the performing and provisioning of services despite failures and maintains anti-fragility and resiliency. Fog computing is highly diverse in terms of failures as compared to cloud computing and requires wide research and investigation. From this perspective, this study primarily focuses on the provision of uninterrupted services through fog computing. 
A framework has been designed to provide uninterrupted services while maintaining resiliency. The geographical information system (GIS) services have been deployed as a test bed which requires high computation, requires intensive resources in terms of CPU and memory, and requires low latency. Keeping different types of failures at different levels and their impacts on service failure and greater response time in mind, the framework was made anti-fragile and resilient at different levels. Experimental results indicate that during service interruption, the user state remains unaffected.}, } @article {pmid36433242, year = {2022}, author = {Daraghmi, YA and Daraghmi, EY and Daraghma, R and Fouchal, H and Ayaida, M}, title = {Edge-Fog-Cloud Computing Hierarchy for Improving Performance and Security of NB-IoT-Based Health Monitoring Systems.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {22}, pages = {}, pmid = {36433242}, issn = {1424-8220}, support = {Palestine Technical University - Kadoorie//French Ministry for Europe and Foreign Affairs (MEAE), the French Ministry for Higher Education, Research and Innovation (MESRI), and by the Consulate General of France in Jerusalem./ ; }, mesh = {*Cloud Computing ; *Electrocardiography ; Algorithms ; Support Vector Machine ; }, abstract = {This paper proposes a three-computing-layer architecture consisting of Edge, Fog, and Cloud for remote health vital signs monitoring. The novelty of this architecture is in using the Narrow-Band IoT (NB-IoT) for communicating with a large number of devices and covering large areas with minimum power consumption. Additionally, the architecture reduces the communication delay as the edge layer serves the health terminal devices with initial decisions and prioritizes data transmission for minimizing congestion on base stations. The paper also investigates different authentication protocols for improving security while maintaining low computation and transmission time. 
For data analysis, different machine learning algorithms, such as decision tree, support vector machines, and logistic regression, are used on the three layers. The proposed architecture is evaluated using CloudSim, iFogSim, and ns3-NB-IoT on real data consisting of medical vital signs. The results show that the proposed architecture reduces the NB-IoT delay by 59.9%, the execution time by an average of 38.5%, and authentication time by 35.1% for a large number of devices. This paper concludes that the NB-IoT combined with edge, fog, and cloud computing can support efficient remote health monitoring for large devices and large areas.}, } @article {pmid36430048, year = {2022}, author = {Zhao, Z and Wang, Z and Garcia-Campayo, J and Perez, HM}, title = {The Dissemination Strategy of an Urban Smart Medical Tourism Image by Big Data Analysis Technology.}, journal = {International journal of environmental research and public health}, volume = {19}, number = {22}, pages = {}, pmid = {36430048}, issn = {1660-4601}, mesh = {Humans ; *Medical Tourism ; Tourism ; Data Analysis ; Big Data ; Reproducibility of Results ; Technology ; }, abstract = {The advanced level of medical care is closely related to the development and popularity of a city, and it will also drive the development of tourism. The smart urban medical system based on big data analysis technology can greatly facilitate people's lives and increase the flow of people in the city, which is of great significance to the city's tourism image dissemination and branding. The medical system, with eight layers of architecture including access, medical cloud service governance, the medical cloud service resource, the platform's public service, the platform's runtime service, infrastructure, and the overall security and monitoring system of the platform, is designed based on big data analysis technology. 
Chengdu city is taken as an example based on big data analysis technology to position the dissemination of an urban tourism image. Quantitative analysis and questionnaire methods are used to study the effect of urban smart medical system measurement and tourism image communication positioning based on big data analysis technology. The results show that the smart medical cloud service platform of the urban smart medical system, as a public information service system, supports users in obtaining medical services through various terminal devices without geographical restrictions. The smart medical cloud realizes service aggregation and data sharing compared to the traditional isolated medical service system. Cloud computing has been used as the technical basis, making the scalability and reliability of the system have unprecedented improvements. This paper discusses how to effectively absorb, understand, and use tools in the big data environment, extract information from data, find effective information, make image communication activities accurate, reduce the cost, and improve the efficiency of city image communication. The research shows that big data analysis technology improves patients' medical experience, improves medical efficiency, and alleviates urban medical resource allocation to a certain extent. This technology improves people's satisfaction with the dissemination of urban tourism images, makes urban tourism image dissemination activities accurate, reduces the cost of urban tourism image dissemination, and improves the efficiency of urban tourism image dissemination. 
The combination of the two can provide a reference for developing urban smart medical care and disseminating a tourism image.}, } @article {pmid36429833, year = {2022}, author = {Li, H and Ou, D and Ji, Y}, title = {An Environmentally Sustainable Software-Defined Networking Data Dissemination Method for Mixed Traffic Flows in RSU Clouds with Energy Restriction.}, journal = {International journal of environmental research and public health}, volume = {19}, number = {22}, pages = {}, pmid = {36429833}, issn = {1660-4601}, mesh = {*Computer Communication Networks ; *Software ; Programming, Linear ; Algorithms ; Physical Phenomena ; }, abstract = {The connected multi road side unit (RSU) environment can be envisioned as the RSU cloud. In this paper, the Software-Defined Networking (SDN) framework is utilized to dynamically reconfigure the RSU clouds for the mixed traffic flows with energy restrictions, which are composed of five categories of vehicles with distinctive communication demands. An environmentally sustainable SDN data dissemination method for safer and greener transportation solutions is thus proposed, aiming to achieve the lowest overall SDN cloud delay with the least working hosts and minimum energy consumption, which is a mixed integer linear programming problem (MILP). To solve the problem, Joint optimization algorithms with Finite resources (JF) in three hyperparameters versions, JF (DW = 0.3, HW = 0.7), JF (DW = 0.5, HW = 0.5) and JF (DW = 0.7, HW = 0.3), were proposed, which are in contrast with single-objective optimization algorithms, the Host Optimization (H) algorithm, and the Delay optimization (D) algorithm. 
Results show that JF (DW = 0.3, HW = 0.7) and JF (DW = 0.5, HW = 0.5), when compared with the D algorithm, usually had slightly larger cloud delays, but fewer working hosts and energy consumptions, which has vital significance for enhancing energy efficiency and environmental protection, and shows the superiority of JFs over the D algorithm. Meanwhile, the H algorithm had the least working hosts and fewest energy consumptions under the same conditions, but completely ignored the explosive surge of delay, which is not desirable for most cases of the SDN RSU cloud. Further analysis showed that the larger the network topology of the SDN cloud, the harder it was to find a feasible network configuration. Therefore, when designing an environmentally sustainable SDN RSU cloud for the greener future mobility of intelligent transportation systems, its size should be limited or partitioned into a relatively small topology.}, } @article {pmid36417024, year = {2023}, author = {Cohen, RY and Sodickson, AD}, title = {An Orchestration Platform that Puts Radiologists in the Driver's Seat of AI Innovation: a Methodological Approach.}, journal = {Journal of digital imaging}, volume = {36}, number = {2}, pages = {700-714}, pmid = {36417024}, issn = {1618-727X}, mesh = {Humans ; *Artificial Intelligence ; Radiologists ; *Radiology/methods ; Machine Learning ; Diagnostic Imaging ; }, abstract = {Current AI-driven research in radiology requires resources and expertise that are often inaccessible to small and resource-limited labs. The clinicians who are able to participate in AI research are frequently well-funded, well-staffed, and either have significant experience with AI and computing, or have access to colleagues or facilities that do. 
Current imaging data is clinician-oriented and is not easily amenable to machine learning initiatives, resulting in inefficient, time consuming, and costly efforts that rely upon a crew of data engineers and machine learning scientists, and all too often preclude radiologists from driving AI research and innovation. We present the system and methodology we have developed to address infrastructure and platform needs, while reducing the staffing and resource barriers to entry. We emphasize a data-first and modular approach that streamlines the AI development and deployment process while providing efficient and familiar interfaces for radiologists, such that they can be the drivers of new AI innovations.}, } @article {pmid36415683, year = {2022}, author = {Xie, Y and Li, P and Nedjah, N and Gupta, BB and Taniar, D and Zhang, J}, title = {Privacy protection framework for face recognition in edge-based Internet of Things.}, journal = {Cluster computing}, volume = {}, number = {}, pages = {1-19}, pmid = {36415683}, issn = {1386-7857}, abstract = {Edge computing (EC) gets the Internet of Things (IoT)-based face recognition systems out of trouble caused by limited storage and computing resources of local or mobile terminals. However, data privacy leak remains a concerning problem. Previous studies only focused on some stages of face data processing, while this study focuses on the privacy protection of face data throughout its entire life cycle. Therefore, we propose a general privacy protection framework for edge-based face recognition (EFR) systems. To protect the privacy of face images and training models transmitted between edges and the remote cloud, we design a local differential privacy (LDP) algorithm based on the proportion difference of feature information. In addition, we also introduced identity authentication and hash technology to ensure the legitimacy of the terminal device and the integrity of the face image in the data acquisition phase. 
Theoretical analysis proves the rationality and feasibility of the scheme. Compared with the non-privacy protection situation and the equal privacy budget allocation method, our method achieves the best balance between availability and privacy protection in the numerical experiment.}, } @article {pmid36410105, year = {2023}, author = {Aguilar, B and Abdilleh, K and Acquaah-Mensah, GK}, title = {Multi-omics inference of differential breast cancer-related transcriptional regulatory network gene hubs between young Black and White patients.}, journal = {Cancer genetics}, volume = {270-271}, number = {}, pages = {1-11}, doi = {10.1016/j.cancergen.2022.11.001}, pmid = {36410105}, issn = {2210-7762}, mesh = {Humans ; Female ; Adult ; *Breast Neoplasms/genetics/metabolism ; Multiomics ; White ; Oncogenes ; *MicroRNAs/genetics ; Tumor Suppressor Proteins/genetics ; Ubiquitin-Protein Ligases/genetics ; }, abstract = {OBJECTIVE: Breast cancers (BrCA) are a leading cause of illness and mortality worldwide. Black women have a higher incidence rate relative to white women prior to age 40 years, and a lower incidence rate after 50 years. The objective of this study is to identify -omics differences between the two breast cancer cohorts to better understand the disparities observed in patient outcomes.

MATERIALS AND METHODS: Using Standard SQL, we queried ISB-CGC hosted Google BigQuery tables storing TCGA BrCA gene expression, methylation, and somatic mutation data and analyzed the combined multi-omics results using a variety of methods.

RESULTS: Among Stage II patients 50 years or younger, genes PIK3CA and CDH1 are more frequently mutated in White (W50) than in Black or African American patients (BAA50), while HUWE1, HYDIN, and FBXW7 mutations are more frequent in BAA50. Over-representation analysis (ORA) and Gene Set Enrichment Analysis (GSEA) results indicate that, among others, the Reactome Signaling by ROBO Receptors gene set is enriched in BAA50. Using the Virtual Inference of Protein-activity by Enriched Regulon analysis (VIPER) algorithm, putative top 20 master regulators identified include NUPR1, NFKBIL1, ZBTB17, TEAD1, EP300, TRAF6, CACTIN, and MID2. CACTIN and MID2 are of prognostic value. We identified driver genes, such as OTUB1, with suppressed expression whose DNA methylation status were inversely correlated with gene expression. Networks capturing microRNA and gene expression correlations identified notable microRNA hubs, such as miR-93 and miR-92a-2, expressed at higher levels in BAA50 than in W50.

DISCUSSION/CONCLUSION: The results point to several driver genes as being involved in the observed differences between the cohorts. The findings here form the basis for further mechanistic exploration.}, } @article {pmid36408731, year = {2023}, author = {Kucewicz, MT and Worrell, GA and Axmacher, N}, title = {Direct electrical brain stimulation of human memory: lessons learnt and future perspectives.}, journal = {Brain : a journal of neurology}, volume = {146}, number = {6}, pages = {2214-2226}, doi = {10.1093/brain/awac435}, pmid = {36408731}, issn = {1460-2156}, mesh = {Humans ; *Brain/physiology ; *Memory/physiology ; Mental Recall/physiology ; Electric Stimulation ; Cognition ; }, abstract = {Modulation of cognitive functions supporting human declarative memory is one of the grand challenges of neuroscience, and of vast importance for a variety of neuropsychiatric, neurodegenerative and neurodevelopmental diseases. Despite a recent surge of successful attempts at improving performance in a range of memory tasks, the optimal approaches and parameters for memory enhancement have yet to be determined. On a more fundamental level, it remains elusive as to how delivering electrical current in a given brain area leads to enhanced memory processing. Starting from the local and distal physiological effects on neural populations, the mechanisms of enhanced memory encoding, maintenance, consolidation or recall in response to direct electrical stimulation are only now being unravelled. With the advent of innovative neurotechnologies for concurrent recording and stimulation intracranially in the human brain, it becomes possible to study both acute and chronic effects of stimulation on memory performance and the underlying neural activities. In this review, we summarize the effects of various invasive stimulation approaches for modulating memory functions. We first outline the challenges that were faced in the initial studies of memory enhancement and the lessons learnt. 
Electrophysiological biomarkers are then reviewed as more objective measures of the stimulation effects than behavioural outcomes. Finally, we classify the various stimulation approaches into continuous and phasic modulation with an open or closed loop for responsive stimulation based on analysis of the recorded neural activities. Although the potential advantage of closed-loop responsive stimulation over the classic open-loop approaches is inconclusive, we foresee the emerging results from ongoing longitudinal studies and clinical trials will shed light on both the mechanisms and optimal strategies for improving declarative memory. Adaptive stimulation based on the biomarker analysis over extended periods of time is proposed as a future direction for obtaining lasting effects on memory functions. Chronic tracking and modulation of neural activities intracranially through adaptive stimulation opens tantalizing new avenues to continually monitor and treat memory and cognitive deficits in a range of brain disorders. Brain co-processors created with machine-learning tools and wireless bi-directional connectivity to seamlessly integrate implanted devices with smartphones and cloud computing are poised to enable real-time automated analysis of large data volumes and adaptively tune electrical stimulation based on electrophysiological biomarkers of behavioural states. 
Next-generation implantable devices for high-density recording and stimulation of electrophysiological activities, and technologies for distributed brain-computer interfaces are presented as selected future perspectives for modulating human memory and associated mental processes.}, } @article {pmid36408485, year = {2022}, author = {Al-Khafaji, HMR and Jaleel, RA}, title = {Adopting effective hierarchal IoMTs computing with K-efficient clustering to control and forecast COVID-19 cases.}, journal = {Computers & electrical engineering : an international journal}, volume = {104}, number = {}, pages = {108472}, pmid = {36408485}, issn = {0045-7906}, abstract = {The Internet of Medical Things (IoMTs) based on fog/cloud computing has been effectively proven to improve the controlling, monitoring, and care quality of Coronavirus disease 2019 (COVID-19) patients. One of the convenient approaches to assess symptomatic patients is to group patients with comparable symptoms and provide an overview of the required level of care to patients with similar conditions. Therefore, this study adopts an effective hierarchal IoMTs computing with K-Efficient clustering to control and forecast COVID-19 cases. The proposed system integrates the K-Means and K-Medoids clusterings to monitor the health status of patients, early detection of COVID-19 cases, and process data in real-time with ultra-low latency. In addition, the data analysis takes into account the primary requirements of the network to assist in understanding the nature of COVID-19. Based on the findings, the K-Efficient clustering with fog computing is a more effective approach to analyse the status of patients compared to that of K-Means and K-Medoids in terms of intra-class, inter-class, running time, the latency of network, and RAM consumption. 
In summary, the outcome of this study provides a novel approach for remote monitoring and handling of infected COVID-19 patients through real-time personalised treatment services.}, } @article {pmid36404909, year = {2022}, author = {Narasimha Raju, AS and Jayavel, K and Rajalakshmi, T}, title = {ColoRectalCADx: Expeditious Recognition of Colorectal Cancer with Integrated Convolutional Neural Networks and Visual Explanations Using Mixed Dataset Evidence.}, journal = {Computational and mathematical methods in medicine}, volume = {2022}, number = {}, pages = {8723957}, pmid = {36404909}, issn = {1748-6718}, mesh = {Humans ; Neural Networks, Computer ; Support Vector Machine ; Diagnosis, Computer-Assisted/methods ; Colonoscopy ; *Polyps ; *Colorectal Neoplasms/diagnostic imaging ; }, abstract = {Colorectal cancer typically affects the gastrointestinal tract within the human body. Colonoscopy is one of the most accurate methods of detecting cancer. The current system facilitates the identification of cancer by computer-assisted diagnosis (CADx) systems with a limited number of deep learning methods. It does not imply the depiction of mixed datasets for the functioning of the system. The proposed system, called ColoRectalCADx, is supported by deep learning (DL) models suitable for cancer research. The CADx system comprises five stages: convolutional neural networks (CNN), support vector machine (SVM), long short-term memory (LSTM), visual explanation such as gradient-weighted class activation mapping (Grad-CAM), and semantic segmentation phases. Here, the key components of the CADx system are equipped with 9 individual and 12 integrated CNNs, implying that the system consists mainly of investigational experiments with a total of 21 CNNs. In the subsequent phase, the CADx has a combination of CNNs of concatenated transfer learning functions associated with the machine SVM classification. 
Additional classification is applied to ensure effective transfer of results from CNN to LSTM. The system is mainly made up of a combination of CVC Clinic DB, Kvasir2, and Hyper Kvasir input as a mixed dataset. After CNN and LSTM, in advanced stage, malignancies are detected by using a better polyp recognition technique with Grad-CAM and semantic segmentation using U-Net. CADx results have been stored on Google Cloud for record retention. In these experiments, among all the CNNs, the individual CNN DenseNet-201 (87.1% training and 84.7% testing accuracies) and the integrated CNN ADaDR-22 (84.61% training and 82.17% testing accuracies) were the most efficient for cancer detection with the CNN+LSTM model. ColoRectalCADx accurately identifies cancer through individual CNN DenseNet-201 and integrated CNN ADaDR-22. In Grad-CAM's visual explanations, CNN DenseNet-201 displays precise visualization of polyps, and CNN U-Net provides precise malignant polyps.}, } @article {pmid36395912, year = {2023}, author = {Xu, H and Yang, X and Wang, D and Hu, Y and Cheng, Z and Shi, Y and Zheng, P and Shi, L}, title = {Multivariate and spatio-temporal groundwater pollution risk assessment: A new long-time serial groundwater environmental impact assessment system.}, journal = {Environmental pollution (Barking, Essex : 1987)}, volume = {317}, number = {}, pages = {120621}, doi = {10.1016/j.envpol.2022.120621}, pmid = {36395912}, issn = {1873-6424}, mesh = {*Environmental Monitoring/methods ; *Groundwater ; Environmental Pollution ; Soil ; Risk Assessment/methods ; }, abstract = {Groundwater pollution risk assessment is an important part of environmental assessment. Although it has been developed for many years, there has not yet been a multi-dimensional method that takes into account long time series and spatial factors. 
We proposed a new method that combines the advantages of remote sensing cloud computing, long-term groundwater modeling simulation and GIS technology to solve it efficiently. A coastal industrial park in Hainan was used as the study area. The depth of groundwater level, rainfall, topography and geomorphology, soil moisture, pollution source, pollution toxicity and other more than 10 parameters were used as the indexes. A comprehensive model with remote sensing cloud computing, DRASTIC model and Modflow + MT3DMS was established to assess the pollution risk from 2014 to 2021. The multi-year results indicated that the risk assessment of groundwater pollution was usually on the vertical coastal direction, and the risk increased from far away to near coast. With the discharge of pollutants in the industrial park, the pollution risk in the area 5 km away from the centre increased year by year until it became stable in 2019, and the risk in the centre of the park reached 1 level, covered an area of up to 145400 square metres, accounted for 0.012% of the whole study area. The assessment results in 2020 and 2021 fluctuate slightly compared with those in 2019. Therefore, in terms of groundwater resource protection and resource management, it is necessary to focus on the detection of pollution in the coastal zone and the pollution within 5 km of the centre to strictly control pollution discharge. In this study, the comprehensive assessment includes surface indicators, subsurface indicators, and pollutant indicators. 
Finally, we achieve a multivariate, spatial and long time series groundwater pollution risk assessment system, which is a new groundwater environmental impact assessment (GEIA) system.}, } @article {pmid36395210, year = {2022}, author = {Datta, S and Chakraborty, W and Radosavljevic, M}, title = {Toward attojoule switching energy in logic transistors.}, journal = {Science (New York, N.Y.)}, volume = {378}, number = {6621}, pages = {733-740}, doi = {10.1126/science.ade7656}, pmid = {36395210}, issn = {1095-9203}, abstract = {Advances in the theory of semiconductors in the 1930s in addition to the purification of germanium and silicon crystals in the 1940s enabled the point-contact junction transistor in 1947 and initiated the era of semiconductor electronics. Gordon Moore postulated 18 years later that the number of components in an integrated circuit would double every 1 to 2 years with associated reductions in cost per transistor. Transistor density doubling through scaling-the decrease of component sizes-with each new process node continues today, albeit at a slower pace compared with historical rates of scaling. Transistor scaling has resulted in exponential gain in performance and energy efficiency of integrated circuits, which transformed computing from mainframes to personal computers and from mobile computing to cloud computing. Innovations in new materials, transistor structures, and lithographic technologies will enable further scaling. 
Monolithic 3D integration, design technology co-optimization, alternative switching mechanisms, and cryogenic operation could enable further transistor scaling and improved energy efficiency in the foreseeable future.}, } @article {pmid36388591, year = {2022}, author = {Pei, J and Wang, L and Huang, H and Wang, L and Li, W and Wang, X and Yang, H and Cao, J and Fang, H and Niu, Z}, title = {Characterization and attribution of vegetation dynamics in the ecologically fragile South China Karst: Evidence from three decadal Landsat observations.}, journal = {Frontiers in plant science}, volume = {13}, number = {}, pages = {1043389}, pmid = {36388591}, issn = {1664-462X}, abstract = {Plant growth and its changes over space and time are effective indicators for signifying ecosystem health. However, large uncertainties remain in characterizing and attributing vegetation changes in the ecologically fragile South China Karst region, since most existing studies were conducted at a coarse spatial resolution or covered limited time spans. Considering the highly fragmented landscapes in the region, this hinders their capability in detecting fine information of vegetation dynamics taking place at local scales and comprehending the influence of climate change usually over relatively long temporal ranges. Here, we explored the spatiotemporal variations in vegetation greenness for the entire South China Karst region (1.9 million km[2]) at a resolution of 30m for the notably increased time span (1987-2018) using three decadal Landsat images and the cloud-based Google Earth Engine. Moreover, we spatially attributed the vegetation changes and quantified the relative contribution of driving factors. Our results revealed a widespread vegetation recovery in the South China Karst (74.80%) during the past three decades. Notably, the area of vegetation recovery tripled following the implementation of ecological engineering compared with the reference period (1987-1999). 
Meanwhile, the vegetation restoration trend was strongly sustainable beyond 2018 as demonstrated by the Hurst exponent. Furthermore, climate change contributed only one-fifth to vegetation restoration, whereas major vegetation recovery was highly attributable to afforestation projects, implying that anthropogenic influences accelerated vegetation greenness gains in karst areas since the start of the new millennium during which ecological engineering was continually established. Our study provides additional insights into ecological restoration and conservation in the highly heterogeneous karst landscapes and other similar ecologically fragile areas worldwide.}, } @article {pmid36387768, year = {2022}, author = {Noh, SK}, title = {Deep Learning System for Recycled Clothing Classification Linked to Cloud and Edge Computing.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {6854626}, pmid = {36387768}, issn = {1687-5273}, mesh = {Humans ; *Artificial Intelligence ; *Deep Learning ; Cloud Computing ; Automation ; Clothing ; }, abstract = {Recently, IT technologies related to the Fourth Industrial Revolution (4IR), such as artificial intelligence (AI), Internet of things (IoT), cloud computing, and edge computing have been studied. Although there are many used clothing occurrences with 61 trillion won of clothing consumption per year in Korea, it is not properly collected due to the efficiency of the used clothing collection system, and the collected used clothing is not properly recycled due to insufficient recycling system, lack of skilled labor force, and health problems of workers. To solve this problem, this study proposes a deep learning clothing classification system (DLCCS) using cloud and edge computing. 
The system proposed is to classify clothing image data input from camera terminals installed in various clothing classification sites in various regions into two classes, as well as nine classes, by deep learning using convolution neural network (CNN). And the classification results are stored in the cloud through edge computing. The edge computing enables the analysis of the data of the Internet of Things (IoT) device on the edge of the network before transmitting it to the cloud. The performance evaluation parameters that are considered for the proposed research study are transmission velocity and latency. Proposed system can efficiently improve the process and automation in the classification and processing of recycled clothing in various places. It is also expected that the waste of clothing resources and health problems of clothing classification workers will be improved.}, } @article {pmid36374893, year = {2022}, author = {Nguyen, AD and Choi, S and Kim, W and Kim, J and Oh, H and Kang, J and Lee, S}, title = {Single-Image 3-D Reconstruction: Rethinking Point Cloud Deformation.}, journal = {IEEE transactions on neural networks and learning systems}, volume = {PP}, number = {}, pages = {}, doi = {10.1109/TNNLS.2022.3211929}, pmid = {36374893}, issn = {2162-2388}, abstract = {Single-image 3-D reconstruction has long been a challenging problem. Recent deep learning approaches have been introduced to this 3-D area, but the ability to generate point clouds still remains limited due to inefficient and expensive 3-D representations, the dependency between the output and the number of model parameters, or the lack of a suitable computing operation. In this article, we present a novel deep-learning-based method to reconstruct a point cloud of an object from a single still image. The proposed method can be decomposed into two steps: feature fusion and deformation. 
The first step extracts both global and point-specific shape features from a 2-D object image, and then injects them into a randomly generated point cloud. In the second step, which is deformation, we introduce a new layer termed as GraphX that considers the interrelationship between points like common graph convolutions but operates on unordered sets. The framework can be applicable to realistic image data with background as we optionally learn a mask branch to segment objects from input images. To complement the quality of point clouds, we further propose an objective function to control the point uniformity. In addition, we introduce different variants of GraphX that cover from best performance to best memory budget. Moreover, the proposed model can generate an arbitrary-sized point cloud, which is the first deep method to do so. Extensive experiments demonstrate that we outperform the existing models and set a new height for different performance metrics in single-image 3-D reconstruction.}, } @article {pmid36366266, year = {2022}, author = {Kawa, J and Pyciński, B and Smoliński, M and Bożek, P and Kwasecki, M and Pietrzyk, B and Szymański, D}, title = {Design and Implementation of a Cloud PACS Architecture.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {21}, pages = {}, pmid = {36366266}, issn = {1424-8220}, support = {POIR.01.01.01-00-0303/1//European Regional Development Fund/ ; }, mesh = {*Radiology Information Systems ; Cloud Computing ; Computers ; Software ; Tomography, X-Ray Computed ; }, abstract = {The limitations of the classic PACS (picture archiving and communication system), such as the backward-compatible DICOM network architecture and poor security and maintenance, are well-known. They are challenged by various existing solutions employing cloud-related patterns and services. However, a full-scale cloud-native PACS has not yet been demonstrated. The paper introduces a vendor-neutral cloud PACS architecture. 
It is divided into two main components: a cloud platform and an access device. The cloud platform is responsible for nearline (long-term) image archive, data flow, and backend management. It operates in multi-tenant mode. The access device is responsible for the local DICOM (Digital Imaging and Communications in Medicine) interface and serves as a gateway to cloud services. The cloud PACS was first implemented in an Amazon Web Services environment. It employs a number of general-purpose services designed or adapted for a cloud environment, including Kafka, OpenSearch, and Memcached. Custom services, such as a central PACS node, queue manager, or flow worker, also developed as cloud microservices, bring DICOM support, external integration, and a management layer. The PACS was verified using image traffic from, among others, computed tomography (CT), magnetic resonance (MR), and computed radiography (CR) modalities. During the test, the system was reliably storing and accessing image data. In following tests, scaling behavior differences between the monolithic Dcm4chee server and the proposed solution are shown. The growing number of parallel connections did not influence the monolithic server's overall throughput, whereas the performance of cloud PACS noticeably increased. In the final test, different retrieval patterns were evaluated to assess performance under different scenarios. 
The current production environment stores over 450 TB of image data and handles over 4000 DICOM nodes.}, } @article {pmid36366264, year = {2022}, author = {Kim, JK and Park, BS and Kim, W and Park, JT and Lee, S and Seo, YH}, title = {Robust Estimation and Optimized Transmission of 3D Feature Points for Computer Vision on Mobile Communication Network.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {21}, pages = {}, pmid = {36366264}, issn = {1424-8220}, mesh = {*Algorithms ; *Vision, Ocular ; Computers ; }, abstract = {Due to the amount of transmitted data and the security of personal or private information in wireless communication, there are cases where the information for a multimedia service should be directly transferred from the user's device to the cloud server without the captured original images. This paper proposes a new method to generate 3D (dimensional) keypoints based on a user's mobile device with a commercial RGB camera in a distributed computing environment such as a cloud server. The images are captured with a moving camera and 2D keypoints are extracted from them. After executing feature extraction between continuous frames, disparities are calculated between frames using the relationships between matched keypoints. The physical distance of the baseline is estimated by using the motion information of the camera, and the actual distance is calculated by using the calculated disparity and the estimated baseline. Finally, 3D keypoints are generated by adding the extracted 2D keypoints to the calculated distance. A keypoint-based scene change method is proposed as well. Due to the existing similarity between continuous frames captured from a camera, not all 3D keypoints are transferred and stored, only the new ones. 
Compared with the ground truth of the TUM dataset, the average error of the estimated 3D keypoints was measured as 5.98 mm, which shows that the proposed method has relatively good performance considering that it uses a commercial RGB camera on a mobile device. Furthermore, the transferred 3D keypoints were decreased to about 73.6%.}, } @article {pmid36366095, year = {2022}, author = {Gonzalez-Compean, JL and Sosa-Sosa, VJ and Garcia-Hernandez, JJ and Galeana-Zapien, H and Reyes-Anastacio, HG}, title = {A Blockchain and Fingerprinting Traceability Method for Digital Product Lifecycle Management.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {21}, pages = {}, pmid = {36366095}, issn = {1424-8220}, support = {41756//Consejo Nacional de Ciencia y Tecnología/ ; }, mesh = {*Blockchain ; Computer Security ; *Internet of Things ; Cloud Computing ; Technology ; }, abstract = {The rise of digitalization, sensory devices, cloud computing and internet of things (IoT) technologies enables the design of novel digital product lifecycle management (DPLM) applications for use cases such as manufacturing and delivery of digital products. The verification of the accomplishment/violations of agreements defined in digital contracts is a key task in digital business transactions. However, this verification represents a challenge when validating both the integrity of digital product content and the transactions performed during multiple stages of the DPLM. This paper presents a traceability method for DPLM based on the integration of online and offline verification mechanisms based on blockchain and fingerprinting, respectively. A blockchain lifecycle registration model is used for organizations to register the exchange of digital products in the cloud with partners and/or consumers throughout the DPLM stages as well as to verify the accomplishment of agreements at each DPLM stage. 
The fingerprinting scheme is used for offline verification of digital product integrity and to register the DPLM logs within digital products, which is useful in either dispute or violation of agreements scenarios. We built a DPLM service prototype based on this method, which was implemented as a cloud computing service. A case study based on the DPLM of audios was conducted to evaluate this prototype. The experimental evaluation revealed the ability of this method to be applied to DPLM in real scenarios in an efficient manner.}, } @article {pmid36366082, year = {2022}, author = {Hijji, M and Ahmad, B and Alam, G and Alwakeel, A and Alwakeel, M and Abdulaziz Alharbi, L and Aljarf, A and Khan, MU}, title = {Cloud Servers: Resource Optimization Using Different Energy Saving Techniques.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {21}, pages = {}, pmid = {36366082}, issn = {1424-8220}, mesh = {*Cloud Computing ; Physical Phenomena ; *Workload ; }, abstract = {Currently, researchers are working to contribute to the emerging fields of cloud computing, edge computing, and distributed systems. The major area of interest is to examine and understand their performance. The major globally leading companies, such as Google, Amazon, ONLIVE, Giaki, and eBay, are truly concerned about the impact of energy consumption. These cloud computing companies use huge data centers, consisting of virtual computers that are positioned worldwide and necessitate exceptionally high-power costs to preserve. The increased requirement for energy consumption in IT firms has posed many challenges for cloud computing companies pertinent to power expenses. Energy utilization is reliant upon numerous aspects, for example, the service level agreement, techniques for choosing the virtual machine, the applied optimization strategies and policies, and kinds of workload. 
The present paper tries to provide an answer to challenges related to energy-saving through the assistance of both dynamic voltage and frequency scaling techniques for gaming data centers. It also aims to evaluate both the dynamic voltage and frequency scaling techniques compared to non-power-aware and static threshold detection techniques. The findings will facilitate service suppliers in how to encounter the quality of service and experience limitations by fulfilling the service level agreements. For this purpose, the CloudSim platform is applied for the application of a situation in which game traces are employed as a workload for analyzing the procedure. The findings evidenced that an assortment of good quality techniques can benefit gaming servers to conserve energy expenditures and sustain the best quality of service for consumers located universally. The originality of this research presents a prospect to examine which procedure performs good (for example, dynamic, static, or non-power aware). 
The findings validate that less energy is utilized by applying a dynamic voltage and frequency method along with fewer service level agreement violations, and better quality of service and experience, in contrast with static threshold consolidation or non-power aware technique.}, } @article {pmid36366068, year = {2022}, author = {Baca, A and Dabnichki, P and Hu, CW and Kornfeind, P and Exel, J}, title = {Ubiquitous Computing in Sports and Physical Activity-Recent Trends and Developments.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {21}, pages = {}, pmid = {36366068}, issn = {1424-8220}, mesh = {Humans ; Artificial Intelligence ; *Sports ; *Wearable Electronic Devices ; Exercise ; Athletes ; }, abstract = {The use of small, interconnected and intelligent tools within the broad framework of pervasive computing for analysis and assessments in sport and physical activity is not a trend in itself but defines a way for information to be handled, processed and utilised: everywhere, at any time. The demand for objective data to support decision making prompted the adoption of wearables that evolve to fulfil the aims of assessing athletes and practitioners as closely as possible with their performance environments. In the present paper, we mention and discuss the advancements in ubiquitous computing in sports and physical activity in the past 5 years. Thus, recent developments in wearable sensors, cloud computing and artificial intelligence tools have been the pillars for a major change in the ways sport-related analyses are performed. The focus of our analysis is wearable technology, computer vision solutions for markerless tracking and their major contribution to the process of acquiring more representative data from uninhibited actions in realistic ecological conditions. 
We selected relevant literature on the applications of such approaches in various areas of sports and physical activity while outlining some limitations of the present-day data acquisition and data processing practices and the resulting sensors' functionalities, as well as the limitations to the data-driven informed decision making in the current technological and scientific framework. Finally, we hypothesise that a continuous merger of measurement, processing and analysis will lead to the development of more reliable models utilising the advantages of open computing and unrestricted data access and allow for the development of personalised-medicine-type approaches to sport training and performance.}, } @article {pmid36366060, year = {2022}, author = {Niebla-Montero, Á and Froiz-Míguez, I and Fraga-Lamas, P and Fernández-Caramés, TM}, title = {Practical Latency Analysis of a Bluetooth 5 Decentralized IoT Opportunistic Edge Computing System for Low-Cost SBCs.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {21}, pages = {}, pmid = {36366060}, issn = {1424-8220}, support = {PID2020-118857RA (ORBALLO)//MCIN/AEI/10.13039/50110001103/ ; ED431C 2020/15 and ED431G 2019/01//Xunta de Galicia and ERDF/ ; }, abstract = {IoT devices can be deployed almost anywhere, but they usually need to be connected to other IoT devices, either through the Internet or local area networks. For such communications, many IoT devices make use of wireless communications, whose coverage is key: if no coverage is available, an IoT device becomes isolated. This can happen both indoors (e.g., large buildings, industrial warehouses) or outdoors (e.g., rural areas, cities). To tackle such an issue, opportunistic networks can be useful, since they use gateways to provide services to IoT devices when they are in range (i.e., IoT devices take the opportunity of having a nearby gateway to exchange data or to use a computing service). 
Moreover, opportunistic networks can provide Edge Computing capabilities, thus creating Opportunistic Edge Computing (OEC) systems, which deploy smart gateways able to perform certain tasks faster than a remote Cloud. This article presents a novel decentralized OEC system based on Bluetooth 5 IoT nodes whose latency is evaluated to determine the feasibility of using it in practical applications. The obtained results indicate that, for the selected scenario, the average end-to-end latency is relatively low (736 ms), but it is impacted by factors such as the location of the bootstrap node, the smart gateway hardware or the use of high-security mechanisms.}, } @article {pmid36366028, year = {2022}, author = {Lo, SC and Tsai, HH}, title = {Design of 3D Virtual Reality in the Metaverse for Environmental Conservation Education Based on Cognitive Theory.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {21}, pages = {}, pmid = {36366028}, issn = {1424-8220}, support = {MOST-109-2511-H-150-004//National Science and Technology Council, Taiwan/ ; SWCB-110-049//Soil and Water Conservation Bureau, Taiwan/ ; SWCB-111-052//Soil and Water Conservation Bureau, Taiwan/ ; }, mesh = {Humans ; *Virtual Reality ; *Computer-Assisted Instruction/methods ; Learning ; Cognition ; }, abstract = {BACKGROUND: Climate change causes devastating impacts with extreme weather conditions, such as flooding, polar ice caps melting, sea level rise, and droughts. Environmental conservation education is an important and ongoing project nowadays for all governments in the world. In this paper, a novel 3D virtual reality architecture in the metaverse (VRAM) is proposed to foster water resources education using modern information technology.

METHODS: A quasi-experimental study was performed to observe a comparison between learning involving VRAM and learning without VRAM. The 3D VRAM multimedia content comes from a picture book for learning environmental conservation concepts, based on the cognitive theory of multimedia learning to enhance human cognition. Learners wear VRAM helmets to run VRAM Android apps by entering the immersive environment for playing and/or interacting with 3D VRAM multimedia content in the metaverse. They shake their head to move the interaction sign to initiate interactive actions, such as replaying, going to consecutive video clips, displaying text annotations, and replying to questions when learning soil-and-water conservation course materials. Interactive portfolios of triggering actions are transferred to the cloud computing database immediately by the app.

RESULTS: Experimental results showed that participants who received instruction involving VRAM had significant improvement in their flow experience, learning motivation, learning interaction, self-efficacy, and presence in learning environmental conservation concepts.

CONCLUSIONS: The novel VRAM is highly suitable for multimedia educational systems. Moreover, learners' interactive VRAM portfolios can be analyzed by big-data analytics to understand behaviors for using VRAM in the future to improve the quality of environmental conservation education.}, } @article {pmid36365971, year = {2022}, author = {Na, D and Park, S}, title = {IoT-Chain and Monitoring-Chain Using Multilevel Blockchain for IoT Security.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {21}, pages = {}, pmid = {36365971}, issn = {1424-8220}, support = {Keimyung University 2021//Keimyung University/ ; }, abstract = {In general, the Internet of Things (IoT) relies on centralized servers due to limited computing power and storage capacity. These server-based architectures have vulnerabilities such as DDoS attacks, single-point errors, and data forgery, and cannot guarantee stability and reliability. Blockchain technology can guarantee reliability and stability with a P2P network-based consensus algorithm and distributed ledger technology. However, it requires the high storage capacity of the existing blockchain and the computational power of the consensus algorithm. Therefore, blockchain nodes for IoT data management are maintained through an external cloud, an edge node. As a result, the vulnerability of the existing centralized structure cannot be guaranteed, and reliability cannot be guaranteed in the process of storing IoT data on the blockchain. In this paper, we propose a multi-level blockchain structure and consensus algorithm to solve the vulnerability. A multi-level blockchain operates on IoT devices, and there is an IoT chain layer that stores sensor data to ensure reliability. In addition, there is a hyperledger fabric-based monitoring chain layer that operates the access control for the metadata and data of the IoT chain to lighten the weight. 
We propose an export consensus method between the two blockchains, the Schnorr signature method, and a random-based lightweight consensus algorithm within the IoT-Chain. Experiments to measure the blockchain size, propagation time, consensus delay time, and transactions per second (TPS) were conducted using IoT. The blockchain did not exceed a certain size, and the delay time was reduced by 96% to 99% on average compared to the existing consensus algorithm. In the throughput tests, the maximum was 1701 TPS and the minimum was 1024 TPS.}, } @article {pmid36365871, year = {2022}, author = {Kaur, A and Singh, G and Kukreja, V and Sharma, S and Singh, S and Yoon, B}, title = {Adaptation of IoT with Blockchain in Food Supply Chain Management: An Analysis-Based Review in Development, Benefits and Potential Applications.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {21}, pages = {}, pmid = {36365871}, issn = {1424-8220}, support = {21163MFDS502//Ministry of Food and Drug Safety/ ; }, mesh = {*Blockchain ; *Internet of Things ; Food Supply ; Monitoring, Physiologic ; Technology ; }, abstract = {In today's scenario, blockchain technology is an emerging area and promising technology in the field of the food supply chain industry (FSCI). A literature survey comprising an analytical review of blockchain technology with the Internet of things (IoT) for food supply chain management (FSCM) is presented to better understand the associated research benefits, issues, and challenges. At present, with the concept of farm-to-fork gaining increasing popularity, food safety and quality certification are of critical concern. Blockchain technology provides the traceability of food supply from the source, i.e., the seeding factories, to the customer's table. The main idea of this paper is to identify blockchain technology with the Internet of things (IoT) devices to investigate the food conditions and various issues faced by transporters while supplying fresh food. 
Blockchain provides applications such as smart contracts to monitor, observe, and manage all transactions and communications among stakeholders. IoT technology provides approaches for verifying all transactions; these transactions are recorded and then stored in a centralized database system. Thus, IoT enables a safe and cost-effective FSCM system for stakeholders. In this paper, we contribute to the awareness of blockchain applications that are relevant to the food supply chain (FSC), and we present an analysis of the literature on relevant blockchain applications which has been conducted concerning various parameters. The observations in the present survey are also relevant to the application of blockchain technology with IoT in other areas.}, } @article {pmid36365848, year = {2022}, author = {Shamshad, S and Riaz, F and Riaz, R and Rizvi, SS and Abdulla, S}, title = {An Enhanced Architecture to Resolve Public-Key Cryptographic Issues in the Internet of Things (IoT), Employing Quantum Computing Supremacy.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {21}, pages = {}, pmid = {36365848}, issn = {1424-8220}, abstract = {The Internet of Things (IoT) strongly influences the world economy; this emphasizes the importance of securing all four aspects of the IoT model: sensors, networks, cloud, and applications. Considering the significant value of public-key cryptography threats on IoT system confidentiality, it is vital to secure it. One of the potential candidates to assist in securing public key cryptography in IoT is quantum computing. Although the notion of IoT and quantum computing convergence is not new, it has been referenced in various works of literature and covered by many scholars. Quantum computing eliminates most of the challenges in IoT. 
This research provides a comprehensive introduction to the Internet of Things and quantum computing before moving on to public-key cryptography difficulties that may be encountered across the convergence of quantum computing and IoT. An enhanced architecture is then proposed for resolving these public-key cryptography challenges using SimuloQron to implement the BB84 protocol for quantum key distribution (QKD) and one-time pad (OTP). The proposed model prevents eavesdroppers from performing destructive operations in the communication channel and cyber side by preserving its state and protecting the public key using quantum cryptography and the BB84 protocol. A modified version is introduced for this IoT situation. A traditional cryptographic mechanism called "one-time pad" (OTP) is employed in hybrid management.}, } @article {pmid36357557, year = {2022}, author = {Tuli, S and Casale, G and Jennings, NR}, title = {SimTune: bridging the simulator reality gap for resource management in edge-cloud computing.}, journal = {Scientific reports}, volume = {12}, number = {1}, pages = {19158}, pmid = {36357557}, issn = {2045-2322}, mesh = {Humans ; *Cloud Computing ; Computer Simulation ; }, abstract = {Industries and services are undergoing an Internet of Things centric transformation globally, giving rise to an explosion of multi-modal data generated each second. This, with the requirement of low-latency result delivery, has led to the ubiquitous adoption of edge and cloud computing paradigms. Edge computing follows the data gravity principle, wherein the computational devices move closer to the end-users to minimize data transfer and communication times. However, large-scale computation has exacerbated the problem of efficient resource management in hybrid edge-cloud platforms. In this regard, data-driven models such as deep neural networks (DNNs) have gained popularity to give rise to the notion of edge intelligence. 
However, DNNs face significant problems of data saturation when fed volatile data. Data saturation is when providing more data does not translate to improvements in performance. To address this issue, prior work has leveraged coupled simulators that, akin to digital twins, generate out-of-distribution training data alleviating the data-saturation problem. However, simulators face the reality-gap problem, which is the inaccuracy in the emulation of real computational infrastructure due to the abstractions in such simulators. To combat this, we develop a framework, SimTune, that tackles this challenge by leveraging a low-fidelity surrogate model of the high-fidelity simulator to update the parameters of the latter, so to increase the simulation accuracy. This further helps co-simulated methods to generalize to edge-cloud configurations for which human encoded parameters are not known apriori. Experiments comparing SimTune against state-of-the-art data-driven resource management solutions on a real edge-cloud platform demonstrate that simulator tuning can improve quality of service metrics such as energy consumption and response time by up to 14.7% and 7.6% respectively.}, } @article {pmid36351936, year = {2022}, author = {Benhammou, Y and Alcaraz-Segura, D and Guirado, E and Khaldi, R and Achchab, B and Herrera, F and Tabik, S}, title = {Sentinel2GlobalLULC: A Sentinel-2 RGB image tile dataset for global land use/cover mapping with deep learning.}, journal = {Scientific data}, volume = {9}, number = {1}, pages = {681}, pmid = {36351936}, issn = {2052-4463}, abstract = {Land-Use and Land-Cover (LULC) mapping is relevant for many applications, from Earth system and climate modelling to territorial and urban planning. Global LULC products are continuously developing as remote sensing data and methods grow. However, there still exists low consistency among LULC products due to low accuracy in some regions and LULC types. 
Here, we introduce Sentinel2GlobalLULC, a Sentinel-2 RGB image dataset, built from the spatial-temporal consensus of up to 15 global LULC maps available in Google Earth Engine. Sentinel2GlobalLULC v2.1 contains 194877 single-class RGB image tiles organized into 29 LULC classes. Each image is a 224 × 224 pixels tile at 10 × 10 m resolution built as a cloud-free composite from Sentinel-2 images acquired between June 2015 and October 2020. Metadata includes a unique LULC annotation per image, together with level of consensus, reverse geo-referencing, global human modification index, and number of dates used in the composite. Sentinel2GlobalLULC is designed for training deep learning models aiming to build precise and robust global or regional LULC maps.}, } @article {pmid36350854, year = {2023}, author = {Zhang, X and Han, L and Sobeih, T and Han, L and Dempsey, N and Lechareas, S and Tridente, A and Chen, H and White, S and Zhang, D}, title = {CXR-Net: A Multitask Deep Learning Network for Explainable and Accurate Diagnosis of COVID-19 Pneumonia From Chest X-Ray Images.}, journal = {IEEE journal of biomedical and health informatics}, volume = {27}, number = {2}, pages = {980-991}, doi = {10.1109/JBHI.2022.3220813}, pmid = {36350854}, issn = {2168-2208}, mesh = {Humans ; *COVID-19/diagnostic imaging ; *Deep Learning ; X-Rays ; Thorax/diagnostic imaging ; *Pneumonia, Viral/diagnostic imaging ; COVID-19 Testing ; }, abstract = {Accurate and rapid detection of COVID-19 pneumonia is crucial for optimal patient treatment. Chest X-Ray (CXR) is the first-line imaging technique for COVID-19 pneumonia diagnosis as it is fast, cheap and easily accessible. Currently, many deep learning (DL) models have been proposed to detect COVID-19 pneumonia from CXR images. Unfortunately, these deep classifiers lack the transparency in interpreting findings, which may limit their applications in clinical practice. 
The existing explanation methods produce either too noisy or imprecise results, and hence are unsuitable for diagnostic purposes. In this work, we propose a novel explainable CXR deep neural Network (CXR-Net) for accurate COVID-19 pneumonia detection with an enhanced pixel-level visual explanation using CXR images. An Encoder-Decoder-Encoder architecture is proposed, in which an extra encoder is added after the encoder-decoder structure to ensure the model can be trained on category samples. The method has been evaluated on real world CXR datasets from both public and private sources, including healthy, bacterial pneumonia, viral pneumonia and COVID-19 pneumonia cases. The results demonstrate that the proposed method can achieve a satisfactory accuracy and provide fine-resolution activation maps for visual explanation in the lung disease detection. Compared to current state-of-the-art visual explanation methods, the proposed method can provide more detailed, high-resolution, visual explanation for the classification results. It can be deployed in various computing environments, including cloud, CPU and GPU environments. 
It has a great potential to be used in clinical practice for COVID-19 pneumonia diagnosis.}, } @article {pmid36335750, year = {2022}, author = {Tomassini, S and Sbrollini, A and Covella, G and Sernani, P and Falcionelli, N and Müller, H and Morettini, M and Burattini, L and Dragoni, AF}, title = {Brain-on-Cloud for automatic diagnosis of Alzheimer's disease from 3D structural magnetic resonance whole-brain scans.}, journal = {Computer methods and programs in biomedicine}, volume = {227}, number = {}, pages = {107191}, doi = {10.1016/j.cmpb.2022.107191}, pmid = {36335750}, issn = {1872-7565}, mesh = {Humans ; *Alzheimer Disease/diagnostic imaging/pathology ; Quality of Life ; Neuroimaging/methods ; Magnetic Resonance Imaging/methods ; Brain/diagnostic imaging/pathology ; Magnetic Resonance Spectroscopy ; *Cognitive Dysfunction ; }, abstract = {BACKGROUND AND OBJECTIVE: Alzheimer's disease accounts for approximately 70% of all dementia cases. Cortical and hippocampal atrophy caused by Alzheimer's disease can be appreciated easily from a T1-weighted structural magnetic resonance scan. Since a timely therapeutic intervention during the initial stages of the syndrome has a positive impact on both disease progression and quality of life of affected subjects, Alzheimer's disease diagnosis is crucial. Thus, this study relies on the development of a robust yet lightweight 3D framework, Brain-on-Cloud, dedicated to efficient learning of Alzheimer's disease-related features from 3D structural magnetic resonance whole-brain scans by improving our recent convolutional long short-term memory-based framework with the integration of a set of data handling techniques in addition to the tuning of the model hyper-parameters and the evaluation of its diagnostic performance on independent test data.

METHODS: For this objective, four serial experiments were conducted on a scalable GPU cloud service. They were compared and the hyper-parameters of the best experiment were tuned until reaching the best-performing configuration. In parallel, two branches were designed. In the first branch of Brain-on-Cloud, training, validation and testing were performed on OASIS-3. In the second branch, unenhanced data from ADNI-2 were employed as independent test set, and the diagnostic performance of Brain-on-Cloud was evaluated to prove its robustness and generalization capability. The prediction scores were computed for each subject and stratified according to age, sex and mini mental state examination.

RESULTS: In its best guise, Brain-on-Cloud is able to discriminate Alzheimer's disease with an accuracy of 92% and 76%, sensitivity of 94% and 82%, and area under the curve of 96% and 92% on OASIS-3 and independent ADNI-2 test data, respectively.

CONCLUSIONS: Brain-on-Cloud shows to be a reliable, lightweight and easily-reproducible framework for automatic diagnosis of Alzheimer's disease from 3D structural magnetic resonance whole-brain scans, performing well without segmenting the brain into its portions. Preserving the brain anatomy, its application and diagnostic ability can be extended to other cognitive disorders. Due to its cloud nature, computational lightness and fast execution, it can also be applied in real-time diagnostic scenarios providing prompt clinical decision support.}, } @article {pmid36321417, year = {2022}, author = {Golkar, A and Malekhosseini, R and RahimiZadeh, K and Yazdani, A and Beheshti, A}, title = {A priority queue-based telemonitoring system for automatic diagnosis of heart diseases in integrated fog computing environments.}, journal = {Health informatics journal}, volume = {28}, number = {4}, pages = {14604582221137453}, doi = {10.1177/14604582221137453}, pmid = {36321417}, issn = {1741-2811}, mesh = {Humans ; *Cloud Computing ; Delivery of Health Care ; *Heart Diseases ; }, abstract = {Various studies have shown the benefits of using distributed fog computing for healthcare systems. The new pattern of fog and edge computing reduces latency for data processing compared to cloud computing. Nevertheless, the proposed fog models still have many limitations in improving system performance and patients' response time.This paper, proposes a new performance model by integrating fog computing, priority queues and certainty theory into the Edge computing devices and validating it by analyzing heart disease patients' conditions in clinical decision support systems (CDSS). In this model, a Certainty Factor (CF) value is assigned to each symptom of heart disease. When one or more symptoms show an abnormal value, the patient's condition will be evaluated using CF values in the fog layer. 
In the fog layer, requests are categorized in different priority queues before arriving into the system. The results demonstrate that network usage, latency, and response time of patients' requests are respectively improved by 25.55%, 42.92%, and 34.28% compared to the cloud model. Prioritizing patient requests with respect to CF values in the CDSS provides higher system Quality of Service (QoS) and patients' response time.}, } @article {pmid36318260, year = {2023}, author = {Ament, SA and Adkins, RS and Carter, R and Chrysostomou, E and Colantuoni, C and Crabtree, J and Creasy, HH and Degatano, K and Felix, V and Gandt, P and Garden, GA and Giglio, M and Herb, BR and Khajouei, F and Kiernan, E and McCracken, C and McDaniel, K and Nadendla, S and Nickel, L and Olley, D and Orvis, J and Receveur, JP and Schor, M and Sonthalia, S and Tickle, TL and Way, J and Hertzano, R and Mahurkar, AA and White, OR}, title = {The Neuroscience Multi-Omic Archive: a BRAIN Initiative resource for single-cell transcriptomic and epigenomic data from the mammalian brain.}, journal = {Nucleic acids research}, volume = {51}, number = {D1}, pages = {D1075-D1085}, pmid = {36318260}, issn = {1362-4962}, support = {R01 DC019370/DC/NIDCD NIH HHS/United States ; R24 MH114788/MH/NIMH NIH HHS/United States ; R24 MH114815/MH/NIMH NIH HHS/United States ; UM1 DA052244/DA/NIDA NIH HHS/United States ; }, mesh = {Animals ; Mice ; *Epigenomics ; Genomics ; Mammals ; *Multiomics ; Primates ; *Transcriptome ; *Brain/cytology/metabolism ; *Databases, Genetic ; }, abstract = {Scalable technologies to sequence the transcriptomes and epigenomes of single cells are transforming our understanding of cell types and cell states. The Brain Research through Advancing Innovative Neurotechnologies (BRAIN) Initiative Cell Census Network (BICCN) is applying these technologies at unprecedented scale to map the cell types in the mammalian brain. 
In an effort to increase data FAIRness (Findable, Accessible, Interoperable, Reusable), the NIH has established repositories to make data generated by the BICCN and related BRAIN Initiative projects accessible to the broader research community. Here, we describe the Neuroscience Multi-Omic Archive (NeMO Archive; nemoarchive.org), which serves as the primary repository for genomics data from the BRAIN Initiative. Working closely with other BRAIN Initiative researchers, we have organized these data into a continually expanding, curated repository, which contains transcriptomic and epigenomic data from over 50 million brain cells, including single-cell genomic data from all of the major regions of the adult and prenatal human and mouse brains, as well as substantial single-cell genomic data from non-human primates. We make available several tools for accessing these data, including a searchable web portal, a cloud-computing interface for large-scale data processing (implemented on Terra, terra.bio), and a visualization and analysis platform, NeMO Analytics (nemoanalytics.org).}, } @article {pmid36316488, year = {2022}, author = {Prakash, AJ and Kumar, S and Behera, MD and Das, P and Kumar, A and Srivastava, PK}, title = {Impact of extreme weather events on cropland inundation over Indian subcontinent.}, journal = {Environmental monitoring and assessment}, volume = {195}, number = {1}, pages = {50}, pmid = {36316488}, issn = {1573-2959}, mesh = {*Extreme Weather ; Environmental Monitoring/methods ; Floods ; Crops, Agricultural ; Water ; Weather ; }, abstract = {Cyclonic storms and extreme precipitation lead to loss of lives and significant damage to land and property, crop productivity, etc. The "Gulab" cyclonic storm formed on the 24[th] of September 2021 in the Bay of Bengal (BoB), hit the eastern Indian coasts on the 26[th] of September and caused massive damage and water inundation. 
This study used Integrated Multi-satellite Retrievals for GPM (IMERG) satellite precipitation data for daily to monthly scale assessments focusing on the "Gulab" cyclonic event. The Otsu's thresholding approach was applied to Sentinel-1 data to map water inundation. Standardized Precipitation Index (SPI) was employed to analyze the precipitation deviation compared to the 20 years mean climatology across India from June to November 2021 on a monthly scale. The water-inundated areas were overlaid on a recent publicly available high-resolution land use land cover (LULC) map to demarcate crop area damage in four eastern Indian states such as Andhra Pradesh, Chhattisgarh, Odisha, and Telangana. The maximum water inundation and crop area damages were observed in Andhra Pradesh (~2700 km[2]), followed by Telangana (~2040 km[2]) and Odisha (~1132 km[2]), and the least in Chhattisgarh (~93.75 km[2]). This study has potential implications for an emergency response to extreme weather events, such as cyclones, extreme precipitation, and flood. The spatio-temporal data layers and rapid assessment methodology can be helpful to various users such as disaster management authorities, mitigation and response teams, and crop insurance scheme development. 
The relevant satellite data, products, and cloud-computing facility could operationalize systematic disaster monitoring under the rising threats of extreme weather events in the coming years.}, } @article {pmid36316226, year = {2022}, author = {Khosla, A and Sonu, and Awan, HTA and Singh, K and Gaurav, and Walvekar, R and Zhao, Z and Kaushik, A and Khalid, M and Chaudhary, V}, title = {Emergence of MXene and MXene-Polymer Hybrid Membranes as Future- Environmental Remediation Strategies.}, journal = {Advanced science (Weinheim, Baden-Wurttemberg, Germany)}, volume = {9}, number = {36}, pages = {e2203527}, pmid = {36316226}, issn = {2198-3844}, support = {STR-IRNGS-SET-GAMRG-01-2022//Sunway University/ ; }, mesh = {*Artificial Intelligence ; *Environmental Restoration and Remediation ; Machine Learning ; Polymers ; }, abstract = {The continuous deterioration of the environment due to extensive industrialization and urbanization has raised the requirement to devise high-performance environmental remediation technologies. Membrane technologies, primarily based on conventional polymers, are the most commercialized air, water, solid, and radiation-based environmental remediation strategies. Low stability at high temperatures, swelling in organic contaminants, and poor selectivity are the fundamental issues associated with polymeric membranes restricting their scalable viability. Polymer-metal-carbides and nitrides (MXenes) hybrid membranes possess remarkable physicochemical attributes, including strong mechanical endurance, high mechanical flexibility, superior adsorptive behavior, and selective permeability, due to multi-interactions between polymers and MXene's surface functionalities. This review articulates the state-of-the-art MXene-polymer hybrid membranes, emphasizing its fabrication routes, enhanced physicochemical properties, and improved adsorptive behavior. 
It comprehensively summarizes the utilization of MXene-polymer hybrid membranes for environmental remediation applications, including water purification, desalination, ion-separation, gas separation and detection, contaminant adsorption, and electromagnetic and nuclear radiation shielding. Furthermore, the review highlights the associated bottlenecks of MXene-polymer hybrid-membranes and its possible alternate solutions to meet industrial requirements. Opportunities and prospects related to MXene-polymer membranes to devise intelligent and next-generation environmental remediation strategies with the integration of modern-age technologies of internet-of-things, artificial intelligence, machine-learning, 5G-communication and cloud-computing are elucidated.
Using BioViz Connect, users can 1) stream data from the CyVerse data store into IGB for visualization, 2) improve the IGB user experience for themselves and others by adding IGB specific metadata to CyVerse data files, including genome version and track appearance, and 3) run compute-intensive visual analytics functions on CyVerse infrastructure to create new datasets for visualization in IGB or other applications. To demonstrate how BioViz Connect facilitates interactive data visualization, we describe an example RNA-Seq data analysis investigating how heat and desiccation stresses affect gene expression in the model plant Arabidopsis thaliana. The RNA-Seq use case illustrates how interactive visualization with IGB can help a user identify problematic experimental samples, sanity-check results using a positive control, and create new data files for interactive visualization in IGB (or other tools) using a Docker image deployed to CyVerse via the Terrain API. Lastly, we discuss limitations of the technologies used and suggest opportunities for future work. BioViz Connect is available from https://bioviz.org.}, } @article {pmid36303792, year = {2021}, author = {Guérinot, C and Marcon, V and Godard, C and Blanc, T and Verdier, H and Planchon, G and Raimondi, F and Boddaert, N and Alonso, M and Sailor, K and Lledo, PM and Hajj, B and El Beheiry, M and Masson, JB}, title = {New Approach to Accelerated Image Annotation by Leveraging Virtual Reality and Cloud Computing.}, journal = {Frontiers in bioinformatics}, volume = {1}, number = {}, pages = {777101}, pmid = {36303792}, issn = {2673-7647}, abstract = {Three-dimensional imaging is at the core of medical imaging and is becoming a standard in biological research. As a result, there is an increasing need to visualize, analyze and interact with data in a natural three-dimensional context. 
By combining stereoscopy and motion tracking, commercial virtual reality (VR) headsets provide a solution to this critical visualization challenge by allowing users to view volumetric image stacks in a highly intuitive fashion. While optimizing the visualization and interaction process in VR remains an active topic, one of the most pressing issue is how to utilize VR for annotation and analysis of data. Annotating data is often a required step for training machine learning algorithms. For example, enhancing the ability to annotate complex three-dimensional data in biological research as newly acquired data may come in limited quantities. Similarly, medical data annotation is often time-consuming and requires expert knowledge to identify structures of interest correctly. Moreover, simultaneous data analysis and visualization in VR is computationally demanding. Here, we introduce a new procedure to visualize, interact, annotate and analyze data by combining VR with cloud computing. VR is leveraged to provide natural interactions with volumetric representations of experimental imaging data. In parallel, cloud computing performs costly computations to accelerate the data annotation with minimal input required from the user. We demonstrate multiple proof-of-concept applications of our approach on volumetric fluorescent microscopy images of mouse neurons and tumor or organ annotations in medical images.}, } @article {pmid36301785, year = {2023}, author = {Reani, Y and Bobrowski, O}, title = {Cycle Registration in Persistent Homology With Applications in Topological Bootstrap.}, journal = {IEEE transactions on pattern analysis and machine intelligence}, volume = {45}, number = {5}, pages = {5579-5593}, doi = {10.1109/TPAMI.2022.3217443}, pmid = {36301785}, issn = {1939-3539}, abstract = {We propose a novel approach for comparing the persistent homology representations of two spaces (or filtrations). 
Commonly used methods are based on numerical summaries such as persistence diagrams and persistence landscapes, along with suitable metrics (e.g., Wasserstein). These summaries are useful for computational purposes, but they are merely a marginal of the actual topological information that persistent homology can provide. Instead, our approach compares between two topological representations directly in the data space. We do so by defining a correspondence relation between individual persistent cycles of two different spaces, and devising a method for computing this correspondence. Our matching of cycles is based on both the persistence intervals and the spatial placement of each feature. We demonstrate our new framework in the context of topological inference, where we use statistical bootstrap methods in order to differentiate between real features and noise in point cloud data.}, } @article {pmid36299750, year = {2022}, author = {Li, X and You, K}, title = {Real-time tracking and detection of patient conditions in the intelligent m-Health monitoring system.}, journal = {Frontiers in public health}, volume = {10}, number = {}, pages = {922718}, pmid = {36299750}, issn = {2296-2565}, mesh = {Humans ; *Telemedicine ; }, abstract = {In order to help patients monitor their personal health in real time, this paper proposes an intelligent mobile health monitoring system and establishes a corresponding health network to track and process patients' physical activity and other health-related factors in real time. Performance was analyzed. The experimental results show that after comparing the accuracy, delay time, error range, efficiency, and energy utilization of Im-HMS and existing UCD systems, it is found that the accuracy of Im-HMS is mostly between 98 and 100%, while the accuracy of UCD is mostly between 98 and 100%. 
Most of the systems are between 91 and 97%; in terms of delay comparison, the delay of the Im-HMS system is between 18 and 39 ms, which is far lower than the lowest value of the UCD system of 84 ms, and the Im-HMS is significantly better than the existing UCD system; the error range of Im-HMS is mainly between 0.2 and 1.4, while the error range of UCD system is mainly between -2 and 14; and in terms of efficiency and energy utilization, Im-HMS values are higher than those of UCD system. In general, the Im-HMS system proposed in this study is more accurate than UCD system and has lower delay, smaller error, and higher efficiency, and energy utilization is more efficient than UCD system, which is of great significance for mobile health monitoring in practical applications.}, } @article {pmid36299577, year = {2022}, author = {Yu, L and Yu, PS and Duan, Y and Qiao, H}, title = {A resource scheduling method for reliable and trusted distributed composite services in cloud environment based on deep reinforcement learning.}, journal = {Frontiers in genetics}, volume = {13}, number = {}, pages = {964784}, pmid = {36299577}, issn = {1664-8021}, abstract = {With the vigorous development of Internet technology, applications are increasingly migrating to the cloud. Cloud, a distributed network environment, has been widely extended to many fields such as digital finance, supply chain management, and biomedicine. In order to meet the needs of the rapid development of the modern biomedical industry, the biological cloud platform is an inevitable choice for the integration and analysis of medical information. It improves the work efficiency of the biological information system and also realizes reliable and credible intelligent processing of biological resources. 
Cloud services in bioinformatics are mainly for the processing of biological data, such as the analysis and processing of genes, the testing and detection of human tissues and organs, and the storage and transportation of vaccines. Biomedical companies form a data chain on the cloud, and they provide services and transfer data to each other to create composite services. Therefore, our motivation is to improve process efficiency of biological cloud services. Users' business requirements have become complicated and diversified, which puts forward higher requirements for service scheduling strategies in cloud computing platforms. In addition, deep reinforcement learning shows strong perception and continuous decision-making capabilities in automatic control problems, which provides a new idea and method for solving the service scheduling and resource allocation problems in the cloud computing field. Therefore, this paper designs a composite service scheduling model under the containers instance mode which hybrids reservation and on-demand. The containers in the cluster are divided into two instance modes: reservation and on-demand. A composite service is described as a three-level structure: a composite service consists of multiple services, and a service consists of multiple service instances, where the service instance is the minimum scheduling unit. In addition, an improved Deep Q-Network (DQN) algorithm is proposed and applied to the scheduling algorithm of composite services. The experimental results show that applying our improved DQN algorithm to the composite services scheduling problem in the container cloud environment can effectively reduce the completion time of the composite services. 
Meanwhile, the method improves Quality of Service (QoS) and resource utilization in the container cloud environment.}, } @article {pmid36298902, year = {2022}, author = {Zhang, Y and Wu, Z and Lin, P and Pan, Y and Wu, Y and Zhang, L and Huangfu, J}, title = {Hand gestures recognition in videos taken with a lensless camera.}, journal = {Optics express}, volume = {30}, number = {22}, pages = {39520-39533}, doi = {10.1364/OE.470324}, pmid = {36298902}, issn = {1094-4087}, mesh = {*Gestures ; *Pattern Recognition, Automated/methods ; Algorithms ; Neural Networks, Computer ; }, abstract = {A lensless camera is an imaging system that uses a mask in place of a lens, making it thinner, lighter, and less expensive than a lensed camera. However, additional complex computation and time are required for image reconstruction. This work proposes a deep learning model named Raw3dNet that recognizes hand gestures directly on raw videos captured by a lensless camera without the need for image restoration. In addition to conserving computational resources, the reconstruction-free method provides privacy protection. Raw3dNet is a novel end-to-end deep neural network model for the recognition of hand gestures in lensless imaging systems. It is created specifically for raw video captured by a lensless camera and has the ability to properly extract and combine temporal and spatial features. The network is composed of two stages: 1. spatial feature extractor (SFE), which enhances the spatial features of each frame prior to temporal convolution; 2. 3D-ResNet, which implements spatial and temporal convolution of video streams. The proposed model achieves 98.59% accuracy on the Cambridge Hand Gesture dataset in the lensless optical experiment, which is comparable to the lensed-camera result. Additionally, the feasibility of physical object recognition is assessed. 
Further, we show that the recognition can be achieved with respectable accuracy using only a tiny portion of the original raw data, indicating the potential for reducing data traffic in cloud computing scenarios.}, } @article {pmid36298422, year = {2022}, author = {Amin, F and Abbasi, R and Mateen, A and Ali Abid, M and Khan, S}, title = {A Step toward Next-Generation Advancements in the Internet of Things Technologies.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {20}, pages = {}, pmid = {36298422}, issn = {1424-8220}, abstract = {The Internet of Things (IoT) devices generate a large amount of data over networks; therefore, the efficiency, complexity, interfaces, dynamics, robustness, and interaction need to be re-examined on a large scale. This phenomenon will lead to seamless network connectivity and the capability to provide support for the IoT. The traditional IoT is not enough to provide support. Therefore, we designed this study to provide a systematic analysis of next-generation advancements in the IoT. We propose a systematic catalog that covers the most recent advances in the traditional IoT. An overview of the IoT from the perspectives of big data, data science, and network science disciplines and also connecting technologies is given. We highlight the conceptual view of the IoT, key concepts, growth, and most recent trends. We discuss and highlight the importance and the integration of big data, data science, and network science along with key applications such as artificial intelligence, machine learning, blockchain, federated learning, etc. Finally, we discuss various challenges and issues of IoT such as architecture, integration, data provenance, and important applications such as cloud and edge computing, etc. 
This article will provide aid to the readers and other researchers in an understanding of the IoT's next-generation developments and tell how they apply to the real world.}, } @article {pmid36298408, year = {2022}, author = {Farag, MM}, title = {Matched Filter Interpretation of CNN Classifiers with Application to HAR.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {20}, pages = {}, pmid = {36298408}, issn = {1424-8220}, support = {GRANT1610//King Faisal University/ ; }, mesh = {Humans ; *Neural Networks, Computer ; *Human Activities ; Machine Learning ; Smartphone ; }, abstract = {Time series classification is an active research topic due to its wide range of applications and the proliferation of sensory data. Convolutional neural networks (CNNs) are ubiquitous in modern machine learning (ML) models. In this work, we present a matched filter (MF) interpretation of CNN classifiers accompanied by an experimental proof of concept using a carefully developed synthetic dataset. We exploit this interpretation to develop an MF CNN model for time series classification comprising a stack of a Conv1D layer followed by a GlobalMaxPooling layer acting as a typical MF for automated feature extraction and a fully connected layer with softmax activation for computing class probabilities. The presented interpretation enables developing superlight highly accurate classifier models that meet the tight requirements of edge inference. Edge inference is emerging research that addresses the latency, availability, privacy, and connectivity concerns of the commonly deployed cloud inference. The MF-based CNN model has been applied to the sensor-based human activity recognition (HAR) problem due to its significant importance in a broad range of applications. The UCI-HAR, WISDM-AR, and MotionSense datasets are used for model training and testing. 
The proposed classifier is tested and benchmarked on an android smartphone with average accuracy and F1 scores of 98% and 97%, respectively, which outperforms state-of-the-art HAR methods in terms of classification accuracy and run-time performance. The proposed model size is less than 150 KB, and the average inference time is less than 1 ms. The presented interpretation helps develop a better understanding of CNN operation and decision mechanisms. The proposed model is distinguished from related work by jointly featuring interpretability, high accuracy, and low computational cost, enabling its ready deployment on a wide set of mobile devices for a broad range of applications.}, } @article {pmid36298402, year = {2022}, author = {Munir, T and Akbar, MS and Ahmed, S and Sarfraz, A and Sarfraz, Z and Sarfraz, M and Felix, M and Cherrez-Ojeda, I}, title = {A Systematic Review of Internet of Things in Clinical Laboratories: Opportunities, Advantages, and Challenges.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {20}, pages = {}, pmid = {36298402}, issn = {1424-8220}, mesh = {*Internet of Things ; Computer Security ; Laboratories, Clinical ; Privacy ; Software ; }, abstract = {The Internet of Things (IoT) is the network of physical objects embedded with sensors, software, electronics, and online connectivity systems. This study explores the role of IoT in clinical laboratory processes; this systematic review was conducted adhering to the PRISMA Statement 2020 guidelines. We included IoT models and applications across preanalytical, analytical, and postanalytical laboratory processes. PubMed, Cochrane Central, CINAHL Plus, Scopus, IEEE, and A.C.M. Digital library were searched between August 2015 to August 2022; the data were tabulated. Cohen's coefficient of agreement was calculated to quantify inter-reviewer agreements; a total of 18 studies were included with Cohen's coefficient computed to be 0.91. 
The included studies were divided into three classifications based on availability, including preanalytical, analytical, and postanalytical. The majority (77.8%) of the studies were real-tested. Communication-based approaches were the most common (83.3%), followed by application-based approaches (44.4%) and sensor-based approaches (33.3%) among the included studies. Open issues and challenges across the included studies included scalability, costs and energy consumption, interoperability, privacy and security, and performance issues. In this study, we identified, classified, and evaluated IoT applicability in clinical laboratory systems. This study presents pertinent findings for IoT development across clinical laboratory systems, for which it is essential that more rigorous and efficient testing and studies be conducted in the future.}, } @article {pmid36298235, year = {2022}, author = {Velichko, A and Huyut, MT and Belyaev, M and Izotov, Y and Korzun, D}, title = {Machine Learning Sensors for Diagnosis of COVID-19 Disease Using Routine Blood Values for Internet of Things Application.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {20}, pages = {}, pmid = {36298235}, issn = {1424-8220}, support = {22-11-20040//Russian Science Foundation/ ; }, mesh = {Humans ; *Internet of Things ; *COVID-19/diagnosis ; Cholesterol, HDL ; Machine Learning ; Amylases ; Triglycerides ; }, abstract = {Healthcare digitalization requires effective applications of human sensors, when various parameters of the human body are instantly monitored in everyday life due to the Internet of Things (IoT). In particular, machine learning (ML) sensors for the prompt diagnosis of COVID-19 are an important option for IoT application in healthcare and ambient assisted living (AAL). Determining a COVID-19 infected status with various diagnostic tests and imaging results is costly and time-consuming. 
This study provides a fast, reliable and cost-effective alternative tool for the diagnosis of COVID-19 based on the routine blood values (RBVs) measured at admission. The dataset of the study consists of a total of 5296 patients with the same number of negative and positive COVID-19 test results and 51 routine blood values. In this study, 13 popular classifier machine learning models and the LogNNet neural network model were examined. The most successful classifier model in terms of time and accuracy in the detection of the disease was the histogram-based gradient boosting (HGB) (accuracy: 100%, time: 6.39 sec). The HGB classifier identified the 11 most important features (LDL, cholesterol, HDL-C, MCHC, triglyceride, amylase, UA, LDH, CK-MB, ALP and MCH) to detect the disease with 100% accuracy. In addition, the importance of single, double and triple combinations of these features in the diagnosis of the disease was discussed. We propose to use these 11 features and their binary combinations as important biomarkers for ML sensors in the diagnosis of the disease, supporting edge computing on Arduino and cloud IoT service.}, } @article {pmid36298158, year = {2022}, author = {Merone, M and Graziosi, A and Lapadula, V and Petrosino, L and d'Angelis, O and Vollero, L}, title = {A Practical Approach to the Analysis and Optimization of Neural Networks on Embedded Systems.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {20}, pages = {}, pmid = {36298158}, issn = {1424-8220}, mesh = {*Artificial Intelligence ; *Neural Networks, Computer ; Cloud Computing ; Algorithms ; Computers ; }, abstract = {The exponential increase in internet data poses several challenges to cloud systems and data centers, such as scalability, power overheads, network load, and data security. 
To overcome these limitations, research is focusing on the development of edge computing systems, i.e., based on a distributed computing model in which data processing occurs as close as possible to where the data are collected. Edge computing, indeed, mitigates the limitations of cloud computing, implementing artificial intelligence algorithms directly on the embedded devices enabling low latency responses without network overhead or high costs, and improving solution scalability. Today, the hardware improvements of the edge devices make them capable of performing, even if with some constraints, complex computations, such as those required by Deep Neural Networks. Nevertheless, to efficiently implement deep learning algorithms on devices with limited computing power, it is necessary to minimize the production time and to quickly identify, deploy, and, if necessary, optimize the best Neural Network solution. This study focuses on developing a universal method to identify and port the best Neural Network on an edge system, valid regardless of the device, Neural Network, and task typology. The method is based on three steps: a trade-off step to obtain the best Neural Network within different solutions under investigation; an optimization step to find the best configurations of parameters under different acceleration techniques; eventually, an explainability step using local interpretable model-agnostic explanations (LIME), which provides a global approach to quantify the goodness of the classifier decision criteria. 
We evaluated several MobileNets on the Fudan Shanghai-Tech dataset to test the proposed approach.}, } @article {pmid36298065, year = {2022}, author = {Torrisi, F and Amato, E and Corradino, C and Mangiagli, S and Del Negro, C}, title = {Characterization of Volcanic Cloud Components Using Machine Learning Techniques and SEVIRI Infrared Images.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {20}, pages = {}, pmid = {36298065}, issn = {1424-8220}, support = {OB.FU. 0867.010//INGV/ ; }, mesh = {Humans ; Atmosphere ; Gases ; Machine Learning ; *Volcanic Eruptions ; }, abstract = {Volcanic explosive eruptions inject several different types of particles and gasses into the atmosphere, giving rise to the formation and propagation of volcanic clouds. These can pose a serious threat to the health of people living near an active volcano and cause damage to air traffic. Many efforts have been devoted to monitor and characterize volcanic clouds. Satellite infrared (IR) sensors have been shown to be well suitable for volcanic cloud monitoring tasks. Here, a machine learning (ML) approach was developed in Google Earth Engine (GEE) to detect a volcanic cloud and to classify its main components using satellite infrared images. We implemented a supervised support vector machine (SVM) algorithm to segment a combination of thermal infrared (TIR) bands acquired by the geostationary MSG-SEVIRI (Meteosat Second Generation-Spinning Enhanced Visible and Infrared Imager). This ML algorithm was applied to some of the paroxysmal explosive events that occurred at Mt. Etna between 2020 and 2022. 
We found that the ML approach using a combination of TIR bands from the geostationary satellite is very efficient, achieving an accuracy of 0.86, being able to properly detect, track and map automatically volcanic ash clouds in near real-time.}, } @article {pmid36294134, year = {2022}, author = {Li, Z}, title = {Forecasting Weekly Dengue Cases by Integrating Google Earth Engine-Based Risk Predictor Generation and Google Colab-Based Deep Learning Modeling in Fortaleza and the Federal District, Brazil.}, journal = {International journal of environmental research and public health}, volume = {19}, number = {20}, pages = {}, pmid = {36294134}, issn = {1660-4601}, support = {42061134019//National Natural Science Foundation of China/ ; QYZDB-SSW-DQC005//Key Research Program of Frontier Sciences of the Chinese Academy of Sciences/ ; E0V00110YZ//Institute of Geographic Sciences and Natural Resources Research, Chinese Acad-emy of Sciences/ ; }, mesh = {Humans ; Brazil/epidemiology ; *Dengue/epidemiology ; *Deep Learning ; Artificial Intelligence ; Search Engine ; Forecasting ; }, abstract = {Efficient and accurate dengue risk prediction is an important basis for dengue prevention and control, which faces challenges, such as downloading and processing multi-source data to generate risk predictors and consuming significant time and computational resources to train and validate models locally. In this context, this study proposed a framework for dengue risk prediction by integrating big geospatial data cloud computing based on Google Earth Engine (GEE) platform and artificial intelligence modeling on the Google Colab platform. It enables defining the epidemiological calendar, delineating the predominant area of dengue transmission in cities, generating the data of risk predictors, and defining multi-date ahead prediction scenarios. 
We implemented the experiments based on weekly dengue cases during 2013-2020 in the Federal District and Fortaleza, Brazil to evaluate the performance of the proposed framework. Four predictors were considered, including total rainfall (Rsum), mean temperature (Tmean), mean relative humidity (RHmean), and mean normalized difference vegetation index (NDVImean). Three models (i.e., random forest (RF), long-short term memory (LSTM), and LSTM with attention mechanism (LSTM-ATT)), and two modeling scenarios (i.e., modeling with or without dengue cases) were set to implement 1- to 4-week ahead predictions. A total of 24 models were built, and the results showed in general that LSTM and LSTM-ATT models outperformed RF models; modeling could benefit from using historical dengue cases as one of the predictors, and it makes the predicted curve fluctuation more stable compared with that only using climate and environmental factors; attention mechanism could further improve the performance of LSTM models. 
This study provides implications for future dengue risk prediction in terms of the effectiveness of GEE-based big geospatial data processing for risk predictor generation and Google Colab-based risk modeling and presents the benefits of using historical dengue data as one of the input features and the attention mechanism for LSTM modeling.}, } @article {pmid36293656, year = {2022}, author = {Alenoghena, CO and Onumanyi, AJ and Ohize, HO and Adejo, AO and Oligbi, M and Ali, SI and Okoh, SA}, title = {eHealth: A Survey of Architectures, Developments in mHealth, Security Concerns and Solutions.}, journal = {International journal of environmental research and public health}, volume = {19}, number = {20}, pages = {}, pmid = {36293656}, issn = {1660-4601}, support = {TETF/ES/DR\&D-CE/NRF2020/SET1/67/VOL.1.//Tertiary Education Trust Fund, Nigeria/ ; }, mesh = {Humans ; Pandemics ; *COVID-19/epidemiology ; *Telemedicine ; Technology ; }, abstract = {The ramifications of the COVID-19 pandemic have contributed in part to a recent upsurge in the study and development of eHealth systems. Although it is almost impossible to cover all aspects of eHealth in a single discussion, three critical areas have gained traction. These include the need for acceptable eHealth architectures, the development of mobile health (mHealth) technologies, and the need to address eHealth system security concerns. Existing survey articles lack a synthesis of the most recent advancements in the development of architectures, mHealth solutions, and innovative security measures, which are essential components of effective eHealth systems. Consequently, the present article aims at providing an encompassing survey of these three aspects towards the development of successful and efficient eHealth systems. 
Firstly, we discuss the most recent innovations in eHealth architectures, such as blockchain-, Internet of Things (IoT)-, and cloud-based architectures, focusing on their respective benefits and drawbacks while also providing an overview of how they might be implemented and used. Concerning mHealth and security, we focus on key developments in both areas while discussing other critical topics of importance for eHealth systems. We close with a discussion of the important research challenges and potential future directions as they pertain to architecture, mHealth, and security concerns. This survey gives a comprehensive overview, including the merits and limitations of several possible technologies for the development of eHealth systems. This endeavor offers researchers and developers a quick snapshot of the information necessary during the design and decision-making phases of the eHealth system development lifecycle. Furthermore, we conclude that building a unified architecture for eHealth systems would require combining several existing designs. It also points out that there are still a number of problems to be solved, so more research and investment are needed to develop and deploy functional eHealth systems.}, } @article {pmid36280715, year = {2022}, author = {Schubert, PJ and Dorkenwald, S and Januszewski, M and Klimesch, J and Svara, F and Mancu, A and Ahmad, H and Fee, MS and Jain, V and Kornfeld, J}, title = {SyConn2: dense synaptic connectivity inference for volume electron microscopy.}, journal = {Nature methods}, volume = {19}, number = {11}, pages = {1367-1370}, pmid = {36280715}, issn = {1548-7105}, support = {RF1 MH117809/MH/NIMH NIH HHS/United States ; }, mesh = {Microscopy, Electron ; *Connectome ; Synapses ; Neurons ; Brain ; }, abstract = {The ability to acquire ever larger datasets of brain tissue using volume electron microscopy leads to an increasing demand for the automated extraction of connectomic information. 
We introduce SyConn2, an open-source connectome analysis toolkit, which works with both on-site high-performance compute environments and rentable cloud computing clusters. SyConn2 was tested on connectomic datasets with more than 10 million synapses, provides a web-based visualization interface and makes these data amenable to complex anatomical and neuronal connectivity queries.}, } @article {pmid36275963, year = {2022}, author = {Zhang, Y and Geng, P}, title = {Multi-Task Assignment Method of the Cloud Computing Platform Based on Artificial Intelligence.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {1789490}, pmid = {36275963}, issn = {1687-5273}, mesh = {*Cloud Computing ; *Artificial Intelligence ; Bayes Theorem ; Algorithms ; Big Data ; }, abstract = {To realize load balancing of cloud computing platforms in big data processing, the method of finding the optimal load balancing physical host in the algorithm cycle is adopted at present. This optimal load balancing strategy that overly focuses on the current deployment problem has certain limitations. It will make the system less efficient and the user's waiting time unnecessarily prolonged. This paper proposes a task assignment method for long-term resource load balancing of cloud platforms based on artificial intelligence and big data (TABAI). The maximum posterior probability for each physical host is calculated using Bayesian theory. Euler's formula is used to calculate the similarity between the host with the largest posterior probability and other hosts as a threshold. The hosts are classified according to the threshold to determine the optimal cluster and then form the final set of candidate physical hosts. It improves the resource utilization and external service capability of the cloud platform by combining cluster analysis with Bayes' theorem to achieve global load balancing in the time dimension. 
The experimental results show that: TABAI has a smaller processing time than the traditional load balancing multi-task assignment method. When the time is >600 s, the standard deviation of TABAI decreases to a greater extent, and it has stronger external service capabilities.}, } @article {pmid36274993, year = {2022}, author = {Yentes, JM and Liu, WY and Zhang, K and Markvicka, E and Rennard, SI}, title = {Updated Perspectives on the Role of Biomechanics in COPD: Considerations for the Clinician.}, journal = {International journal of chronic obstructive pulmonary disease}, volume = {17}, number = {}, pages = {2653-2675}, pmid = {36274993}, issn = {1178-2005}, support = {L30 HL129255/HL/NHLBI NIH HHS/United States ; }, mesh = {Humans ; Biomechanical Phenomena ; *Pulmonary Disease, Chronic Obstructive/diagnosis ; Gait/physiology ; Walking ; Walking Speed ; }, abstract = {Patients with chronic obstructive pulmonary disease (COPD) demonstrate extra-pulmonary functional decline such as an increased prevalence of falls. Biomechanics offers insight into functional decline by examining mechanics of abnormal movement patterns. This review discusses biomechanics of functional outcomes, muscle mechanics, and breathing mechanics in patients with COPD as well as future directions and clinical perspectives. Patients with COPD demonstrate changes in their postural sway during quiet standing compared to controls, and these deficits are exacerbated when sensory information (eg, eyes closed) is manipulated. If standing balance is disrupted with a perturbation, patients with COPD are slower to return to baseline and their muscle activity is differential from controls. When walking, patients with COPD appear to adopt a gait pattern that may increase stability (eg, shorter and wider steps, decreased gait speed) in addition to altered gait variability. 
Biomechanical muscle mechanics (ie, tension, extensibility, elasticity, and irritability) alterations with COPD are not well documented, with relatively few articles investigating these properties. On the other hand, dyssynchronous motion of the abdomen and rib cage while breathing is well documented in patients with COPD. Newer biomechanical technologies have allowed for estimation of regional, compartmental, lung volumes during activity such as exercise, as well as respiratory muscle activation during breathing. Future directions of biomechanical analyses in COPD are trending toward wearable sensors, big data, and cloud computing. Each of these offers unique opportunities as well as challenges. Advanced analytics of sensor data can offer insight into the health of a system by quantifying complexity or fluctuations in patterns of movement, as healthy systems demonstrate flexibility and are thus adaptable to changing conditions. Biomechanics may offer clinical utility in prediction of 30-day readmissions, identifying disease severity, and patient monitoring. Biomechanics is complementary to other assessments, capturing what patients do, as well as their capability.}, } @article {pmid36274815, year = {2023}, author = {Bonino da Silva Santos, LO and Ferreira Pires, L and Graciano Martinez, V and Rebelo Moreira, JL and Silva Souza Guizzardi, R}, title = {Personal Health Train Architecture with Dynamic Cloud Staging.}, journal = {SN computer science}, volume = {4}, number = {1}, pages = {14}, pmid = {36274815}, issn = {2661-8907}, abstract = {Scientific advances, especially in the healthcare domain, can be accelerated by making data available for analysis. However, in traditional data analysis systems, data need to be moved to a central processing unit that performs analyses, which may be undesirable, e.g. due to privacy regulations in case these data contain personal information. 
This paper discusses the Personal Health Train (PHT) approach in which data processing is brought to the (personal health) data rather than the other way around, allowing (private) data accessed to be controlled, and to observe ethical and legal concerns. This paper introduces the PHT architecture and discusses the data staging solution that allows processing to be delegated to components spawned in a private cloud environment in case the (health) organisation hosting the data has limited resources to execute the required processing. This paper shows the feasibility and suitability of the solution with a relatively simple, yet representative, case study of data analysis of Covid-19 infections, which is performed by components that are created on demand and run in the Amazon Web Services platform. This paper also shows that the performance of our solution is acceptable, and that our solution is scalable. This paper demonstrates that the PHT approach enables data analysis with controlled access, preserving privacy and complying with regulations such as GDPR, while the solution is deployed in a private cloud environment.}, } @article {pmid36269974, year = {2022}, author = {Proctor, T and Seritan, S and Rudinger, K and Nielsen, E and Blume-Kohout, R and Young, K}, title = {Scalable Randomized Benchmarking of Quantum Computers Using Mirror Circuits.}, journal = {Physical review letters}, volume = {129}, number = {15}, pages = {150502}, doi = {10.1103/PhysRevLett.129.150502}, pmid = {36269974}, issn = {1079-7114}, abstract = {The performance of quantum gates is often assessed using some form of randomized benchmarking. However, the existing methods become infeasible for more than approximately five qubits. Here we show how to use a simple and customizable class of circuits-randomized mirror circuits-to perform scalable, robust, and flexible randomized benchmarking of Clifford gates. 
We show that this technique approximately estimates the infidelity of an average many-qubit logic layer, and we use simulations of up to 225 qubits with physically realistic error rates in the range 0.1%-1% to demonstrate its scalability. We then use up to 16 physical qubits of a cloud quantum computing platform to demonstrate that our technique can reveal and quantify crosstalk errors in many-qubit circuits.}, } @article {pmid36269885, year = {2023}, author = {Matar, A and Hansson, M and Slokenberga, S and Panagiotopoulos, A and Chassang, G and Tzortzatou, O and Pormeister, K and Uhlin, E and Cardone, A and Beauvais, M}, title = {A proposal for an international Code of Conduct for data sharing in genomics.}, journal = {Developing world bioethics}, volume = {23}, number = {4}, pages = {344--357}, doi = {10.1111/dewb.12381}, pmid = {36269885}, issn = {1471-8847}, support = {741716//Horizon 2020 Framework Programme/ ; }, mesh = {Humans ; *Genomics ; *Information Dissemination ; Research Personnel ; }, abstract = {As genomic research becomes commonplace across the world, there is an increased need to coordinate practices among researchers, especially with regard to data sharing. One such way is an international code of conduct. In September 2020, an expert panel consisting of representatives from various fields convened to discuss a draft proposal formed via a synthesis of existing professional codes and other recommendations. This article presents an overview and analysis of the main issues related to international genomic research that were discussed by the expert panel, and the results of the discussion and follow up responses by the experts. 
As a result, the article presents as an annex a proposal for an international code of conduct for data sharing in genomics that is meant to establish best practices.}, } @article {pmid36268157, year = {2022}, author = {Asif, RN and Abbas, S and Khan, MA and Atta-Ur-Rahman, and Sultan, K and Mahmud, M and Mosavi, A}, title = {Development and Validation of Embedded Device for Electrocardiogram Arrhythmia Empowered with Transfer Learning.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {5054641}, pmid = {36268157}, issn = {1687-5273}, mesh = {Humans ; *Electrocardiography/methods ; *Arrhythmias, Cardiac/diagnosis ; Cloud Computing ; Machine Learning ; Software ; }, abstract = {With the emergence of the Internet of Things (IoT), investigation of different diseases in healthcare improved, and cloud computing helped to centralize the data and to access patient records throughout the world. In this way, the electrocardiogram (ECG) is used to diagnose heart diseases or abnormalities. The machine learning techniques have been used previously but are feature-based and not as accurate as transfer learning; the proposed development and validation of embedded device prove ECG arrhythmia by using the transfer learning (DVEEA-TL) model. This model is the combination of hardware, software, and two datasets that are augmented and fused and further finds the accuracy results in high proportion as compared to the previous work and research. In the proposed model, a new dataset is made by the combination of the Kaggle dataset and the other, which is made by taking the real-time healthy and unhealthy datasets, and later, the AlexNet transfer learning approach is applied to get a more accurate reading in terms of ECG signals. 
In this proposed research, the DVEEA-TL model diagnoses the heart abnormality in respect of accuracy during the training and validation stages as 99.9% and 99.8%, respectively, which is the best and more reliable approach as compared to the previous research in this field.}, } @article {pmid36268145, year = {2022}, author = {Han, Z and Li, F and Wang, G}, title = {Financial Data Mining Model Based on K-Truss Community Query Model and Artificial Intelligence.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {9467623}, pmid = {36268145}, issn = {1687-5273}, mesh = {Humans ; *Artificial Intelligence ; *Data Mining ; Big Data ; Cloud Computing ; Algorithms ; }, abstract = {With the continuous development of Internet technology and related industries, emerging technologies such as big data and cloud computing have gradually integrated into and influenced social life. Emerging technologies have, to a large extent, revolutionized people's way of production and life and provided a lot of convenience for people's life. With the popularity of these technologies, information and data have also begun to explode. When we usually use an image storage system to process this information, we all know that an image contains countless pixels, and these pixels are interconnected to form the entire image. In real life, communities are like these pixels. On the Internet, communities are composed of interconnected parts. Nowadays, in various fields such as image modeling, we still have some problems, such as the problem of recognition rate, and we also found many problems when studying the community structure, which attracts more and more researchers, but the research on community query problems started late and the development is still relatively slow, so designing an excellent community query algorithm is a problem we urgently need to solve. 
With this goal, and based on previous research results, we have conducted in-depth discussions on community query algorithms, and hope that our research results can be applied to real life.}, } @article {pmid36267554, year = {2022}, author = {Jia, Z}, title = {Garden Landscape Design Method in Public Health Urban Planning Based on Big Data Analysis Technology.}, journal = {Journal of environmental and public health}, volume = {2022}, number = {}, pages = {2721247}, pmid = {36267554}, issn = {1687-9813}, mesh = {*Big Data ; *City Planning ; Gardens ; Public Health ; Data Analysis ; Technology ; }, abstract = {Aiming at the goal of high-quality development of the landscape architecture industry, we should actively promote the development and integration of digital, networked, and intelligent technologies and promote the intelligent and diversified development of the landscape architecture industry. Due to the limitation of drawing design technology and construction method, the traditional landscape architecture construction cannot really understand the public demands, and the construction scheme also relies on the experience and subjective aesthetics of professionals, resulting in improper connection between design and construction. At present, under the guidance of the national strategy, under the background of the rapid development of digital technologies such as 5G, big data, cloud computing, Internet of Things, and digital twins, the high integration of landscape architecture construction and digital technology has led to the transformation of the production mode of landscape architecture construction. Abundant professional data and convenient information processing platform enable landscape planners, designers, and builders to evaluate the whole life cycle of the project more scientifically and objectively and realize the digitalization of the whole process of investigation, analysis, design, construction, operation, and maintenance. 
For the landscape architecture industry, the significance of digital technology is not only to change the production tools but also to update the environmental awareness, design response, and construction methods, which makes the landscape architecture planning and design achieve the organic combination of qualitative and quantitative and also makes the landscape architecture discipline more scientific and rational. In this paper, the new method of combining grey relational degree with machine learning is used to provide new guidance for traditional landscape planning by using big data information in landscape design and has achieved very good results. The article analyzes the guidance of landscape architecture design under the big data in China and provides valuable reference for promoting the construction of landscape architecture in China.}, } @article {pmid36264891, year = {2022}, author = {Su, J and Su, K and Wang, S}, title = {Evaluation of digital economy development level based on multi-attribute decision theory.}, journal = {PloS one}, volume = {17}, number = {10}, pages = {e0270859}, pmid = {36264891}, issn = {1932-6203}, mesh = {Pregnancy ; Humans ; Female ; *Economic Development ; Artificial Intelligence ; Pandemics ; *COVID-19/epidemiology ; Decision Theory ; China ; }, abstract = {The maturity and commercialization of emerging digital technologies represented by artificial intelligence, cloud computing, block chain and virtual reality are giving birth to a new and higher economic form, that is, digital economy. Digital economy is different from the traditional industrial economy. It is clean, efficient, green and recyclable. It represents and promotes the future direction of global economic development, especially in the context of the sudden COVID-19 pandemic as a continuing disaster. Therefore, it is essential to establish the comprehensive evaluation model of digital economy development scientifically and reasonably. 
In this paper, first on the basis of literature analysis, the relevant indicators of digital economy development are collected manually and then screened by the grey dynamic clustering and rough set reduction theory. The evaluation index system of digital economy development is constructed from four dimensions: digital innovation impetus support, digital infrastructure construction support, national economic environment and digital policy guarantee, digital integration and application. Next the subjective weight and objective weight are calculated by the group FAHP method, entropy method and improved CRITIC method, and the combined weight is integrated with the thought of maximum variance. The grey correlation analysis and improved VIKOR model are combined to systematically evaluate the digital economy development level of 31 provinces and cities in China from 2013 to 2019. The results of empirical analysis show that the overall development of China's digital economy shows a trend of superposition and rise, and the development of digital economy in the four major economic zones is unbalanced. Finally, we put forward targeted opinions on the construction of China's provincial digital economy.}, } @article {pmid36264608, year = {2022}, author = {Moya-Galé, G and Walsh, SJ and Goudarzi, A}, title = {Automatic Assessment of Intelligibility in Noise in Parkinson Disease: Validation Study.}, journal = {Journal of medical Internet research}, volume = {24}, number = {10}, pages = {e40567}, pmid = {36264608}, issn = {1438-8871}, mesh = {Humans ; Dysarthria/etiology/complications ; *Parkinson Disease/complications ; Artificial Intelligence ; Speech Intelligibility ; *Speech Perception ; }, abstract = {BACKGROUND: Most individuals with Parkinson disease (PD) experience a degradation in their speech intelligibility. 
Research on the use of automatic speech recognition (ASR) to assess intelligibility is still sparse, especially when trying to replicate communication challenges in real-life conditions (ie, noisy backgrounds). Developing technologies to automatically measure intelligibility in noise can ultimately assist patients in self-managing their voice changes due to the disease.

OBJECTIVE: The goal of this study was to pilot-test and validate the use of a customized web-based app to assess speech intelligibility in noise in individuals with dysarthria associated with PD.

METHODS: In total, 20 individuals with dysarthria associated with PD and 20 healthy controls (HCs) recorded a set of sentences using their phones. The Google Cloud ASR API was used to automatically transcribe the speakers' sentences. An algorithm was created to embed speakers' sentences in +6-dB signal-to-noise multitalker babble. Results from ASR performance were compared to those from 30 listeners who orthographically transcribed the same set of sentences. Data were reduced into a single event, defined as a success if the artificial intelligence (AI) system transcribed a random speaker or sentence as well or better than the average of 3 randomly chosen human listeners. These data were further analyzed by logistic regression to assess whether AI success differed by speaker group (HCs or speakers with dysarthria) or was affected by sentence length. A discriminant analysis was conducted on the human listener data and AI transcriber data independently to compare the ability of each data set to discriminate between HCs and speakers with dysarthria.

RESULTS: The data analysis indicated a 0.8 probability (95% CI 0.65-0.91) that AI performance would be as good or better than the average human listener. AI transcriber success probability was not found to be dependent on speaker group. AI transcriber success was found to decrease with sentence length, losing an estimated 0.03 probability of transcribing as well as the average human listener for each word increase in sentence length. The AI transcriber data were found to offer the same discrimination of speakers into categories (HCs and speakers with dysarthria) as the human listener data.

CONCLUSIONS: ASR has the potential to assess intelligibility in noise in speakers with dysarthria associated with PD. Our results hold promise for the use of AI with this clinical population, although a full range of speech severity needs to be evaluated in future work, as well as the effect of different speaking tasks on ASR.}, } @article {pmid36259975, year = {2023}, author = {}, title = {Understanding enterprise data warehouses to support clinical and translational research: enterprise information technology relationships, data governance, workforce, and cloud computing.}, journal = {Journal of the American Medical Informatics Association : JAMIA}, volume = {30}, number = {2}, pages = {407}, doi = {10.1093/jamia/ocac206}, pmid = {36259975}, issn = {1527-974X}, } @article {pmid36259009, year = {2022}, author = {Gendia, A}, title = {Cloud Based AI-Driven Video Analytics (CAVs) in Laparoscopic Surgery: A Step Closer to a Virtual Portfolio.}, journal = {Cureus}, volume = {14}, number = {9}, pages = {e29087}, pmid = {36259009}, issn = {2168-8184}, abstract = {AIMS: To outline the use of cloud-based artificial intelligence (AI)-driven video analytics (CAVs) in minimally invasive surgery and to propose their potential as a virtual portfolio for trainee and established surgeons. Methods: An independent online demonstration was requested from three platforms, namely Theator (Palo Alto, California, USA), Touch Surgery™ (Medtronic, London, England, UK), and C-SATS® (Seattle, Washington, USA). The assessed domains were online and app-based accessibility, the ability for timely trainee feedback, and AI integration for operation-specific steps and critical views.

RESULTS: The CAVs enable users to record surgeries with the advantage of limitless video storage through clouding and smart integration into theatre settings. This can be used to view surgeries and review trainee videos through a medium of communication and sharing with the ability to provide feedback. Theator and C-SATS® provide their users with surgical skills scoring systems with customizable options that can be used to provide structured feedback to trainees. Additionally, AI plays an important role in all three platforms by providing time-based analysis of steps and highlighting critical milestones.  Conclusion: Cloud-based AI-driven video analytics is an emerging new technology that enables users to store, analyze, and review videos. This technology has the potential to improve training, governance, and standardization procedures. Moreover, with the future adaptation of the technology, CAVs can be integrated into the trainees' portfolios as part of their virtual curriculum. This can enable a structured assessment of a surgeon's progression and degree of experience throughout their surgical career.}, } @article {pmid36258393, year = {2022}, author = {Yamamoto, Y and Shimobaba, T and Ito, T}, title = {HORN-9: Special-purpose computer for electroholography with the Hilbert transform.}, journal = {Optics express}, volume = {30}, number = {21}, pages = {38115--38127}, doi = {10.1364/OE.471720}, pmid = {36258393}, issn = {1094-4087}, abstract = {Holography is a technology that uses light interference and diffraction to record and reproduce three-dimensional (3D) information. Using computers, holographic 3D scenes (electroholography) have been widely studied. Nevertheless, its practical application requires enormous computing power, and current computers have limitations in real-time processing. 
In this study, we show that holographic reconstruction (HORN)-9, a special-purpose computer for electroholography with the Hilbert transform, can compute a 1,920 × 1,080-pixel computer-generated hologram from a point cloud of 65,000 points in 0.030 s (33 fps) on a single card. This performance is 8, 7, and 170 times more efficient than a previously developed HORN-8, a graphics processing unit, and a central processing unit (CPU), respectively. We also demonstrated the real-time processing and display of 400,000 points on multiple HORN-9s, achieving an acceleration of 600 times with four HORN-9 units compared with a single CPU.}, } @article {pmid36255917, year = {2022}, author = {Houskeeper, HF and Hooker, SB and Cavanaugh, KC}, title = {Spectrally simplified approach for leveraging legacy geostationary oceanic observations.}, journal = {Applied optics}, volume = {61}, number = {27}, pages = {7966--7977}, doi = {10.1364/AO.465491}, pmid = {36255917}, issn = {1539-4522}, mesh = {*Environmental Monitoring/methods ; *Ecosystem ; Satellite Imagery ; Oceans and Seas ; Water ; }, abstract = {The use of multispectral geostationary satellites to study aquatic ecosystems improves the temporal frequency of observations and mitigates cloud obstruction, but no operational capability presently exists for the coastal and inland waters of the United States. The Advanced Baseline Imager (ABI) on the current iteration of the Geostationary Operational Environmental Satellites, termed the R Series (GOES-R), however, provides sub-hourly imagery and the opportunity to overcome this deficit and to leverage a large repository of existing GOES-R aquatic observations. The fulfillment of this opportunity is assessed herein using a spectrally simplified, two-channel aquatic algorithm consistent with ABI wave bands to estimate the diffuse attenuation coefficient for photosynthetically available radiation, Kd(PAR). 
First, an in situ ABI dataset was synthesized using a globally representative dataset of above- and in-water radiometric data products. Values of Kd(PAR) were estimated by fitting the ratio of the shortest and longest visible wave bands from the in situ ABI dataset to coincident, in situ Kd(PAR) data products. The algorithm was evaluated based on an iterative cross-validation analysis in which 80% of the dataset was randomly partitioned for fitting and the remaining 20% was used for validation. The iteration producing the median coefficient of determination (R[2]) value (0.88) resulted in a root mean square difference of 0.319 m[-1], or 8.5% of the range in the validation dataset. Second, coincident mid-day images of central and southern California from ABI and from the Moderate Resolution Imaging Spectroradiometer (MODIS) were compared using Google Earth Engine (GEE). GEE default ABI reflectance values were adjusted based on a near infrared signal. Matchups between the ABI and MODIS imagery indicated similar spatial variability (R[2]=0.60) between ABI adjusted blue-to-red reflectance ratio values and MODIS default diffuse attenuation coefficient for spectral downward irradiance at 490 nm, Kd(490), values. 
This work demonstrates that if an operational capability to provide ABI aquatic data products was realized, the spectral configuration of ABI would potentially support a sub-hourly, visible aquatic data product that is applicable to water-mass tracing and physical oceanography research.}, } @article {pmid36254227, year = {2022}, author = {Song, L and Wang, H and Shi, Z}, title = {A Literature Review Research on Monitoring Conditions of Mechanical Equipment Based on Edge Computing.}, journal = {Applied bionics and biomechanics}, volume = {2022}, number = {}, pages = {9489306}, pmid = {36254227}, issn = {1176-2322}, abstract = {The motivation of this research is to review all methods used in data compression of collected data in monitoring the condition of equipment based on the framework of edge computing. Since a large amount of signal data is collected when monitoring conditions of mechanical equipment, namely, signals of running machines are continuously transmitted to be crunched, compressed data should be handled effectively. However, this process occupies resources since data transmission requires the allocation of a large capacity. To resolve this problem, this article examines the monitoring conditions of equipment based on edge computing. First, the signal is pre-processed by edge computing, so that the fault characteristics can be identified quickly. Second, signals with difficult-to-identify fault characteristics need to be compressed to save transmission resources. Then, different types of signal data collected in mechanical equipment conditions are compressed by various compression methods and uploaded to the cloud. Finally, the cloud platform, which has powerful processing capability, is processed to improve the volume of the data transmission. 
By examining and analyzing the monitoring conditions and signal compression methods of mechanical equipment, the future development trend is elaborated to provide references and ideas for the contemporary research of data monitoring and data compression algorithms. Consequently, the manuscript presents different compression methods in detail and clarifies the data compression methods used for the signal compression of equipment based on edge computing.}, } @article {pmid36253343, year = {2022}, author = {Kobayashi, K and Yoshida, H and Tanjo, T and Aida, K}, title = {Cloud service checklist for academic communities and customization for genome medical research.}, journal = {Human genome variation}, volume = {9}, number = {1}, pages = {36}, pmid = {36253343}, issn = {2054-345X}, support = {JP21km0405501//Japan Agency for Medical Research and Development (AMED)/ ; JP21km0405501//Japan Agency for Medical Research and Development (AMED)/ ; JP21km0405501//Japan Agency for Medical Research and Development (AMED)/ ; JP21km0405501//Japan Agency for Medical Research and Development (AMED)/ ; }, abstract = {In this paper, we present a cloud service checklist designed to help IT administrators or researchers in academic organizations select the most suitable cloud services. This checklist, which comprises items that we believe IT administrators or researchers in academic organizations should consider when they adopt cloud services, comprehensively covers the issues related to a variety of cloud services, including security, functionality, performance, and law. In response to the increasing demands for storage and computing resources in genome medical science communities, various guidelines for using resources operated by external organizations, such as cloud services, have been published by different academic funding agencies and the Japanese government. 
However, it is sometimes difficult to identify the checklist items that satisfy the genome medical science community's guidelines, and some of these requirements are not included in the existing checklists. This issue provided our motivation for creating a cloud service checklist customized for genome medical research communities. The resulting customized checklist is designed to help researchers easily find information about the cloud services that satisfy the guidelines in genome medical science communities. Additionally, we explore whether many cloud service providers satisfy the requirements or checklist items in the cloud service checklist for genome medical research by evaluating their survey responses.}, } @article {pmid36248925, year = {2022}, author = {Bu, H and Xia, J and Wu, Q and Chen, L}, title = {Relationship Discovery and Hierarchical Embedding for Web Service Quality Prediction.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {9240843}, pmid = {36248925}, issn = {1687-5273}, mesh = {*Cloud Computing ; *Internet ; Research Design ; }, abstract = {Web Services Quality Prediction has become a popular research theme in Cloud Computing and the Internet of Things. Graph Convolutional Network (GCN)-based methods are more efficient by aggregating feature information from the local graph neighborhood. Despite the fact that these prior works have demonstrated better prediction performance, they are still challenged as follows: (1) first, the user-service bipartite graph is essentially a heterogeneous graph that contains four kinds of relationships. Previous GCN-based models have only focused on using some of these relationships. Therefore, how to fully mine and use the above relationships is critical to improving the prediction accuracy. 
(2) After the embedding is obtained from the GCNs, the commonly used similarity calculation methods for downstream prediction need to traverse the data one by one, which is time-consuming. To address these challenges, this work proposes a novel relationship discovery and hierarchical embedding method based on GCNs (named as RDHE), which designs a dual mechanism to represent services and users, respectively, designs a new community discovery method and a fast similarity calculation process, which can fully mine and utilize the relationships in the graph. The results of the experiment on the real data set show that this method greatly improved the accuracy of the web service quality prediction.}, } @article {pmid36248269, year = {2022}, author = {Mondal, P and Dutta, T and Qadir, A and Sharma, S}, title = {Radar and optical remote sensing for near real-time assessments of cyclone impacts on coastal ecosystems.}, journal = {Remote sensing in ecology and conservation}, volume = {8}, number = {4}, pages = {506--520}, pmid = {36248269}, issn = {2056-3485}, abstract = {Rapid impact assessment of cyclones on coastal ecosystems is critical for timely rescue and rehabilitation operations in highly human-dominated landscapes. Such assessments should also include damage assessments of vegetation for restoration planning in impacted natural landscapes. Our objective is to develop a remote sensing-based approach combining satellite data derived from optical (Sentinel-2), radar (Sentinel-1), and LiDAR (Global Ecosystem Dynamics Investigation) platforms for rapid assessment of post-cyclone inundation in non-forested areas and vegetation damage in a primarily forested ecosystem. We apply this multi-scalar approach for assessing damages caused by the cyclone Amphan that hit coastal India and Bangladesh in May 2020, severely flooding several districts in the two countries, and causing destruction to the Sundarban mangrove forests. Our analysis shows that at least 6821 sq. km. 
land across the 39 study districts was inundated even after 10 days after the cyclone. We further calculated the change in forest greenness as the difference in normalized difference vegetation index (NDVI) pre- and post-cyclone. Our findings indicate a <0.2 unit decline in NDVI in 3.45 sq. km. of the forest. Rapid assessment of post-cyclone damage in mangroves is challenging due to limited navigability of waterways, but critical for planning of mitigation and recovery measures. We demonstrate the utility of Otsu method, an automated statistical approach of the Google Earth Engine platform to identify inundated areas within days after a cyclone. Our radar-based inundation analysis advances current practices because it requires minimal user inputs, and is effective in the presence of high cloud cover. Such rapid assessment, when complemented with detailed information on species and vegetation composition, can inform appropriate restoration efforts in severely impacted regions and help decision makers efficiently manage resources for recovery and aid relief. We provide the datasets from this study on an open platform to aid in future research and planning endeavors.}, } @article {pmid36247859, year = {2022}, author = {Saba Raoof, S and Durai, MAS}, title = {A Comprehensive Review on Smart Health Care: Applications, Paradigms, and Challenges with Case Studies.}, journal = {Contrast media & molecular imaging}, volume = {2022}, number = {}, pages = {4822235}, pmid = {36247859}, issn = {1555-4317}, mesh = {Delivery of Health Care ; Humans ; *Internet of Things ; Quality of Life ; *Telemedicine/methods ; }, abstract = {Growth and advancement of the Deep Learning (DL) and the Internet of Things (IoT) are figuring out their way over the modern contemporary world through integrating various technologies in distinct fields viz, agriculture, manufacturing, energy, transportation, supply chains, cities, healthcare, and so on. 
Researchers had identified the feasibility of integrating deep learning, cloud, and IoT to enhance the overall automation, where IoT may prolong its application area through utilizing cloud services and the cloud can even prolong its applications through data acquired by IoT devices like sensors and deep learning for disease detection and diagnosis. This study explains a summary of various techniques utilized in smart healthcare, i.e., deep learning, cloud-based-IoT applications in smart healthcare, fog computing in smart healthcare, and challenges and issues faced by smart healthcare and it presents a wider scope as it is not intended for a particular application such as patient monitoring, disease detection, and diagnosing and the technologies used for developing this smart systems are outlined. Smart health bestows the quality of life. Convenient and comfortable living is made possible by the services provided by smart healthcare systems (SHSs). Since healthcare is a massive area with enormous data and a broad spectrum of diseases associated with different organs, immense research can be done to overcome the drawbacks of traditional healthcare methods. Deep learning with IoT can effectively be applied in the healthcare sector to automate the diagnosing and treatment process even in rural areas remotely. 
Applications may include disease prevention and diagnosis, fitness and patient monitoring, food monitoring, mobile health, telemedicine, emergency systems, assisted living, self-management of chronic diseases, and so on.}, } @article {pmid36246518, year = {2022}, author = {Coelho, R and Braga, R and David, JMN and Stroele, V and Campos, F and Dantas, M}, title = {A Blockchain-Based Architecture for Trust in Collaborative Scientific Experimentation.}, journal = {Journal of grid computing}, volume = {20}, number = {4}, pages = {35}, pmid = {36246518}, issn = {1572-9184}, abstract = {In scientific collaboration, data sharing, the exchange of ideas and results are essential to knowledge construction and the development of science. Hence, we must guarantee interoperability, privacy, traceability (reinforcing transparency), and trust. Provenance has been widely recognized for providing a history of the steps taken in scientific experiments. Consequently, we must support traceability, assisting in scientific results' reproducibility. One of the technologies that can enhance trust in collaborative scientific experimentation is blockchain. This work proposes an architecture, named BlockFlow, based on blockchain, provenance, and cloud infrastructure to bring trust and traceability in the execution of collaborative scientific experiments. The proposed architecture is implemented on Hyperledger, and a scenario about the genomic sequencing of the SARS-CoV-2 coronavirus is used to evaluate the architecture, discussing the benefits of providing traceability and trust in collaborative scientific experimentation. Furthermore, the architecture addresses the heterogeneity of shared data, facilitating interpretation by geographically distributed researchers and analysis of such data. 
Through a blockchain-based architecture that provides support on provenance and blockchain, we can enhance data sharing, traceability, and trust in collaborative scientific experiments.}, } @article {pmid36240003, year = {2022}, author = {Kang, G and Kim, YG}, title = {Secure Collaborative Platform for Health Care Research in an Open Environment: Perspective on Accountability in Access Control.}, journal = {Journal of medical Internet research}, volume = {24}, number = {10}, pages = {e37978}, pmid = {36240003}, issn = {1438-8871}, mesh = {*Blockchain ; *Computer Security ; Health Services Research ; Humans ; Privacy ; Social Responsibility ; }, abstract = {BACKGROUND: With the recent use of IT in health care, a variety of eHealth data are increasingly being collected and stored by national health agencies. As these eHealth data can advance the modern health care system and make it smarter, many researchers want to use these data in their studies. However, using eHealth data brings about privacy and security concerns. The analytical environment that supports health care research must also consider many requirements. For these reasons, countries generally provide research platforms for health care, but some data providers (eg, patients) are still concerned about the security and privacy of their eHealth data. Thus, a more secure platform for health care research that guarantees the utility of eHealth data while focusing on its security and privacy is needed.

OBJECTIVE: This study aims to implement a research platform for health care called the health care big data platform (HBDP), which is more secure than previous health care research platforms. The HBDP uses attribute-based encryption to achieve fine-grained access control and encryption of stored eHealth data in an open environment. Moreover, in the HBDP, platform administrators can perform the appropriate follow-up (eg, block illegal users) and monitoring through a private blockchain. In other words, the HBDP supports accountability in access control.

METHODS: We first identified potential security threats in the health care domain. We then defined the security requirements to minimize the identified threats. In particular, the requirements were defined based on the security solutions used in existing health care research platforms. We then proposed the HBDP, which meets defined security requirements (ie, access control, encryption of stored eHealth data, and accountability). Finally, we implemented the HBDP to prove its feasibility.

RESULTS: This study carried out case studies for illegal user detection via the implemented HBDP based on specific scenarios related to the threats. As a result, the platform detected illegal users appropriately via the security agent. Furthermore, in the empirical evaluation of massive data encryption (eg, 100,000 rows with 3 sensitive columns within 46 columns) for column-level encryption, full encryption after column-level encryption, and full decryption including column-level decryption, our approach achieved approximately 3 minutes, 1 minute, and 9 minutes, respectively. In the blockchain, average latencies and throughputs in 1Org with 2Peers reached approximately 18 seconds and 49 transactions per second (TPS) in read mode and approximately 4 seconds and 120 TPS in write mode in 300 TPS.

CONCLUSIONS: The HBDP enables fine-grained access control and secure storage of eHealth data via attribute-based encryption cryptography. It also provides nonrepudiation and accountability through the blockchain. Therefore, we consider that our proposal provides a sufficiently secure environment for the use of eHealth data in health care research.}, } @article {pmid36237741, year = {2022}, author = {Konstantinou, C and Xanthopoulos, A and Tsaras, K and Skoularigis, J and Triposkiadis, F and Papagiannis, D}, title = {Vaccination Coverage Against Human Papillomavirus in Female Students in Cyprus.}, journal = {Cureus}, volume = {14}, number = {9}, pages = {e28936}, pmid = {36237741}, issn = {2168-8184}, abstract = {Background Human papillomavirus (HPV) has been associated with the development of several cancers and cardiovascular diseases in females. Nevertheless, there is still poor data on vaccination coverage against HPV in several countries, including Cyprus. The main target of the present research was to assess the vaccination status of female students in Cyprus. Methodology An online survey was conducted via a cloud-based short questionnaire on Google Forms. Students with a known email address were initially invited via email to complete the survey. The questionnaire was distributed to 340 students, aged 18-49 years old, who lived in Cyprus (60% response rate). Results The total vaccination coverage was 38.1%. The mean age of participants was 23.5 (±6.5) years. The major reason for non-vaccination was the belief that participants were not at risk of serious illness from HPV infection (22%), followed by the reported lack of time to get vaccinated (16%) and inertia (13%). 
The students who had information about the safety of HPV vaccines from electronic sources of information (television, websites, and blogs) had lower vaccination coverage compared to those who had received information from alternative sources (primary health centers, family doctors, or obstetricians) (relative risk (RR) = 1.923, 95% confidence interval (CI) = 0.9669-3.825; p = 0.033). No significant differences in vaccination rates between participants who were coming from schools of health sciences versus those from financial schools (RR = 1.082, 95% CI = 0.7574-1.544; p = 0.3348) were observed. Conclusions Public health policy interventions and education on HPV vaccines are effective ways to improve the awareness and acceptance rate of HPV vaccination among female students and improve the HPV vaccination coverage level in Cyprus.}, } @article {pmid36236773, year = {2022}, author = {Shumba, AT and Montanaro, T and Sergi, I and Fachechi, L and De Vittorio, M and Patrono, L}, title = {Leveraging IoT-Aware Technologies and AI Techniques for Real-Time Critical Healthcare Applications.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {19}, pages = {}, pmid = {36236773}, issn = {1424-8220}, mesh = {Aged ; *Artificial Intelligence ; Biocompatible Materials ; *Blood Glucose ; Delivery of Health Care ; Humans ; Technology ; }, abstract = {Personalised healthcare has seen significant improvements due to the introduction of health monitoring technologies that allow wearable devices to unintrusively monitor physiological parameters such as heart health, blood pressure, sleep patterns, and blood glucose levels, among others. Additionally, utilising advanced sensing technologies based on flexible and innovative biocompatible materials in wearable devices allows high accuracy and precision measurement of biological signals. 
Furthermore, applying real-time Machine Learning algorithms to highly accurate physiological parameters allows precise identification of unusual patterns in the data to provide health event predictions and warnings for timely intervention. However, in the predominantly adopted architectures, health event predictions based on Machine Learning are typically obtained by leveraging Cloud infrastructures characterised by shortcomings such as delayed response times and privacy issues. Fortunately, recent works highlight that a new paradigm based on Edge Computing technologies and on-device Artificial Intelligence significantly improve the latency and privacy issues. Applying this new paradigm to personalised healthcare architectures can significantly improve their efficiency and efficacy. Therefore, this paper reviews existing IoT healthcare architectures that utilise wearable devices and subsequently presents a scalable and modular system architecture to leverage emerging technologies to solve identified shortcomings. The defined architecture includes ultrathin, skin-compatible, flexible, high precision piezoelectric sensors, low-cost communication technologies, on-device intelligence, Edge Intelligence, and Edge Computing technologies. To provide development guidelines and define a consistent reference architecture for improved scalable wearable IoT-based critical healthcare architectures, this manuscript outlines the essential functional and non-functional requirements based on deductions from existing architectures and emerging technology trends. The presented system architecture can be applied to many scenarios, including ambient assisted living, where continuous surveillance and issuance of timely warnings can afford independence to the elderly and chronically ill. 
We conclude that the distribution and modularity of architecture layers, local AI-based elaboration, and data packaging consistency are the more essential functional requirements for critical healthcare application use cases. We also identify fast response time, utility, comfort, and low cost as the essential non-functional requirements for the defined system architecture.}, } @article {pmid36236664, year = {2022}, author = {Shahzad, K and Zia, T and Qazi, EU}, title = {A Review of Functional Encryption in IoT Applications.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {19}, pages = {}, pmid = {36236664}, issn = {1424-8220}, support = {SRC-PR2-01//Security Research Center at Naif Arab University for Security Sciences/ ; }, abstract = {The Internet of Things (IoT) represents a growing aspect of how entities, including humans and organizations, are likely to connect with others in their public and private interactions. The exponential rise in the number of IoT devices, resulting from ever-growing IoT applications, also gives rise to new opportunities for exploiting potential security vulnerabilities. In contrast to conventional cryptosystems, frameworks that incorporate fine-grained access control offer better opportunities for protecting valuable assets, especially when the connectivity level is dense. Functional encryption is an exciting new paradigm of public-key encryption that supports fine-grained access control, generalizing a range of existing fine-grained access control mechanisms. This survey reviews the recent applications of functional encryption and the major cryptographic primitives that it covers, identifying areas where the adoption of these primitives has had the greatest impact. We first provide an overview of different application areas where these access control schemes have been applied. 
Then, an in-depth survey of how the schemes are used in a multitude of applications related to IoT is given, rendering a potential vision of security and integrity that this growing field promises. Towards the end, we identify some research trends and state the open challenges that current developments face for a secure IoT realization.}, } @article {pmid36236587, year = {2022}, author = {Qin, M and Liu, T and Hou, B and Gao, Y and Yao, Y and Sun, H}, title = {A Low-Latency RDP-CORDIC Algorithm for Real-Time Signal Processing of Edge Computing Devices in Smart Grid Cyber-Physical Systems.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {19}, pages = {}, pmid = {36236587}, issn = {1424-8220}, support = {2019YJ0309//Sichuan Provincial Science and Technology Department/ ; }, abstract = {Smart grids are being expanded in scale with the increasing complexity of the equipment. Edge computing is gradually replacing conventional cloud computing due to its low latency, low power consumption, and high reliability. The CORDIC algorithm has the characteristics of high-speed real-time processing and is very suitable for hardware accelerators in edge computing devices. The iterative calculation method of the CORDIC algorithm yet leads to problems such as complex structure and high consumption of hardware resource. In this paper, we propose an RDP-CORDIC algorithm which pre-computes all micro-rotation directions and transforms the conventional single-stage iterative structure into a three-stage and multi-stage combined iterative structure, thereby enabling it to solve the problems of the conventional CORDIC algorithm with many iterations and high consumption. An accuracy compensation algorithm for the direction prediction constant is also proposed to solve the problem of high ROM consumption in the high precision implementation of the RDP-CORDIC algorithm. 
The experimental results showed that the RDP-CORDIC algorithm had faster computation speed and lower resource consumption with higher guaranteed accuracy than other CORDIC algorithms. Therefore, the RDP-CORDIC algorithm proposed in this paper may effectively increase computation performance while reducing the power and resource consumption of edge computing devices in smart grid systems.}, } @article {pmid36236546, year = {2022}, author = {Busaeed, S and Katib, I and Albeshri, A and Corchado, JM and Yigitcanlar, T and Mehmood, R}, title = {LidSonic V2.0: A LiDAR and Deep-Learning-Based Green Assistive Edge Device to Enhance Mobility for the Visually Impaired.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {19}, pages = {}, pmid = {36236546}, issn = {1424-8220}, support = {RG-11-611-38//King Abdulaziz University/ ; }, mesh = {*Deep Learning ; *Disabled Persons ; Humans ; *Self-Help Devices ; *Visually Impaired Persons ; *Wheelchairs ; }, abstract = {Over a billion people around the world are disabled, among whom 253 million are visually impaired or blind, and this number is greatly increasing due to ageing, chronic diseases, and poor environments and health. Despite many proposals, the current devices and systems lack maturity and do not completely fulfill user requirements and satisfaction. Increased research activity in this field is required in order to encourage the development, commercialization, and widespread acceptance of low-cost and affordable assistive technologies for visual impairment and other disabilities. This paper proposes a novel approach using a LiDAR with a servo motor and an ultrasonic sensor to collect data and predict objects using deep learning for environment perception and navigation. We adopted this approach using a pair of smart glasses, called LidSonic V2.0, to enable the identification of obstacles for the visually impaired. 
The LidSonic system consists of an Arduino Uno edge computing device integrated into the smart glasses and a smartphone app that transmits data via Bluetooth. Arduino gathers data, operates the sensors on the smart glasses, detects obstacles using simple data processing, and provides buzzer feedback to visually impaired users. The smartphone application collects data from Arduino, detects and classifies items in the spatial environment, and gives spoken feedback to the user on the detected objects. In comparison to image-processing-based glasses, LidSonic uses far less processing time and energy to classify obstacles using simple LiDAR data, according to several integer measurements. We comprehensively describe the proposed system's hardware and software design, having constructed their prototype implementations and tested them in real-world environments. Using the open platforms, WEKA and TensorFlow, the entire LidSonic system is built with affordable off-the-shelf sensors and a microcontroller board costing less than USD 80. Essentially, we provide designs of an inexpensive, miniature green device that can be built into, or mounted on, any pair of glasses or even a wheelchair to help the visually impaired. Our approach enables faster inference and decision-making using relatively low energy with smaller data sizes, as well as faster communications for edge, fog, and cloud computing.}, } @article {pmid36236536, year = {2022}, author = {Lei, L and Kou, L and Zhan, X and Zhang, J and Ren, Y}, title = {An Anomaly Detection Algorithm Based on Ensemble Learning for 5G Environment.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {19}, pages = {}, pmid = {36236536}, issn = {1424-8220}, mesh = {*Algorithms ; Cloud Computing ; Learning ; Machine Learning ; *Software ; }, abstract = {With the advent of the digital information age, new data services such as virtual reality, industrial Internet, and cloud computing have proliferated in recent years. 
As a result, it increases operator demand for 5G bearer networks by providing features such as high transmission capacity, ultra-long transmission distance, network slicing, and intelligent management and control. Software-defined networking, as a new network architecture, intends to increase network flexibility and agility and can better satisfy the demands of 5G networks for network slicing. Nevertheless, software-defined networking still faces the challenge of network intrusion. We propose an abnormal traffic detection method based on the stacking method and self-attention mechanism, which makes up for the shortcoming of the inability to track long-term dependencies between data samples in ensemble learning. Our method utilizes a self-attention mechanism and a convolutional network to automatically learn long-term associations between traffic samples and provide them to downstream tasks in sample embedding. In addition, we design a novel stacking ensemble method, which computes the sample embedding and the predicted values of the heterogeneous base learner through the fusion module to obtain the final outlier results. This paper conducts experiments on abnormal traffic datasets in the software-defined network environment, calculates precision, recall and F1-score, and compares and analyzes them with other algorithms. 
The experimental results show that the method designed in this paper achieves 0.9972, 0.9996, and 0.9984 in multiple indicators of precision, recall, and F1-score, respectively, which are better than the comparison methods.}, } @article {pmid36236523, year = {2022}, author = {Yi, F and Zhang, L and Xu, L and Yang, S and Lu, Y and Zhao, D}, title = {WSNEAP: An Efficient Authentication Protocol for IIoT-Oriented Wireless Sensor Networks.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {19}, pages = {}, pmid = {36236523}, issn = {1424-8220}, mesh = {Computer Communication Networks ; *Computer Security ; *Internet of Things ; }, abstract = {With the development of the Industrial Internet of Things (IIoT), industrial wireless sensors need to upload the collected private data to the cloud servers, resulting in a large amount of private data being exposed on the Internet. Private data are vulnerable to hacking. Many complex wireless-sensor-authentication protocols have been proposed. In this paper, we proposed an efficient authentication protocol for IIoT-oriented wireless sensor networks. The protocol introduces the PUF chip, and uses the Bloom filter to save and query the challenge-response pairs generated by the PUF chip. It ensures the security of the physical layer of the device and reduces the computing cost and communication cost of the wireless sensor side. The protocol introduces a pre-authentication mechanism to achieve continuous authentication between the gateway and the cloud server. The overall computational cost of the protocol is reduced. Formal security analysis and informal security analysis proved that our proposed protocol has more security features. We implemented various security primitives using the MIRACL cryptographic library and GMP large number library. Our proposed protocol was compared in-depth with related work. 
Detailed experiments show that our proposed protocol significantly reduces the computational cost and communication cost on the wireless sensor side and the overall computational cost of the protocol.}, } @article {pmid36236264, year = {2022}, author = {Thirumalaisamy, M and Basheer, S and Selvarajan, S and Althubiti, SA and Alenezi, F and Srivastava, G and Lin, JC}, title = {Interaction of Secure Cloud Network and Crowd Computing for Smart City Data Obfuscation.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {19}, pages = {}, pmid = {36236264}, issn = {1424-8220}, mesh = {*Algorithms ; *Cloud Computing ; Software ; }, abstract = {There can be many inherent issues in the process of managing cloud infrastructure and the platform of the cloud. The platform of the cloud manages cloud software and legality issues in making contracts. The platform also handles the process of managing cloud software services and legal contract-based segmentation. In this paper, we tackle these issues directly with some feasible solutions. For these constraints, the Averaged One-Dependence Estimators (AODE) classifier and the SELECT Applicable Only to Parallel Server (SELECT-APSL ASA) method are proposed to separate the data related to the place. ASA is made up of the AODE and SELECT Applicable Only to Parallel Server. The AODE classifier is used to separate the data from smart city data based on the hybrid data obfuscation technique. The data from the hybrid data obfuscation technique manages 50% of the raw data, and 50% of hospital data is masked using the proposed transmission. The analysis of energy consumption before the cryptosystem shows the total packet delivered by about 71.66% compared with existing algorithms. The analysis of energy consumption after cryptosystem assumption shows 47.34% consumption, compared to existing state-of-the-art algorithms. 
The average energy consumption before data obfuscation decreased by 2.47%, and the average energy consumption after data obfuscation was reduced by 9.90%. The analysis of the makespan time before data obfuscation decreased by 33.71%. Compared to existing state-of-the-art algorithms, the study of makespan time after data obfuscation decreased by 1.3%. These impressive results show the strength of our methodology.}, } @article {pmid36227021, year = {2023}, author = {Yang, DM and Chang, TJ and Hung, KF and Wang, ML and Cheng, YF and Chiang, SH and Chen, MF and Liao, YT and Lai, WQ and Liang, KH}, title = {Smart healthcare: A prospective future medical approach for COVID-19.}, journal = {Journal of the Chinese Medical Association : JCMA}, volume = {86}, number = {2}, pages = {138-146}, pmid = {36227021}, issn = {1728-7731}, mesh = {Humans ; *COVID-19 ; Artificial Intelligence ; Post-Acute COVID-19 Syndrome ; Pandemics/prevention & control ; Delivery of Health Care ; }, abstract = {COVID-19 has greatly affected human life for over 3 years. In this review, we focus on smart healthcare solutions that address major requirements for coping with the COVID-19 pandemic, including (1) the continuous monitoring of severe acute respiratory syndrome coronavirus 2, (2) patient stratification with distinct short-term outcomes (eg, mild or severe diseases) and long-term outcomes (eg, long COVID), and (3) adherence to medication and treatments for patients with COVID-19. Smart healthcare often utilizes medical artificial intelligence (AI) and cloud computing and integrates cutting-edge biological and optoelectronic techniques. These are valuable technologies for addressing the unmet needs in the management of COVID. By leveraging deep learning/machine learning capabilities and big data, medical AI can perform precise prognosis predictions and provide reliable suggestions for physicians' decision-making. 
Through the assistance of the Internet of Medical Things, which encompasses wearable devices, smartphone apps, internet-based drug delivery systems, and telemedicine technologies, the status of mild cases can be continuously monitored and medications provided at home without the need for hospital care. In cases that develop into severe cases, emergency feedback can be provided through the hospital for rapid treatment. Smart healthcare can possibly prevent the development of severe COVID-19 cases and therefore lower the burden on intensive care units.}, } @article {pmid36225544, year = {2022}, author = {Li, H}, title = {Cloud Computing Image Processing Application in Athlete Training High-Resolution Image Detection.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {7423411}, pmid = {36225544}, issn = {1687-5273}, mesh = {Algorithms ; *Artificial Intelligence ; Athletes ; *Cloud Computing ; Humans ; Image Processing, Computer-Assisted/methods ; }, abstract = {The rapid development of Internet of things mobile application technology and artificial intelligence technology has given birth to a lot of services that can meet the needs of modern life, such as augmented reality technology, face recognition services, and language recognition and translation, which are often applied to various fields, and some other aspects of information communication and processing services. It has been used on various mobile phone, computer, or tablet user clients. Terminal equipment is subject to the ultralow latency and low energy consumption requirements of the above-mentioned applications. Therefore, the gap between resource-demanding application services and resource-limited mobile devices will bring great problems to the current and future development of IoT mobile applications. Based on the local image features of depth images, this paper designs an image detection method for athletes' motion posture. 
First, according to the characteristics of the local image, the depth image of the athlete obtained through Kinect is converted into bone point data. Next, a 3-stage exploration algorithm is used to perform block matching calculations on the athlete's bone point image to predict the athlete's movement posture. At the same time, using the characteristics of the Euclidean distance of the bone point image, the movement behavior is recognized. According to the experimental results, for some external environmental factors, such as sun illumination and other factors, the image detection method designed in this paper can effectively avoid their interference and influence and show the movement posture of athletes, showing excellent accuracy and robustness in predicting the movement posture of athletes and action recognition. This method can simplify a series of calibration tasks in the initial stage of 3D video surveillance and infer the posture of the observation target and recognize it in real time. The method has good application value and provides a useful reference for similar tasks.}, } @article {pmid36210997, year = {2022}, author = {B, D and M, L and R, A and Kallimani, JS and Walia, R and Belete, B}, title = {A Novel Feature Selection with Hybrid Deep Learning Based Heart Disease Detection and Classification in the e-Healthcare Environment.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {1167494}, pmid = {36210997}, issn = {1687-5273}, mesh = {Cloud Computing ; *Deep Learning ; *Heart Diseases/diagnosis ; Humans ; Neural Networks, Computer ; *Telemedicine ; }, abstract = {With the advancements in data mining, wearables, and cloud computing, online disease diagnosis services have been widely employed in the e-healthcare environment and improved the quality of the services. The e-healthcare services help to reduce the death rate by the earlier identification of the diseases. 
Simultaneously, heart disease (HD) is a deadly disorder, and patient survival depends on early diagnosis of HD. Early HD diagnosis and categorization play a key role in the analysis of clinical data. In the context of e-healthcare, we provide a novel feature selection with hybrid deep learning-based heart disease detection and classification (FSHDL-HDDC) model. The two primary preprocessing processes of the FSHDL-HDDC approach are data normalisation and the replacement of missing values. The FSHDL-HDDC method also necessitates the development of a feature selection method based on the elite opposition-based squirrel search algorithm (EO-SSA) in order to determine the optimal subset of features. Moreover, an attention-based convolutional neural network (ACNN) with long short-term memory (LSTM), called (ACNN-LSTM) model, is utilized for the detection of HD by using medical data. An extensive experimental study is performed to ensure the improved classification performance of the FSHDL-HDDC technique. A detailed comparison study reported the betterment of the FSHDL-HDDC method on existing techniques in terms of different performance measures. The suggested system, the FSHDL-HDDC, has reached its maximum level of accuracy, which is 0.9772.}, } @article {pmid36210990, year = {2022}, author = {Chen, X and Huang, X}, title = {Application of Price Competition Model Based on Computational Neural Network in Risk Prediction of Transnational Investment.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {8906385}, pmid = {36210990}, issn = {1687-5273}, mesh = {Algorithms ; Commerce ; Industry ; *Investments ; *Neural Networks, Computer ; }, abstract = {Aiming at the scenario where edge devices rely on cloud servers for collaborative computing, this paper proposes an efficient edge-cloud collaborative reasoning method. 
In order to meet the application's specific requirements for delay or accuracy, an optimal division point selection algorithm is proposed. A kind of multichannel supply chain price game model is constructed, and nonlinear dynamics theory is introduced into the research of the multichannel supply chain market. According to the actual competition situation, the different business strategies of retailers are considered in the modeling, which makes the model closer to the actual competition situation. Taking the retailer's profit as an indicator, the influence of the chaos phenomenon on the market performance is analyzed. Compared with the previous studies, this thesis uses nonlinear theory to better reveal the operating laws of the economic system. This paper selects company A in the financial industry to acquire company B in Sweden. It is concluded that company B is currently facing financial difficulties, but its brand and technical advantages are far superior to company A. The indirect financial risk index of company B, that is, the investment environment, is analyzed, and the final investment environment score of the country where company B is located is 90 points, which is an excellent grade by scoring the investment environment of the target enterprise. 
Combining the investment environment score and the alarm situation prediction score, it is concluded that the postmerger financial risk warning level of company A is in serious alarm.}, } @article {pmid36207705, year = {2022}, author = {Zhao, Y and Rokhani, FZ and Sazlina, SG and Devaraj, NK and Su, J and Chew, BH}, title = {Defining the concepts of a smart nursing home and its potential technology utilities that integrate medical services and are acceptable to stakeholders: a scoping review.}, journal = {BMC geriatrics}, volume = {22}, number = {1}, pages = {787}, pmid = {36207705}, issn = {1471-2318}, mesh = {Aged ; Humans ; *Nursing Homes ; *Quality of Life ; Skilled Nursing Facilities ; Technology ; }, abstract = {BACKGROUND AND OBJECTIVES: Smart technology in nursing home settings has the potential to elevate an operation that manages more significant number of older residents. However, the concepts, definitions, and types of smart technology, integrated medical services, and stakeholders' acceptability of smart nursing homes are less clear. This scoping review aims to define a smart nursing home and examine the qualitative evidence on technological feasibility, integration of medical services, and acceptability of the stakeholders.

METHODS: Comprehensive searches were conducted on stakeholders' websites (Phase 1) and 11 electronic databases (Phase 2), for existing concepts of smart nursing home, on what and how technologies and medical services were implemented in nursing home settings, and acceptability assessment by the stakeholders. The publication year was inclusive from January 1999 to September 2021. The language was limited to English and Chinese. Included articles must report nursing home settings related to older adults ≥ 60 years old with or without medical demands but not bed-bound. Technology Readiness Levels were used to measure the readiness of new technologies and system designs. The analysis was guided by the Framework Method and the smart technology adoption behaviours of elder consumers theoretical model. The results were reported according to the PRISMA-ScR.

RESULTS: A total of 177 literature (13 website documents and 164 journal articles) were selected. Smart nursing homes are technology-assisted nursing homes that allow the life enjoyment of their residents. They used IoT, computing technologies, cloud computing, big data and AI, information management systems, and digital health to integrate medical services in monitoring abnormal events, assisting daily living, conducting teleconsultation, managing health information, and improving the interaction between providers and residents. Fifty-five percent of the new technologies were ready for use in nursing homes (levels 6-7), and the remaining were proven the technical feasibility (levels 1-5). Healthcare professionals with higher education, better tech-savviness, fewer years at work, and older adults with more severe illnesses were more acceptable to smart technologies.

CONCLUSIONS: Smart nursing homes with integrated medical services have great potential to improve the quality of care and ensure older residents' quality of life.}, } @article {pmid36206751, year = {2022}, author = {Chen, L and Yu, L and Liu, Y and Xu, H and Ma, L and Tian, P and Zhu, J and Wang, F and Yi, K and Xiao, H and Zhou, F and Yang, Y and Cheng, Y and Bai, L and Wang, F and Zhu, Y}, title = {Space-time-regulated imaging analyzer for smart coagulation diagnosis.}, journal = {Cell reports. Medicine}, volume = {3}, number = {10}, pages = {100765}, pmid = {36206751}, issn = {2666-3791}, mesh = {*Artificial Intelligence ; Prospective Studies ; *Blood Coagulation ; Blood Coagulation Factors ; Fibrinogen/analysis ; }, abstract = {The development of intelligent blood coagulation diagnoses is awaited to meet the current need for large clinical time-sensitive caseloads due to its efficient and automated diagnoses. Herein, a method is reported and validated to realize it through artificial intelligence (AI)-assisted optical clotting biophysics (OCB) properties identification. The image differential calculation is used for precise acquisition of OCB properties with elimination of initial differences, and the strategy of space-time regulation allows on-demand space time OCB properties identification and enables diverse blood function diagnoses. The integrated applications of smartphones and cloud computing offer a user-friendly automated analysis for accurate and convenient diagnoses. The prospective assays of clinical cases (n = 41) show that the system realizes 97.6%, 95.1%, and 100% accuracy for coagulation factors, fibrinogen function, and comprehensive blood coagulation diagnoses, respectively. 
This method should enable more low-cost and convenient diagnoses and provide a path for potential diagnostic-markers finding.}, } @article {pmid36206264, year = {2022}, author = {Fu, Z}, title = {Computer cyberspace security mechanism supported by cloud computing.}, journal = {PloS one}, volume = {17}, number = {10}, pages = {e0271546}, pmid = {36206264}, issn = {1932-6203}, mesh = {Algorithms ; *Cloud Computing ; Computer Security ; Computers ; Internet ; *NAD ; }, abstract = {To improve the cybersecurity of Cloud Computing (CC) system. This paper proposes a Network Anomaly Detection (NAD) model based on the Fuzzy-C-Means (FCM) clustering algorithm. Secondly, the Cybersecurity Assessment Model (CAM) based on Grey Relational Grade (GRG) is creatively constructed. Finally, combined with Rivest Shamir Adleman (RSA) algorithm, this work proposes a CC network-oriented data encryption technology, selects different data sets for different models, and tests each model through design experiments. The results show that the average Correct Detection Rate (CDR) of the NAD model for different types of abnormal data is 93.33%. The average False Positive Rate (FPR) and the average Unreported Rate (UR) are 6.65% and 16.27%, respectively. Thus, the NAD model can ensure a high detection accuracy in the case of sufficient data. Meanwhile, the cybersecurity situation prediction by the CAM is in good agreement with the actual situation. The error between the average value of cybersecurity situation prediction and the actual value is only 0.82%, and the prediction accuracy is high. The RSA algorithm can control the average encryption time for very large text, about 12s. The decryption time is slightly longer but within a reasonable range. For different-size text, the encryption time is maintained within 0.5s. 
This work aims to provide important technical support for anomaly detection, overall security situation analysis, and data transmission security protection of CC systems to improve their cybersecurity.}, } @article {pmid36204298, year = {2022}, author = {Zhang, C and Cheng, T and Li, D and Yu, X and Chen, F and He, Q}, title = {Low-host double MDA workflow for uncultured ASFV positive blood and serum sample sequencing.}, journal = {Frontiers in veterinary science}, volume = {9}, number = {}, pages = {936781}, pmid = {36204298}, issn = {2297-1769}, abstract = {African swine fever (ASF) is a highly lethal and contagious disease caused by African swine fever virus (ASFV). Whole-genome sequencing of ASFV is necessary to study its mutation, recombination, and trace its transmission. Uncultured samples have a considerable amount of background DNA, which causes waste of sequencing throughput, storage space, and computing resources. Sequencing methods attempted for uncultured samples have various drawbacks. In this study, we improved C18 spacer MDA (Multiple Displacement Amplification)-combined host DNA exhaustion strategy to remove background DNA and fit NGS and TGS sequencing. Using this workflow, we successfully sequenced two uncultured ASFV positive samples. The results show that this method can significantly reduce the percentage of background DNA. 
We also developed software that can perform real-time base call and analyses in set intervals of ASFV TGS sequencing reads on a cloud server.}, } @article {pmid36197869, year = {2023}, author = {Guo, MH and Liu, ZN and Mu, TJ and Hu, SM}, title = {Beyond Self-Attention: External Attention Using Two Linear Layers for Visual Tasks.}, journal = {IEEE transactions on pattern analysis and machine intelligence}, volume = {45}, number = {5}, pages = {5436-5447}, doi = {10.1109/TPAMI.2022.3211006}, pmid = {36197869}, issn = {1939-3539}, abstract = {Attention mechanisms, especially self-attention, have played an increasingly important role in deep feature representation for visual tasks. Self-attention updates the feature at each position by computing a weighted sum of features using pair-wise affinities across all positions to capture the long-range dependency within a single sample. However, self-attention has quadratic complexity and ignores potential correlation between different samples. This article proposes a novel attention mechanism which we call external attention, based on two external, small, learnable, shared memories, which can be implemented easily by simply using two cascaded linear layers and two normalization layers; it conveniently replaces self-attention in existing popular architectures. External attention has linear complexity and implicitly considers the correlations between all data samples. We further incorporate the multi-head mechanism into external attention to provide an all-MLP architecture, external attention MLP (EAMLP), for image classification. 
Extensive experiments on image classification, object detection, semantic segmentation, instance segmentation, image generation, and point cloud analysis reveal that our method provides results comparable or superior to the self-attention mechanism and some of its variants, with much lower computational and memory costs.}, } @article {pmid36194325, year = {2023}, author = {Zhou, Y and Hu, Z and Geng, Q and Ma, J and Liu, J and Wang, M and Wang, Y}, title = {Monitoring and analysis of desertification surrounding Qinghai Lake (China) using remote sensing big data.}, journal = {Environmental science and pollution research international}, volume = {30}, number = {7}, pages = {17420-17436}, pmid = {36194325}, issn = {1614-7499}, mesh = {Humans ; *Remote Sensing Technology ; *Conservation of Natural Resources/methods ; Lakes ; Big Data ; Environmental Monitoring/methods ; China ; }, abstract = {Desertification is one of the most serious ecological environmental problems in the world. Monitoring the spatiotemporal dynamics of desertification is crucial for its control. The region around Qinghai Lake, in the northeastern part of the Qinghai-Tibet Plateau in China, is a special ecological function area and a climate change sensitive area, making its environmental conditions a great concern. Using cloud computing via Google Earth Engine (GEE), we collected Landsat 5 TM, Landsat 8 OLI/TIRS, and MODIS Albedo images from 2000 to 2020 in the region around Qinghai Lake, acquired land surface albedo (Albedo), and normalized vegetation index (NDVI) to build a remote sensing monitoring model of desertification. Our results showed that the desertification difference index based on the Albedo-NDVI feature space could reflect the degree of desertification in the region around Qinghai Lake. GEE offers significant advantages, such as massive data processing and long-term dynamic monitoring. 
The desertification land area fluctuated downward in the study area from 2000 to 2020, and the overall desertification status improved. Natural factors, such as climate change from warm-dry to warm-wet and decreased wind speed, and human factors improved the desertification situation. The findings indicate that desertification in the region around Qinghai Lake has been effectively controlled, and the overall desertification trend is improving.}, } @article {pmid36190152, year = {2022}, author = {Greene, D}, title = {Landlords of the internet: Big data and big real estate.}, journal = {Social studies of science}, volume = {52}, number = {6}, pages = {904-927}, doi = {10.1177/03063127221124943}, pmid = {36190152}, issn = {1460-3659}, mesh = {Humans ; *Big Data ; *Internet ; Commerce ; Technology ; }, abstract = {Who owns the internet? It depends where you look. The physical assets at the core of the internet, the warehouses that store the cloud's data and interlink global networks, are owned not by technology firms like Google and Facebook but by commercial real estate barons who compete with malls and property storage empires. Granted an empire by the US at the moment of the internet's commercialization, these internet landlords shaped how the network of networks that we call the internet physically connects, and how personal and business data is stored and transmitted. Under their governance, internet exchanges, colocation facilities, and data centers take on a double life as financialized real estate assets that circle the globe even as their servers and cables are firmly rooted in place. The history of internet landlords forces a fundamental reconsideration of the business model at the base of the internet. 
This history makes clear that the internet was never an exogenous shock to capitalist social relations, but rather a touchstone example of an economic system increasingly ruled by asset owners like landlords.}, } @article {pmid36188195, year = {2022}, author = {Zhou, Y and Varzaneh, MG}, title = {Efficient and scalable patients clustering based on medical big data in cloud platform.}, journal = {Journal of cloud computing (Heidelberg, Germany)}, volume = {11}, number = {1}, pages = {49}, pmid = {36188195}, issn = {2192-113X}, abstract = {With the outbreak and popularity of COVID-19 pandemic worldwide, the volume of patients is increasing rapidly all over the world, which brings a big risk and challenge for the maintenance of public healthcare. In this situation, quick integration and analysis of the medical records of patients in a cloud platform are of positive and valuable significance for accurate recognition and scientific diagnosis of the healthy conditions of potential patients. However, due to the big volume of medical data of patients distributed in different platforms (e.g., multiple hospitals), how to integrate these data for patient clustering and analysis in a time-efficient and scalable manner in cloud platform is still a challenging task, while guaranteeing the capability of privacy-preservation. Motivated by this fact, a time-efficient, scalable and privacy-guaranteed patient clustering method in cloud platform is proposed in this work. At last, we demonstrate the competitive advantages of our method via a set of simulated experiments. 
Experiment results with competitive methods in current research literatures have proved the feasibility of our proposal.}, } @article {pmid36185458, year = {2022}, author = {Moser, N and Yu, LS and Rodriguez Manzano, J and Malpartida-Cardenas, K and Au, A and Arkell, P and Cicatiello, C and Moniri, A and Miglietta, L and Wang, WH and Wang, SF and Holmes, A and Chen, YH and Georgiou, P}, title = {Quantitative detection of dengue serotypes using a smartphone-connected handheld lab-on-chip platform.}, journal = {Frontiers in bioengineering and biotechnology}, volume = {10}, number = {}, pages = {892853}, pmid = {36185458}, issn = {2296-4185}, abstract = {Dengue is one of the most prevalent infectious diseases in the world. Rapid, accurate and scalable diagnostics are key to patient management and epidemiological surveillance of the dengue virus (DENV), however current technologies do not match required clinical sensitivity and specificity or rely on large laboratory equipment. In this work, we report the translation of our smartphone-connected handheld Lab-on-Chip (LoC) platform for the quantitative detection of two dengue serotypes. At its core, the approach relies on the combination of Complementary Metal-Oxide-Semiconductor (CMOS) microchip technology to integrate an array of 78 × 56 potentiometric sensors, and a label-free reverse-transcriptase loop mediated isothermal amplification (RT-LAMP) assay. The platform communicates to a smartphone app which synchronises results in real time with a secure cloud server hosted by Amazon Web Services (AWS) for epidemiological surveillance. The assay on our LoC platform (RT-eLAMP) was shown to match performance on a gold-standard fluorescence-based real-time instrument (RT-qLAMP) with synthetic DENV-1 and DENV-2 RNA and extracted RNA from 9 DENV-2 clinical isolates, achieving quantitative detection in under 15 min. 
To validate the portability of the platform and the geo-tagging capabilities, we led our study in the laboratories at Imperial College London, UK, and Kaohsiung Medical Hospital, Taiwan. This approach carries high potential for application in low resource settings at the point of care (PoC).}, } @article {pmid36179156, year = {2022}, author = {Sun, J and Endo, S and Lin, H and Hayden, P and Vedral, V and Yuan, X}, title = {Perturbative Quantum Simulation.}, journal = {Physical review letters}, volume = {129}, number = {12}, pages = {120505}, doi = {10.1103/PhysRevLett.129.120505}, pmid = {36179156}, issn = {1079-7114}, abstract = {Approximation based on perturbation theory is the foundation for most of the quantitative predictions of quantum mechanics, whether in quantum many-body physics, chemistry, quantum field theory, or other domains. Quantum computing provides an alternative to the perturbation paradigm, yet state-of-the-art quantum processors with tens of noisy qubits are of limited practical utility. Here, we introduce perturbative quantum simulation, which combines the complementary strengths of the two approaches, enabling the solution of large practical quantum problems using limited noisy intermediate-scale quantum hardware. The use of a quantum processor eliminates the need to identify a solvable unperturbed Hamiltonian, while the introduction of perturbative coupling permits the quantum processor to simulate systems larger than the available number of physical qubits. We present an explicit perturbative expansion that mimics the Dyson series expansion and involves only local unitary operations, and show its optimality over other expansions under certain conditions. 
We numerically benchmark the method for interacting bosons, fermions, and quantum spins in different topologies, and study different physical phenomena, such as information propagation, charge-spin separation, and magnetism, on systems of up to 48 qubits only using an 8+1 qubit quantum hardware. We demonstrate our scheme on the IBM quantum cloud, verifying its noise robustness and illustrating its potential for benchmarking large quantum processors with smaller ones.}, } @article {pmid36174081, year = {2022}, author = {Mul, E and Ancin Murguzur, FJ and Hausner, VH}, title = {Impact of the COVID-19 pandemic on human-nature relations in a remote nature-based tourism destination.}, journal = {PloS one}, volume = {17}, number = {9}, pages = {e0273354}, pmid = {36174081}, issn = {1932-6203}, mesh = {*COVID-19/epidemiology ; Human Characteristics ; Humans ; Pandemics ; *Tourism ; Travel ; }, abstract = {Tourism and nature-based recreation has changed dramatically during the COVID-19 pandemic. Travel restrictions caused sharp declines in visitation numbers, particularly in remote areas, such as northern Norway. In addition, the pandemic may have altered human-nature relationships by changing visitor behaviour and preferences. We studied visitor numbers and behaviour in northern Norway, based on user-generated data, in the form of photographic material that was uploaded to the popular online platform Flickr. A total of 195.200 photographs, taken by 5.247 photographers were subjected to Google's "Cloud Vision" automatic content analysis algorithm. The resulting collection of labels that were assigned to each photograph was analysed in structural topic models, using photography date (relative to the start of the pandemic measures in Norway) and reported or estimated photographers' nationality as explanatory variables. 
Our results show that nature-based recreation relating to "mountains" and "winter" became more prevalent during the pandemic, amongst both domestic and international photographers. Shifts in preferences due to the pandemic outbreak strongly depended on nationality, with domestic visitors demonstrating a wide interest in topics while international visitors maintained their preference for nature-based experiences. Among those activities that suffered the most from decline in international tourism was northern lights and cruises as indicated by the topic models. On the other hand, images depicting mountains and flora and fauna increased their prevalence during the pandemic. Domestic visitors, on the other hand, spent more time in urban settings as a result of restrictions, which results in a higher prevalence of non-nature related images. Our results underscore the need to consider the dynamic nature of human-nature relationships. The contrast in flexibility to adapt to changing conditions and travel restrictions should be incorporated in collaborative efforts of municipalities and tour operators to develop sustainable local nature-based tourism products, particularly in remote areas.}, } @article {pmid36172315, year = {2022}, author = {Jiang, Y and Lei, Y}, title = {Implementation of Trusted Traceability Query Using Blockchain and Deep Reinforcement Learning in Resource Management.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {6559517}, pmid = {36172315}, issn = {1687-5273}, mesh = {Algorithms ; *Blockchain ; Cloud Computing ; Technology ; }, abstract = {To better track the source of goods and maintain the quality of goods, the present work uses blockchain technology to establish a system for trusted traceability queries and information management. 
Primarily, the analysis is made on the shortcomings of the traceability system in the field of agricultural products at the present stage; the study is conducted on the application of the traceability system to blockchain technology, and a new model of agricultural product traceability system is established based on the blockchain technology. Then, a study is carried out on the task scheduling problem of resource clusters in cloud computing resource management. The present work expands the task model and uses the deep Q network algorithm in deep reinforcement learning to solve various optimization objectives preset in the task scheduling problem. Next, a resource management algorithm based on a deep Q network is proposed. Finally, the performance of the algorithm is analyzed from the aspects of parameters, structure, and task load. Experiments show that the algorithm is better than Shortest Job First (SJF), Tetris*, Packer, and other classic task scheduling algorithms in different optimization objectives. In the traceability system test, the traceability accuracy is 99% for the constructed system in the first group of samples. In the second group, the traceability accuracy reaches 98% for the constructed system. In general, the traceability accuracy of the system proposed here is above 98% in 8 groups of experimental samples, and the traceability accuracy is close for each experimental group. 
The resource management approach of the traceability system constructed here provides some ideas for the application of reinforcement learning technology in the construction of traceability systems.}, } @article {pmid36171329, year = {2022}, author = {Wolf, K and Dawson, RJ and Mills, JP and Blythe, P and Morley, J}, title = {Towards a digital twin for supporting multi-agency incident management in a smart city.}, journal = {Scientific reports}, volume = {12}, number = {1}, pages = {16221}, pmid = {36171329}, issn = {2045-2322}, mesh = {*Ambulances ; Cities ; Cloud Computing ; *Emergency Medical Services ; Floods ; }, abstract = {Cost-effective on-demand computing resources can help to process the increasing number of large, diverse datasets generated from smart internet-enabled technology, such as sensors, CCTV cameras, and mobile devices, with high temporal resolution. Category 1 emergency services (Ambulance, Fire and Rescue, and Police) can benefit from access to (near) real-time traffic- and weather data to coordinate multiple services, such as reassessing a route on the transport network affected by flooding or road incidents. However, there is a tendency not to utilise available smart city data sources, due to the heterogeneous data landscape, lack of real-time information, and communication inefficiencies. Using a systems engineering approach, we identify the current challenges faced by stakeholders involved in incident response and formulate future requirements for an improved system. Based on these initial findings, we develop a use case using Microsoft Azure cloud computing technology for analytical functionalities that can better support stakeholders in their response to an incident. Our prototype allows stakeholders to view available resources, send automatic updates and integrate location-based real-time weather and traffic data. 
We anticipate our study will provide a foundation for the future design of a data ontology for multi-agency incident response in smart cities of the future.}, } @article {pmid36164525, year = {2022}, author = {Roy, B and Bari, E}, title = {Examining the relationship between land surface temperature and landscape features using spectral indices with Google Earth Engine.}, journal = {Heliyon}, volume = {8}, number = {9}, pages = {e10668}, pmid = {36164525}, issn = {2405-8440}, abstract = {Land surface temperature (LST) is strongly influenced by landscape features as they change the thermal characteristics of the surface greatly. Normalized Difference Vegetation Index (NDVI), Normalized Difference Water Index (NDWI), Normalized Difference Built-up Index (NDBI), and Normalized Difference Bareness Index (NDBAI) correspond to vegetation cover, water bodies, impervious build-ups, and bare lands, respectively. These indices were utilized to demonstrate the relationship between multiple landscape features and LST using the spectral indices derived from images of Landsat 5 Thematic Mapper (TM), and Landsat 8 Operational Land Imager (OLI) of Sylhet Sadar Upazila (2000-2018). Google Earth Engine (GEE) cloud computing platform was used to filter, process, and analyze trends with logistic regression. LST and other spectral indices were calculated. Changes in LST (2000-2018) range from -6 °C to +4 °C in the study area. Because of higher vegetation cover and reserve forest, the north-eastern part of the study region had the greatest variations in LST. The spectral indices corresponding to landscape features have a considerable explanatory capacity for describing LST scenarios. 
The correlation of these indices with LST ranges from -0.52 (NDBI) to +0.57 (NDVI).}, } @article {pmid36161827, year = {2022}, author = {Huemer, J and Kronschläger, M and Ruiss, M and Sim, D and Keane, PA and Findl, O and Wagner, SK}, title = {Diagnostic accuracy of code-free deep learning for detection and evaluation of posterior capsule opacification.}, journal = {BMJ open ophthalmology}, volume = {7}, number = {1}, pages = {}, pmid = {36161827}, issn = {2397-3269}, support = {MR/T000953/1/MRC_/Medical Research Council/United Kingdom ; MR/T019050/1/MRC_/Medical Research Council/United Kingdom ; }, mesh = {Area Under Curve ; *Capsule Opacification/diagnosis ; *Deep Learning ; Humans ; Retrospective Studies ; Vision Disorders ; }, abstract = {OBJECTIVE: To train and validate a code-free deep learning system (CFDLS) on classifying high-resolution digital retroillumination images of posterior capsule opacification (PCO) and to discriminate between clinically significant and non-significant PCOs.

METHODS AND ANALYSIS: For this retrospective registry study, three expert observers graded two independent datasets of 279 images three separate times with no PCO to severe PCO, providing binary labels for clinical significance. The CFDLS was trained and internally validated using 179 images of a training dataset and externally validated with 100 images. Model development was through Google Cloud AutoML Vision. Intraobserver and interobserver variabilities were assessed using Fleiss kappa (κ) coefficients and model performance through sensitivity, specificity and area under the curve (AUC).

RESULTS: Intraobserver variability κ values for observers 1, 2 and 3 were 0.90 (95% CI 0.86 to 0.95), 0.94 (95% CI 0.90 to 0.97) and 0.88 (95% CI 0.82 to 0.93). Interobserver agreement was high, ranging from 0.85 (95% CI 0.79 to 0.90) between observers 1 and 2 to 0.90 (95% CI 0.85 to 0.94) for observers 1 and 3. On internal validation, the AUC of the CFDLS was 0.99 (95% CI 0.92 to 1.0); sensitivity was 0.89 at a specificity of 1. On external validation, the AUC was 0.97 (95% CI 0.93 to 0.99); sensitivity was 0.84 and specificity was 0.92.

CONCLUSION: This CFDLS provides highly accurate discrimination between clinically significant and non-significant PCO equivalent to human expert graders. The clinical value as a potential decision support tool in different models of care warrants further research.}, } @article {pmid36160943, year = {2022}, author = {Sulis, E and Amantea, IA and Aldinucci, M and Boella, G and Marinello, R and Grosso, M and Platter, P and Ambrosini, S}, title = {An ambient assisted living architecture for hospital at home coupled with a process-oriented perspective.}, journal = {Journal of ambient intelligence and humanized computing}, volume = {}, number = {}, pages = {1-19}, pmid = {36160943}, issn = {1868-5137}, abstract = {The growing number of next-generation applications offers a relevant opportunity for healthcare services, generating an urgent need for architectures for systems integration. Moreover, the huge amount of stored information related to events can be explored by adopting a process-oriented perspective. This paper discusses an Ambient Assisted Living healthcare architecture to manage hospital home-care services. The proposed solution relies on adopting an event manager to integrate sources ranging from personal devices to web-based applications. Data are processed on a federated cloud platform offering computing infrastructure and storage resources to improve scientific research. In a second step, a business process analysis of telehealth and telemedicine applications is considered. An initial study explored the business process flow to capture the main sequences of tasks, activities, events. 
This step paves the way for the integration of process mining techniques to compliance monitoring in an AAL architecture framework.}, } @article {pmid36157083, year = {2023}, author = {Ahmad, I and Abdullah, S and Ahmed, A}, title = {IoT-fog-based healthcare 4.0 system using blockchain technology.}, journal = {The Journal of supercomputing}, volume = {79}, number = {4}, pages = {3999-4020}, pmid = {36157083}, issn = {0920-8542}, abstract = {Real-time tracking and surveillance of patients' health has become ubiquitous in the healthcare sector as a result of the development of fog, cloud computing, and Internet of Things (IoT) technologies. Medical IoT (MIoT) equipment often transfers health data to a pharmaceutical data center, where it is saved, evaluated, and made available to relevant stakeholders or users. Fog layers have been utilized to increase the scalability and flexibility of IoT-based healthcare services, by providing quick response times and low latency. Our proposed solution focuses on an electronic healthcare system that manages both critical and non-critical patients simultaneously. Fog layer is distributed into two halves: critical fog cluster and non-critical fog cluster. Critical patients are handled at critical fog clusters for quick response, while non-critical patients are handled using blockchain technology at non-critical fog cluster, which protects the privacy of patient health records. The suggested solution requires little modification to the current IoT ecosystem while decrease the response time for critical messages and offloading the cloud infrastructure. Reduced storage requirements for cloud data centers benefit users in addition to saving money on construction and operating expenses. In addition, we examined the proposed work for recall, accuracy, precision, and F-score. The results show that the suggested approach is successful in protecting privacy while retaining standard network settings. 
Moreover, suggested system and benchmark are evaluated in terms of system response time, drop rate, throughput, fog, and cloud utilization. Evaluated results clearly indicate the performance of proposed system is better than benchmark.}, } @article {pmid36156947, year = {2022}, author = {Yue, Q}, title = {Dynamic Database Design of Sports Quality Based on Genetic Data Algorithm and Artificial Intelligence.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {7473109}, pmid = {36156947}, issn = {1687-5273}, mesh = {*Artificial Intelligence ; Exercise ; Humans ; *Sports ; Students ; Surveys and Questionnaires ; }, abstract = {According to the traditional data mining method, it is no longer applicable to obtain knowledge from the database, and the knowledge mined in the past must be constantly updated. In the last few years, Internet technology and cloud computing technology have emerged. The emergence of these two technologies has brought about Earth-shaking changes in certain industries. In order to efficiently retrieve and count a large amount of data at a lower cost, big data technology is proposed. Big data technology has played an important role for data with various types, huge quantities, and extremely fast changing speeds. However, big data technology still has some limitations, and researchers still cannot obtain the value of data in a short period of time with low cost and high efficiency. The sports database constructed in this paper can effectively carry out statistics and analysis on the data of sports learning. In the prototype system, log files can be mined, classified, and preprocessed. For the incremental data obtained by preprocessing, incremental data mining can be performed, a classification model can be established, and the database can be updated to provide users with personalized services. 
Through the method of data survey, the author studied the students' exercise status, and the feedback data show that college students lack the awareness of physical exercise and have no fitness habit. It is necessary to accelerate the reform of college sports and cultivate students' good sports awareness.}, } @article {pmid36156946, year = {2022}, author = {Zhu, J}, title = {The Usage of Designing the Urban Sculpture Scene Based on Edge Computing.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {9346771}, pmid = {36156946}, issn = {1687-5273}, mesh = {*Algorithms ; *Computers ; Electrocardiography ; Humans ; }, abstract = {To not only achieve the goal of urban cultural construction but also save the cost of urban sculpture space design, EC (edge computing) is combined with urban sculpture space design and planning first. Then it briefly discusses the service category, system architecture, advantages, and characteristics of urban sculpture, as well as the key points and difficulties of its construction, and the layered architecture of EC for urban sculpture spaces is proposed. Secondly, the cloud edge combination technology is adopted, and the urban sculpture is used as a specific function of the edge system node to conduct an in-depth analysis to build an urban sculpture safety supervision system architecture platform. Finally, the actual energy required for implementation is predicted and evaluated, the specific monitoring system coverage is set up, and some equations are made for calculating the energy consumption of the monitored machines according to the number of devices and route planning required by the urban sculpture safety supervision system. An optimization algorithm for energy consumption is proposed based on reinforcement learning and compared with the three control groups. 
The results show that when the seven monitoring devices cover detection points less than 800, the required energy consumption increases linearly. When the detection devices cover more than 800 detection points, the required energy consumption is stable and varies from 10000 to 12000; that is, when the number of monitoring devices is 7, the optimal number of monitoring points is about 800. When the number of detection points is fixed, increasing the number of monitoring devices in a small range can reduce the total energy consumption. The optimization algorithm based on the reinforcement learning proposal can obtain an approximate optimal solution. The research results show that the combination of edge computing and urban sculpture can expand the function of urban sculpture and make it serve people better.}, } @article {pmid36156942, year = {2022}, author = {Zheng, M and Liu, B and Sun, L}, title = {LawRec: Automatic Recommendation of Legal Provisions Based on Legal Text Analysis.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {6313161}, pmid = {36156942}, issn = {1687-5273}, mesh = {*Artificial Intelligence ; Humans ; *Neural Networks, Computer ; Technology ; }, abstract = {Smart court technologies are making full use of modern science to promote the modernization of the trial system and trial capabilities, for example, artificial intelligence, Internet of things, and cloud computing. The smart court technologies can improve the efficiency of case handling and achieving convenience for the people. Article recommendation is an important part of intelligent trial. For ordinary people without legal background, the traditional information retrieval system that searches laws and regulations based on keywords is not applicable because they do not have the ability to extract professional legal vocabulary from complex case processes. 
This paper proposes a law recommendation framework, called LawRec, based on Bidirectional Encoder Representation from Transformers (BERT) and Skip-Recurrent Neural Network (Skip-RNN) models. It intends to integrate the knowledge of legal provisions with the case description and uses the BERT model to learn the case description text and legal knowledge, respectively. At last, laws and regulations for cases can be recommended. Experiment results show that the proposed LawRec can achieve better performance than state-of-the-art methods.}, } @article {pmid36153857, year = {2022}, author = {Park, JY and Lee, K and Chung, DR}, title = {Public interest in the digital transformation accelerated by the COVID-19 pandemic and perception of its future impact.}, journal = {The Korean journal of internal medicine}, volume = {37}, number = {6}, pages = {1223-1233}, pmid = {36153857}, issn = {2005-6648}, mesh = {Humans ; Pandemics ; *COVID-19/epidemiology ; Artificial Intelligence ; *Virtual Reality ; Perception ; }, abstract = {BACKGROUND/AIMS: The coronavirus disease 2019 (COVID-19) pandemic has accelerated digital transformation (DT). We investigated the trend of the public interest in technologies regarding the DT and Koreans' experiences and their perceptions of the future impact of these technologies.

METHODS: Using Google Trends, the relative search volume (RSV) for topics including "coronavirus," "artificial intelligence," "cloud," "big data," and "metaverse" was retrieved for the period from January 2020 to January 2022. A survey was conducted to assess the population's knowledge, experience, and perceptions regarding the DT.

RESULTS: The RSV for "metaverse" showed an increasing trend, in contrast to those for "cloud," "big data," and "coronavirus." The RSVs for DT-related keywords had a negative correlation with the number of new weekly COVID-19 cases. In our survey, 78.1% responded that the positive impact of the DT on future lives would outweigh the negative impact. The predictors for this positive perception included experiences with the metaverse (4.0-fold) and virtual reality (VR)/augmented reality (AR) education (3.8-fold). Respondents predicted that the biggest change would occur in the healthcare sector after transportation/communication.

CONCLUSION: Koreans' search interest for "metaverse" showed an increasing trend during the COVID-19 pandemic. Koreans believe that DT will bring about big changes in the healthcare sector. Most of the survey respondents have a positive outlook about the impact of DT on future life, and the predictors for this positive perception include the experiences with the metaverse or VR/AR education. Healthcare professionals need to accelerate the adoption of DT in clinical practice, education and training.}, } @article {pmid36151775, year = {2022}, author = {Zhao, XG and Cao, H}, title = {Linking research of biomedical datasets.}, journal = {Briefings in bioinformatics}, volume = {23}, number = {6}, pages = {}, doi = {10.1093/bib/bbac373}, pmid = {36151775}, issn = {1477-4054}, support = {2018YFD0901103//Key Research and Development Program of the Ministry of Science and Technology/ ; }, mesh = {Humans ; *Ecosystem ; *Algorithms ; Knowledge ; }, abstract = {Biomedical data preprocessing and efficient computing can be as important as the statistical methods used to fit the data; data processing needs to consider application scenarios, data acquisition and individual rights and interests. We review common principles, knowledge and methods of integrated research according to the whole-pipeline processing mechanism diverse, coherent, sharing, auditable and ecological. First, neuromorphic and native algorithms integrate diverse datasets, providing linear scalability and high visualization. Second, the choice mechanism of different preprocessing, analysis and transaction methods from raw to neuromorphic was summarized on the node and coordinator platforms. Third, combination of node, network, cloud, edge, swarm and graph builds an ecosystem of cohort integrated research and clinical diagnosis and treatment. 
Looking forward, it is vital to simultaneously combine deep computing, mass data storage and massively parallel communication.}, } @article {pmid36146408, year = {2022}, author = {Jeong, Y and Kim, T}, title = {A Cluster-Driven Adaptive Training Approach for Federated Learning.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {18}, pages = {}, pmid = {36146408}, issn = {1424-8220}, mesh = {*Algorithms ; Humans ; *Learning ; Machine Learning ; }, abstract = {Federated learning (FL) is a promising collaborative learning approach in edge computing, reducing communication costs and addressing the data privacy concerns of traditional cloud-based training. Owing to this, diverse studies have been conducted to distribute FL into industry. However, there still remain the practical issues of FL to be solved (e.g., handling non-IID data and stragglers) for an actual implementation of FL. To address these issues, in this paper, we propose a cluster-driven adaptive training approach (CATA-Fed) to enhance the performance of FL training in a practical environment. CATA-Fed employs adaptive training during the local model updates to enhance the efficiency of training, reducing the waste of time and resources due to the presence of the stragglers and also provides a straggler mitigating scheme, which can reduce the workload of straggling clients. In addition to this, CATA-Fed clusters the clients considering the data size and selects the training participants within a cluster to reduce the magnitude differences of local gradients collected in the global model update under a statistical heterogeneous condition (e.g., non-IID data). During this client selection process, a proportional fair scheduling is employed for securing the data diversity as well as balancing the load of clients. 
We conduct extensive experiments using three benchmark datasets (MNIST, Fashion-MNIST, and CIFAR-10), and the results show that CATA-Fed outperforms the previous FL schemes (FedAVG, FedProx, and TiFL) with regard to the training speed and test accuracy under the diverse FL conditions.}, } @article {pmid36146382, year = {2022}, author = {Caro-Via, S and Vidaña-Vila, E and Ginovart-Panisello, GJ and Martínez-Suquía, C and Freixes, M and Alsina-Pagès, RM}, title = {Edge-Computing Meshed Wireless Acoustic Sensor Network for Indoor Sound Monitoring.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {18}, pages = {}, pmid = {36146382}, issn = {1424-8220}, mesh = {*Acoustics ; Humans ; Monitoring, Physiologic ; }, abstract = {This work presents the design of a wireless acoustic sensor network (WASN) that monitors indoor spaces. The proposed network would enable the acquisition of valuable information on the behavior of the inhabitants of the space. This WASN has been conceived to work in any type of indoor environment, including houses, hospitals, universities or even libraries, where the tracking of people can give relevant insight, with a focus on ambient assisted living environments. The proposed WASN has several priorities and differences compared to the literature: (i) presenting a low-cost flexible sensor able to monitor wide indoor areas; (ii) balance between acoustic quality and microphone cost; and (iii) good communication between nodes to increase the connectivity coverage. A potential application of the proposed network could be the generation of a sound map of a certain location (house, university, offices, etc.) or, in the future, the acoustic detection of events, giving information about the behavior of the inhabitants of the place under study. 
Each node of the network comprises an omnidirectional microphone and a computation unit, which processes acoustic information locally following the edge-computing paradigm to avoid sending raw data to a cloud server, mainly for privacy and connectivity purposes. Moreover, this work explores the placement of acoustic sensors in a real scenario, following acoustic coverage criteria. The proposed network aims to encourage the use of real-time non-invasive devices to obtain behavioral and environmental information, in order to take decisions in real-time with the minimum intrusiveness in the location under study.}, } @article {pmid36146368, year = {2022}, author = {Barron, A and Sanchez-Gallegos, DD and Carrizales-Espinoza, D and Gonzalez-Compean, JL and Morales-Sandoval, M}, title = {On the Efficient Delivery and Storage of IoT Data in Edge-Fog-Cloud Environments.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {18}, pages = {}, pmid = {36146368}, issn = {1424-8220}, support = {41756 PRONACES-CONACYT//Consejo Nacional de Ciencia y Tecnología/ ; }, mesh = {*Cloud Computing ; *Computer Communication Networks ; Electrocardiography ; Internet ; }, abstract = {Cloud storage has become a keystone for organizations to manage large volumes of data produced by sensors at the edge as well as information produced by deep and machine learning applications. Nevertheless, the latency produced by geographic distributed systems deployed on any of the edge, the fog, or the cloud, leads to delays that are observed by end-users in the form of high response times. In this paper, we present an efficient scheme for the management and storage of Internet of Thing (IoT) data in edge-fog-cloud environments. In our proposal, entities called data containers are coupled, in a logical manner, with nano/microservices deployed on any of the edge, the fog, or the cloud. 
The data containers implement a hierarchical cache file system including storage levels such as in-memory, file system, and cloud services for transparently managing the input/output data operations produced by nano/microservices (e.g., a sensor hub collecting data from sensors at the edge or machine learning applications processing data at the edge). Data containers are interconnected through a secure and efficient content delivery network, which transparently and automatically performs the continuous delivery of data through the edge-fog-cloud. A prototype of our proposed scheme was implemented and evaluated in a case study based on the management of electrocardiogram sensor data. The obtained results reveal the suitability and efficiency of the proposed scheme.}, } @article {pmid36146364, year = {2022}, author = {Alvear-Puertas, VE and Burbano-Prado, YA and Rosero-Montalvo, PD and Tözün, P and Marcillo, F and Hernandez, W}, title = {Smart and Portable Air-Quality Monitoring IoT Low-Cost Devices in Ibarra City, Ecuador.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {18}, pages = {}, pmid = {36146364}, issn = {1424-8220}, support = {Research project: IEA.WHP.21.02//Universidad de Las Américas/ ; Research project: CEPRA XII-2018-13//Corporacion Ecuatoriana para el Desarrollo de la Investigacion y la Academia/ ; award ref: NNF20OC0064411//Novo Nordisk Fonden/ ; }, mesh = {*Air Pollution/analysis ; Ecuador ; Environmental Monitoring/methods ; Gases/analysis ; *Internet of Things ; }, abstract = {Nowadays, increasing air-pollution levels are a public health concern that affects all living beings, with the most polluting gases being present in urban environments. For this reason, this research presents portable Internet of Things (IoT) environmental monitoring devices that can be installed in vehicles and that send message queuing telemetry transport (MQTT) messages to a server, with a time series database allocated in edge computing. 
The visualization stage is performed in cloud computing to determine the city air-pollution concentration using three different labels: low, normal, and high. To determine the environmental conditions in Ibarra, Ecuador, a data analysis scheme is used with outlier detection and supervised classification stages. In terms of relevant results, the performance percentage of the IoT nodes used to infer air quality was greater than 90%. In addition, the memory consumption was 14 Kbytes in a flash and 3 Kbytes in a RAM, reducing the power consumption and bandwidth needed in traditional air-pollution measuring stations.}, } @article {pmid36146329, year = {2022}, author = {Maruta, K and Nishiuchi, H and Nakazato, J and Tran, GK and Sakaguchi, K}, title = {5G/B5G mmWave Cellular Networks with MEC Prefetching Based on User Context Information.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {18}, pages = {}, pmid = {36146329}, issn = {1424-8220}, support = {723171//European Commission/ ; 0159-0048//Ministry of Internal Affairs and Communications/ ; 0155-0062//Ministry of Internal Affairs and Communications/ ; 00101//National Institute of Information and Communications Technology/ ; }, abstract = {To deal with recent increasing mobile traffic, ultra-broadband communication with millimeter-wave (mmWave) has been regarded as a key technology for 5G cellular networks. In a previous study, a mmWave heterogeneous network was composed of several mmWave small cells overlaid on the coverage of a macro cell. However, as seen from the optical fiber penetration rate worldwide, it is difficult to say that backhaul with Gbps order is available everywhere. In the case of using mmWave access under a limited backhaul capacity, it becomes a bottleneck at the backhaul; thus, mmWave access cannot fully demonstrate its potential. 
On the other hand, the concept of multi-access edge computing (MEC) has been proposed to decrease the response latency compared to cloud computing by deploying storage and computation resources to the user side of mobile networks. This paper introduces MEC into mmWave heterogeneous networks and proposes a content prefetching algorithm to resolve such backhaul issues. Context information, such as the destination, mobility, and traffic tendency, is shared through the macro cell to the prefetch application and data that the users request. Prefetched data is stored in the MEC and then transmitted via mmWave without a backhaul bottleneck. The effectiveness is verified through computer simulations where we implement realistic user mobility as well as traffic and backhauling models. The results show that the proposed framework achieved 95% system capacity even under the constraint of a 1 Gbps backhaul link.}, } @article {pmid36146134, year = {2022}, author = {Alghamdi, A and Zhu, J and Yin, G and Shorfuzzaman, M and Alsufyani, N and Alyami, S and Biswas, S}, title = {Blockchain Empowered Federated Learning Ecosystem for Securing Consumer IoT Features Analysis.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {18}, pages = {}, pmid = {36146134}, issn = {1424-8220}, mesh = {*Blockchain ; Computer Security ; Ecosystem ; *Internet of Things ; Privacy ; }, abstract = {Resource constraint Consumer Internet of Things (CIoT) is controlled through gateway devices (e.g., smartphones, computers, etc.) that are connected to Mobile Edge Computing (MEC) servers or cloud regulated by a third party. Recently Machine Learning (ML) has been widely used in automation, consumer behavior analysis, device quality upgradation, etc. Typical ML predicts by analyzing customers' raw data in a centralized system which raises the security and privacy issues such as data leakage, privacy violation, single point of failure, etc. 
To overcome the problems, Federated Learning (FL) developed an initial solution to ensure services without sharing personal data. In FL, a centralized aggregator collaborates and makes an average for a global model used for the next round of training. However, the centralized aggregator raised the same issues, such as a single point of control leaking the updated model and interrupting the entire process. Additionally, research claims data can be retrieved from model parameters. Beyond that, since the Gateway (GW) device has full access to the raw data, it can also threaten the entire ecosystem. This research contributes a blockchain-controlled, edge intelligence federated learning framework for a distributed learning platform for CIoT. The federated learning platform allows collaborative learning with users' shared data, and the blockchain network replaces the centralized aggregator and ensures secure participation of gateway devices in the ecosystem. Furthermore, blockchain is trustless, immutable, and anonymous, encouraging CIoT end users to participate. We evaluated the framework and federated learning outcomes using the well-known Stanford Cars dataset. Experimental results prove the effectiveness of the proposed framework.}, } @article {pmid36146113, year = {2022}, author = {Liu, X and Zhao, X and Liu, G and Huang, F and Huang, T and Wu, Y}, title = {Collaborative Task Offloading and Service Caching Strategy for Mobile Edge Computing.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {18}, pages = {}, pmid = {36146113}, issn = {1424-8220}, support = {5700-202141454A-0-0-00//the 2021 State Grid Corporation of China Science and Technology Program/ ; }, mesh = {*Algorithms ; Computer Simulation ; }, abstract = {Mobile edge computing (MEC), which sinks the functions of cloud servers, has become an emerging paradigm to solve the contradiction between delay-sensitive tasks and resource-constrained terminals. 
Task offloading assisted by service caching in a collaborative manner can reduce delay and balance the edge load in MEC. Due to the limited storage resources of edge servers, it is a significant issue to develop a dynamical service caching strategy according to the actual variable user demands in task offloading. Therefore, this paper investigates the collaborative task offloading problem assisted by a dynamical caching strategy in MEC. Furthermore, a two-level computing strategy called joint task offloading and service caching (JTOSC) is proposed to solve the optimized problem. The outer layer in JTOSC iteratively updates the service caching decisions based on the Gibbs sampling. The inner layer in JTOSC adopts the fairness-aware allocation algorithm and the offloading revenue preference-based bilateral matching algorithm to get a great computing resource allocation and task offloading scheme. The simulation results indicate that the proposed strategy outperforms the other four comparison strategies in terms of maximum offloading delay, service cache hit rate, and edge load balance.}, } @article {pmid36146069, year = {2022}, author = {Li, D and Mao, Y and Chen, X and Li, J and Liu, S}, title = {Deployment and Allocation Strategy for MEC Nodes in Complex Multi-Terminal Scenarios.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {18}, pages = {}, pmid = {36146069}, issn = {1424-8220}, support = {2018YFB2100100//Yunnan Power Gird Co./ ; }, abstract = {Mobile edge computing (MEC) has become an effective solution for insufficient computing and communication problems for the Internet of Things (IoT) applications due to its rich computing resources on the edge side. In multi-terminal scenarios, the deployment scheme of edge nodes has an important impact on system performance and has become an essential issue in end-edge-cloud architecture. 
In this article, we consider specific factors, such as spatial location, power supply, and urgency requirements of terminals, with respect to building an evaluation model to solve the allocation problem. An evaluation model based on reward, energy consumption, and cost factors is proposed. The genetic algorithm is applied to determine the optimal edge node deployment and allocation strategies. Moreover, we compare the proposed method with the k-means and ant colony algorithms. The results show that the obtained strategies achieve good evaluation results under problem constraints. Furthermore, we conduct comparison tests with different attributes to further test the performance of the proposed method.}, } @article {pmid36141163, year = {2022}, author = {Tang, X and Xu, L and Chen, G}, title = {Research on the Rapid Diagnostic Method of Rolling Bearing Fault Based on Cloud-Edge Collaboration.}, journal = {Entropy (Basel, Switzerland)}, volume = {24}, number = {9}, pages = {}, pmid = {36141163}, issn = {1099-4300}, support = {QKHJC-ZK〔2021〕YB271//The Science and Technology Foundation of Guizhou Province/ ; QKHZC〔2022〕YB074//Guizhou Science and Technology Support Project/ ; }, abstract = {Recent deep-learning methods for fault diagnosis of rolling bearings need a significant amount of computing time and resources. Most of them cannot meet the requirements of real-time fault diagnosis of rolling bearings under the cloud computing framework. This paper proposes a quick cloud-edge collaborative bearing fault diagnostic method based on the tradeoff between the advantages and disadvantages of cloud and edge computing. First, a collaborative cloud-based framework and an improved DSCNN-GAP algorithm are suggested to build a general model using the public bearing fault dataset. 
Second, the general model is distributed to each edge node, and a limited number of unique fault samples acquired by each edge node are used to quickly adjust the parameters of the model before running diagnostic tests. Finally, a fusion result is made from the diagnostic results of each edge node by DS evidence theory. Experiment results show that the proposed method not only improves diagnostic accuracy by DSCNN-GAP and fusion of multi-sensors, but also decreases diagnosis time by migration learning with the cloud-edge collaborative framework. Additionally, the method can effectively enhance data security and privacy protection.}, } @article {pmid36124594, year = {2022}, author = {Lin, HY and Tsai, TT and Wu, HR and Ku, MS}, title = {Secure access control using updateable attribute keys.}, journal = {Mathematical biosciences and engineering : MBE}, volume = {19}, number = {11}, pages = {11367-11379}, doi = {10.3934/mbe.2022529}, pmid = {36124594}, issn = {1551-0018}, mesh = {*Algorithms ; Cloud Computing ; *Computer Security ; Confidentiality ; Humans ; }, abstract = {In the era of cloud computing, the technique of access control is vital to protect the confidentiality and integrity of cloud data. From the perspective of servers, they should only allow authenticated clients to gain the access of data. Specifically, the server will share a communication channel with the client by generating a common session key. It is thus regarded as a symmetric key for encrypting data in the current channel. An access control mechanism using attribute-based encryptions is most flexible, since the decryption privilege can be granted to the ones who have sufficient attributes. In the paper, the authors propose a secure access control consisting of the attributed-based mutual authentication and the attribute-based encryption. The most appealing property of our system is that the attribute keys associated with each user is periodically updatable. 
Moreover, we will also show that our system fulfills the security of fuzzy selective-ID assuming the hardness of Decisional Modified Bilinear Diffie-Hellman (DMBDH) problem.}, } @article {pmid36124579, year = {2022}, author = {Liu, D and Li, Z and Wang, C and Ren, Y}, title = {Enabling secure mutual authentication and storage checking in cloud-assisted IoT.}, journal = {Mathematical biosciences and engineering : MBE}, volume = {19}, number = {11}, pages = {11034-11046}, doi = {10.3934/mbe.2022514}, pmid = {36124579}, issn = {1551-0018}, abstract = {Internet of things (IoT) is a technology that can collect the data sensed by the devices for the further real-time services. Using the technique of cloud computing to assist IoT devices in data storing can eliminate the disadvantage of the constrained local storage and computing capability. However, the complex network environment makes cloud servers vulnerable to attacks, and adversaries pretend to be legal IoT clients trying to access the cloud server. Hence, it is necessary to provide a mechanism of mutual authentication for the cloud system to enhance the storage security. In this paper, a secure mutual authentication is proposed for cloud-assisted IoT. Note that the technique of chameleon hash signature is used to construct the authentication. Moreover, the proposed scheme can provide storage checking with the assist of a fully-trusted entity, which highly improves the checking fairness and efficiency. Security analysis proves that the proposed scheme in this paper is correct. 
Performance analysis demonstrates that the proposed scheme can be performed with high efficiency.}, } @article {pmid36124116, year = {2022}, author = {Wu, Y and Zheng, C and Xie, L and Hao, M}, title = {Cloud-Based English Multimedia for Universities Test Questions Modeling and Applications.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {4563491}, pmid = {36124116}, issn = {1687-5273}, mesh = {*Cloud Computing ; Computers ; Humans ; *Multimedia ; Software ; Universities ; }, abstract = {This study constructs a cloud computing-based college English multimedia test question modeling and application through an in-depth study of cloud computing and college English multimedia test questions. The emergence of cloud computing technology undoubtedly provides a new and ideal method to solve test data and paper management problems. This study analyzes the advantages of the Hadoop computing platform and MapReduce computing model and builds a distributed computing platform based on Hadoop using universities' existing hardware and software resources. The study analyzes the advantages of the Hadoop computing platform and the MapReduce computing model. The UML model of the system is given, the system is implemented, the system is tested functionally, and the results of the analysis are given. Multimedia is the critical link to realizing the optimization of English test questions. 
The proper use of multimedia test questions will undoubtedly become an inevitable trend in the development of English test questions in the future, which requires every worker on the education front to continuously analyze and study the problems arising from multimedia teaching, summarize the experience of multimedia teaching, and explore new methods of multimedia teaching, so that multimedia teaching can better promote the optimization of English test questions in colleges and universities and better serve the education teaching.}, } @article {pmid36118826, year = {2022}, author = {Zhang, F and Zhang, Z and Xiao, H}, title = {Research on Medical Big Data Analysis and Disease Prediction Method Based on Artificial Intelligence.}, journal = {Computational and mathematical methods in medicine}, volume = {2022}, number = {}, pages = {4224287}, pmid = {36118826}, issn = {1748-6718}, mesh = {*Artificial Intelligence ; *Big Data ; Cloud Computing ; Data Analysis ; Humans ; }, abstract = {In recent years, the continuous development of big data, cloud services, Internet+, artificial intelligence, and other technologies has accelerated the improvement of data communication services in the traditional pharmaceutical industry. It plays a leading role in the development of my country's pharmaceutical industry, deepening the reform of the health system, improving the efficiency and quality of medical services, and developing new technologies. In this context, we make the following research and draw the following conclusions: (1) the scale of my country's medical big data market is constantly increasing, and the global medical big data market is also increasing. Compared with the global medical big data market, China's medical big data has grown at a faster rate. From the initial 10.33% in 2015, the proportion has reached 38.7% after 7 years, and the proportion has increased by 28.37%. 
(2) Generally speaking, urine is mainly slightly acidic, that is, the pH is around 6.0, the normal range is 5.0 to 7.0, and there are also neutral or slightly alkaline cases; values of 7.5 to 8.0 generally indicate people with some physical problems. In recent years, the pharmaceutical industry has continuously developed technologies such as big data, cloud computing, Internet+, and artificial intelligence by improving data transmission services. As an important strategic resource of the country, the generation of great medical skills and great information is of great significance to the development of my country's pharmaceutical industry and the deepening of the reform of the national medical system. Improve the efficiency and level of medical services, and establish forms and services. Accelerate economic growth. In this sense, we set out to explore.}, } @article {pmid36108415, year = {2022}, author = {Shoeibi, A and Moridian, P and Khodatars, M and Ghassemi, N and Jafari, M and Alizadehsani, R and Kong, Y and Gorriz, JM and Ramírez, J and Khosravi, A and Nahavandi, S and Acharya, UR}, title = {An overview of deep learning techniques for epileptic seizures detection and prediction based on neuroimaging modalities: Methods, challenges, and future works.}, journal = {Computers in biology and medicine}, volume = {149}, number = {}, pages = {106053}, doi = {10.1016/j.compbiomed.2022.106053}, pmid = {36108415}, issn = {1879-0534}, mesh = {Algorithms ; *Deep Learning ; Electroencephalography/methods ; *Epilepsy/diagnostic imaging ; Humans ; Neuroimaging ; Seizures/diagnostic imaging ; }, abstract = {Epilepsy is a disorder of the brain denoted by frequent seizures. The symptoms of seizure include confusion, abnormal staring, and rapid, sudden, and uncontrollable hand movements. Epileptic seizure detection methods involve neurological exams, blood tests, neuropsychological tests, and neuroimaging modalities. 
Among these, neuroimaging modalities have received considerable attention from specialist physicians. One method to facilitate the accurate and fast diagnosis of epileptic seizures is to employ computer-aided diagnosis systems (CADS) based on deep learning (DL) and neuroimaging modalities. This paper has studied a comprehensive overview of DL methods employed for epileptic seizures detection and prediction using neuroimaging modalities. First, DL-based CADS for epileptic seizures detection and prediction using neuroimaging modalities are discussed. Also, descriptions of various datasets, preprocessing algorithms, and DL models which have been used for epileptic seizures detection and prediction have been included. Then, research on rehabilitation tools has been presented, which contains brain-computer interface (BCI), cloud computing, internet of things (IoT), hardware implementation of DL techniques on field-programmable gate array (FPGA), etc. In the discussion section, a comparison has been carried out between research on epileptic seizure detection and prediction. The challenges in epileptic seizures detection and prediction using neuroimaging modalities and DL models have been described. In addition, possible directions for future works in this field, specifically for solving challenges in datasets, DL, rehabilitation, and hardware models, have been proposed. 
The final section is dedicated to the conclusion which summarizes the significant findings of the paper.}, } @article {pmid36107981, year = {2022}, author = {Kim, YK and Kim, HJ and Lee, H and Chang, JW}, title = {Correction: Privacy-preserving parallel kNN classification algorithm using index-based filtering in cloud computing.}, journal = {PloS one}, volume = {17}, number = {9}, pages = {e0274981}, pmid = {36107981}, issn = {1932-6203}, abstract = {[This corrects the article DOI: 10.1371/journal.pone.0267908.].}, } @article {pmid36107827, year = {2022}, author = {Zhuang, Y and Jiang, N}, title = {Progressive privacy-preserving batch retrieval of lung CT image sequences based on edge-cloud collaborative computation.}, journal = {PloS one}, volume = {17}, number = {9}, pages = {e0274507}, pmid = {36107827}, issn = {1932-6203}, mesh = {*Computer Security ; Lung/diagnostic imaging ; *Privacy ; Tomography, X-Ray Computed ; }, abstract = {BACKGROUND: A computer tomography image (CI) sequence can be regarded as a time-series data that is composed of a great deal of nearby and similar CIs. Since the computational and I/O costs of similarity measure, encryption, and decryption calculation during a similarity retrieval of the large CI sequences (CIS) are extremely high, deploying all retrieval tasks in the cloud, however, will lead to excessive computing load on the cloud, which will greatly and negatively affect the retrieval performance.

METHODOLOGIES: To tackle the above challenges, the paper proposes a progressive privacy-preserving Batch Retrieval scheme for the lung CISs based on edge-cloud collaborative computation called the BRS method. There are four supporting techniques to enable the BRS method, such as: 1) batch similarity measure for CISs, 2) CIB-based privacy preserving scheme, 3) uniform edge-cloud index framework, and 4) edge buffering.

RESULTS: The experimental results reveal that our method outperforms the state-of-the-art approaches in terms of efficiency and scalability, drastically reducing response time by lowering network communication costs while enhancing retrieval safety and accuracy.}, } @article {pmid36105640, year = {2022}, author = {Veeraiah, D and Mohanty, R and Kundu, S and Dhabliya, D and Tiwari, M and Jamal, SS and Halifa, A}, title = {Detection of Malicious Cloud Bandwidth Consumption in Cloud Computing Using Machine Learning Techniques.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {4003403}, pmid = {36105640}, issn = {1687-5273}, mesh = {*Cloud Computing ; Fuzzy Logic ; Humans ; *Machine Learning ; }, abstract = {The Internet of Things, sometimes known as IoT, is a relatively new kind of Internet connectivity that connects physical objects to the Internet in a way that was not possible in the past. The Internet of Things is another name for this concept (IoT). The Internet of Things has a larger attack surface as a result of its hyperconnectivity and heterogeneity, both of which are characteristics of the IoT. In addition, since the Internet of Things devices are deployed in managed and uncontrolled contexts, it is conceivable for malicious actors to build new attacks that target these devices. As a result, the Internet of Things (IoT) requires self-protection security systems that are able to autonomously interpret attacks in IoT traffic and efficiently handle the attack scenario by triggering appropriate reactions at a pace that is faster than what is currently available. In order to fulfill this requirement, fog computing must be utilised. This type of computing has the capability of integrating an intelligent self-protection mechanism into the distributed fog nodes. This allows the IoT application to be protected with the least amount of human intervention while also allowing for faster management of attack scenarios. 
Implementing a self-protection mechanism at malicious fog nodes is the primary objective of this research work. This mechanism should be able to detect and predict known attacks based on predefined attack patterns, as well as predict novel attacks based on no predefined attack patterns, and then choose the most appropriate response to neutralise the identified attack. In the environment of the IoT, a distributed Gaussian process regression is used at fog nodes to anticipate attack patterns that have not been established in the past. This allows for the prediction of new cyberattacks in the environment. It predicts attacks in an uncertain IoT setting at a speedier rate and with greater precision than prior techniques. It is able to effectively anticipate both low-rate and high-rate assaults in a more timely manner within the dispersed fog nodes, which enables it to mount a more accurate defence. In conclusion, a fog computing-based self-protection system is developed to choose the most appropriate reaction using fuzzy logic for detected or anticipated assaults using the suggested detection and prediction mechanisms. This is accomplished by utilising a self-protection system that is based on the development of a self-protection system that utilises the suggested detection and prediction mechanisms. 
The findings of the experimental investigation indicate that the proposed system identifies threats, lowers bandwidth usage, and thwarts assaults at a rate that is twenty-five percent faster than the cloud-based system implementation.}, } @article {pmid36103218, year = {2022}, author = {Huang, H and Aschettino, S and Lari, N and Lee, TH and Rosenberg, SS and Ng, X and Muthuri, S and Bakshi, A and Bishop, K and Ezzeldin, H}, title = {A Versatile and Scalable Platform That Streamlines Data Collection for Patient-Centered Studies: Usability and Feasibility Study.}, journal = {JMIR formative research}, volume = {6}, number = {9}, pages = {e38579}, pmid = {36103218}, issn = {2561-326X}, abstract = {BACKGROUND: The Food and Drug Administration Center for Biologics Evaluation and Research (CBER) established the Biologics Effectiveness and Safety (BEST) Initiative with several objectives, including the expansion and enhancement of CBER's access to fit-for-purpose data sources, analytics, tools, and infrastructures to improve the understanding of patient experiences with conditions related to CBER-regulated products. Owing to existing challenges in data collection, especially for rare disease research, CBER recognized the need for a comprehensive platform where study coordinators can engage with study participants and design and deploy studies while patients or caregivers could enroll, consent, and securely participate as well.

OBJECTIVE: This study aimed to increase awareness and describe the design, development, and novelty of the Survey of Health and Patient Experience (SHAPE) platform, its functionality and application, quality improvement efforts, open-source availability, and plans for enhancement.

METHODS: SHAPE is hosted in a Google Cloud environment and comprises 3 parts: the administrator application, participant app, and application programming interface. The administrator can build a study comprising a set of questionnaires and self-report entries through the app. Once the study is deployed, the participant can access the app, consent to the study, and complete its components. To build SHAPE to be scalable and flexible, we leveraged the open-source software development kit, Ionic Framework. This enabled the building and deploying of apps across platforms, including iOS, Android, and progressive web applications, from a single codebase by using standardized web technologies. SHAPE has been integrated with a leading Health Level 7 (HL7®) Fast Healthcare Interoperability Resources (FHIR®) application programming interface platform, 1upHealth, which allows participants to consent to 1-time data pull of their electronic health records. We used an agile-based process that engaged multiple stakeholders in SHAPE's design and development.

RESULTS: SHAPE allows study coordinators to plan, develop, and deploy questionnaires to obtain important end points directly from patients or caregivers. Electronic health record integration enables access to patient health records, which can validate and enhance the accuracy of data-capture methods. The administrator can then download the study data into HL7® FHIR®-formatted JSON files. In this paper, we illustrate how study coordinators can use SHAPE to design patient-centered studies. We demonstrate its broad applicability through a hypothetical type 1 diabetes cohort study and an ongoing pilot study on metachromatic leukodystrophy to implement best practices for designing a regulatory-grade natural history study for rare diseases.

CONCLUSIONS: SHAPE is an intuitive and comprehensive data-collection tool for a variety of clinical studies. Further customization of this versatile and scalable platform allows for multiple use cases. SHAPE can capture patient perspectives and clinical data, thereby providing regulators, clinicians, researchers, and patient advocacy organizations with data to inform drug development and improve patient outcomes.}, } @article {pmid36100587, year = {2022}, author = {Wang, C and Kon, WY and Ng, HJ and Lim, CC}, title = {Experimental symmetric private information retrieval with measurement-device-independent quantum network.}, journal = {Light, science \& applications}, volume = {11}, number = {1}, pages = {268}, pmid = {36100587}, issn = {2047-7538}, abstract = {Secure information retrieval is an essential task in today's highly digitised society. In some applications, it may be necessary that user query's privacy and database content's security are enforced. For these settings, symmetric private information retrieval (SPIR) could be employed, but its implementation is known to be demanding, requiring a private key-exchange network as the base layer. Here, we report for the first time a realisation of provably-secure SPIR supported by a quantum-secure key-exchange network. The SPIR scheme looks at biometric security, offering secure retrieval of 582-byte fingerprint files from a database with 800 entries. 
Our experimental results clearly demonstrate the feasibility of SPIR with quantum secure communications, thereby opening up new possibilities in secure distributed data storage and cloud computing over the future Quantum Internet.}, } @article {pmid36093501, year = {2022}, author = {Ahamed Ahanger, T and Aldaej, A and Atiquzzaman, M and Ullah, I and Yousufudin, M}, title = {Distributed Blockchain-Based Platform for Unmanned Aerial Vehicles.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {4723124}, pmid = {36093501}, issn = {1687-5273}, mesh = {*Blockchain ; Computer Communication Networks ; Computer Security ; Delivery of Health Care ; Unmanned Aerial Devices ; }, abstract = {Internet of Things (IoT)-inspired drone environment is having a greater influence on daily lives in the form of drone-based smart electricity monitoring, traffic routing, and personal healthcare. However, communication between drones and ground control systems must be protected to avoid potential vulnerabilities and improve coordination among scattered UAVs in the IoT context. In the current paper, a distributed UAV scheme is proposed that uses blockchain technology and a network topology similar to the IoT and cloud server to secure communications during data collection and transmission and reduce the likelihood of attack by maliciously manipulated UAVs. As an alternative to relying on a traditional blockchain approach, a unique, safe, and lightweight blockchain architecture is proposed that reduces computing and storage requirements while keeping privacy and security advantages. In addition, a unique reputation-based consensus protocol is built to assure the dependability of the decentralized network. Numerous types of transactions are established to characterize diverse data access. 
To validate the presented blockchain-based distributed system, performance evaluations are conducted to estimate the statistical effectiveness in the form of temporal delay, packet flow efficacy, precision, specificity, sensitivity, and security efficiency.}, } @article {pmid36093500, year = {2022}, author = {Zhu, G and Li, X and Zheng, C and Wang, L}, title = {Multimedia Fusion Privacy Protection Algorithm Based on IoT Data Security under Network Regulations.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {3574812}, pmid = {36093500}, issn = {1687-5273}, mesh = {Algorithms ; Computer Security ; Data Collection ; *Multimedia ; *Privacy ; }, abstract = {This study provides an in-depth analysis and research on multimedia fusion privacy protection algorithms based on IoT data security in a network regulation environment. Aiming at the problem of collusion and conspiracy to deceive users in the process of outsourced computing and outsourced verification, a safe, reliable, and collusion-resistant scheme based on blockchain is studied for IoT outsourced data computing and public verification, with the help of distributed storage methods, where smart devices encrypt the collected data and upload them to the DHT for storage along with the results of this data given by the cloud server. After testing, the constructed model has a privacy-preserving budget value of 0.6 and the smallest information leakage ratio of multimedia fusion data based on IoT data security when the decision tree depth is 6. After using this model under this condition, the maximum value of the information leakage ratio of multimedia fusion data based on IoT data security is reduced from 0.0865 to 0.003, and the data security is significantly improved. 
In the consensus verification process, to reduce the consensus time and ensure the operating efficiency of the system, a consensus node selection algorithm is proposed, thereby reducing the time complexity of the consensus. Based on the smart grid application scenario, the security and performance of the proposed model are analyzed. This study proves the correctness of this scheme by using BAN logic and proves the security of this scheme under the stochastic prediction machine model. Finally, this study compares the security aspects and performance aspects of the scheme with some existing similar schemes and shows that the scheme is feasible under IoT.}, } @article {pmid36093488, year = {2022}, author = {Alyami, J and Sadad, T and Rehman, A and Almutairi, F and Saba, T and Bahaj, SA and Alkhurim, A}, title = {Cloud Computing-Based Framework for Breast Tumor Image Classification Using Fusion of AlexNet and GLCM Texture Features with Ensemble Multi-Kernel Support Vector Machine (MK-SVM).}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {7403302}, pmid = {36093488}, issn = {1687-5273}, mesh = {Aged ; *Breast Neoplasms/diagnostic imaging ; Cloud Computing ; Diagnosis, Computer-Assisted/methods ; Female ; Humans ; Image Processing, Computer-Assisted/methods ; *Support Vector Machine ; }, abstract = {Breast cancer is common among women all over the world. Early identification of breast cancer lowers death rates. However, it is difficult to determine whether these are cancerous or noncancerous lesions due to their inconsistencies in image appearance. Machine learning techniques are widely employed in imaging analysis as a diagnostic method for breast cancer classification. However, patients cannot take advantage of remote areas as these systems are unavailable on clouds. Thus, breast cancer detection for remote patients is indispensable, which can only be possible through cloud computing. 
The user is allowed to feed images into the cloud system, which is further investigated through the computer aided diagnosis (CAD) system. Such systems could also be used to track patients, older adults, especially with disabilities, particularly in remote areas of developing countries that do not have medical facilities and paramedic staff. In the proposed CAD system, a fusion of AlexNet architecture and GLCM (gray-level cooccurrence matrix) features are used to extract distinguishable texture features from breast tissues. Finally, to attain higher precision, an ensemble of MK-SVM is used. For testing purposes, the proposed model is applied to the MIAS dataset, a commonly used breast image database, and achieved 96.26% accuracy.}, } @article {pmid36093280, year = {2022}, author = {Xie, Y and Zhang, K and Kou, H and Mokarram, MJ}, title = {Private anomaly detection of student health conditions based on wearable sensors in mobile cloud computing.}, journal = {Journal of cloud computing (Heidelberg, Germany)}, volume = {11}, number = {1}, pages = {38}, pmid = {36093280}, issn = {2192-113X}, abstract = {With the continuous spread of COVID-19 virus, how to guarantee the healthy living of people especially the students who are of relative weak physique is becoming a key research issue of significant values. Specifically, precise recognition of the anomaly in student health conditions is beneficial to the quick discovery of potential patients. However, there are so many students in each school that the education managers cannot know about the health conditions of students in a real-time manner and accurately recognize the possible anomaly among students quickly. Fortunately, the quick development of mobile cloud computing technologies and wearable sensors has provided a promising way to monitor the real-time health conditions of students and find out the anomalies timely. However, two challenges are present in the above anomaly detection issue. 
First, the health data monitored by massive wearable sensors are often massive and updated frequently, which probably leads to high sensor-cloud transmission cost for anomaly detection. Second, the health data of students are often sensitive enough, which probably impedes the integration of health data in cloud environment even renders the health data-based anomaly detection infeasible. In view of these challenges, we propose a time-efficient and privacy-aware anomaly detection solution for students with wearable sensors in mobile cloud computing environment. At last, we validate the effectiveness and efficiency of our work via a set of simulated experiments.}, } @article {pmid36092002, year = {2022}, author = {Vadde, U and Kompalli, VS}, title = {Energy efficient service placement in fog computing.}, journal = {PeerJ. Computer science}, volume = {8}, number = {}, pages = {e1035}, pmid = {36092002}, issn = {2376-5992}, abstract = {The Internet of Things (IoT) concept evolved into a slew of applications. To satisfy the requests of these applications, using cloud computing is troublesome because of the high latency caused by the distance between IoT devices and cloud resources. Fog computing has become promising with its geographically distributed infrastructure for providing resources using fog nodes near IoT devices, thereby reducing the bandwidth and latency. A geographical distribution, heterogeneity and resource constraints of fog nodes introduce the key challenge of placing application modules/services in such a large scale infrastructure. In this work, we propose an improved version of the JAYA approach for optimal placement of modules that minimizes the energy consumption of a fog landscape. We analyzed the performance in terms of energy consumption, network usage, delays and execution time. 
Using iFogSim, we ran simulations and observed that our approach reduces on average 31% of the energy consumption compared to modern methods.}, } @article {pmid36091662, year = {2023}, author = {Singh, A and Chatterjee, K}, title = {Edge computing based secure health monitoring framework for electronic healthcare system.}, journal = {Cluster computing}, volume = {26}, number = {2}, pages = {1205-1220}, pmid = {36091662}, issn = {1386-7857}, abstract = {Nowadays, Smart Healthcare Systems (SHS) are frequently used by people for personal healthcare observations using various smart devices. The SHS uses IoT technology and cloud infrastructure for data capturing, transmitting it through smart devices, data storage, processing, and healthcare advice. Processing such a huge amount of data from numerous IoT devices in a short time is quite challenging. Thus, technological frameworks such as edge computing or fog computing can be used as a middle layer between cloud and user in SHS. It reduces the response time for data processing at the lower level (edge level). But, Edge of Things (EoT) also suffers from security and privacy issues. A robust healthcare monitoring framework with secure data storage and access is needed. It will provide a quick response in case of the production of abnormal data and store/access the sensitive data securely. This paper proposed a Secure Framework based on the Edge of Things (SEoT) for Smart healthcare systems. This framework is mainly designed for real-time health monitoring, maintaining the security and confidentiality of the healthcare data in a controlled manner. This paper included clustering approaches for analyzing bio-signal data for abnormality detection and Attribute-Based Encryption (ABE) for bio-signal data security and secure access. 
The experimental results of the proposed framework show improved performance with maintaining the accuracy of up to 98.5% and data security.}, } @article {pmid36091551, year = {2022}, author = {Guo, C and Li, H}, title = {Application of 5G network combined with AI robots in personalized nursing in China: A literature review.}, journal = {Frontiers in public health}, volume = {10}, number = {}, pages = {948303}, pmid = {36091551}, issn = {2296-2565}, mesh = {Artificial Intelligence ; China ; Delivery of Health Care ; Humans ; *Robotics ; *Telemedicine ; }, abstract = {The medical and healthcare industry is currently developing into digitization. Attributed to the rapid development of advanced technologies such as the 5G network, cloud computing, artificial intelligence (AI), and big data, and their wide applications in the medical industry, the medical model is shifting into an intelligent one. By combining the 5G network with cloud healthcare platforms and AI, nursing robots can effectively improve the overall medical efficacy. Meanwhile, patients can enjoy personalized medical services, the supply and the sharing of medical and healthcare services are promoted, and the digital transformation of the healthcare industry is accelerated. In this paper, the application and practice of 5G network technology in the medical industry are introduced, including telecare, 5G first-aid remote medical service, and remote robot applications. Also, by combining application characteristics of AI and development requirements of smart healthcare, the overall planning, intelligence, and personalization of the 5G network in the medical industry, as well as opportunities and challenges of its application in the field of nursing are discussed. 
This paper provides references to the development and application of 5G network technology in the field of medical service.}, } @article {pmid36086197, year = {2022}, author = {Amin, AB and Wang, S and David, U and Noh, Y}, title = {Applicability of Cloud Native-based Healthcare Monitoring Platform (CN-HMP) in Older Adult Facilities.}, journal = {Annual International Conference of the IEEE Engineering in Medicine and Biology Society. IEEE Engineering in Medicine and Biology Society. Annual International Conference}, volume = {2022}, number = {}, pages = {2684-2688}, doi = {10.1109/EMBC48229.2022.9871998}, pmid = {36086197}, issn = {2694-0604}, mesh = {Aged ; *Cloud Computing ; Computer Communication Networks ; *Delivery of Health Care ; Electrocardiography ; Health Facilities ; Humans ; }, abstract = {Over the past few decades, the world has faced the huge demographic change in the aging population, which makes significant challenges in healthcare systems. The increasing older adult population along with the current health workforce shortage creates a struggling situation for current facilities and personnel to meet the demand. To tackle this situation, cloud computing is a fast-growing area in digital healthcare and it allows to settle up a modern distributed system environment, capable of scaling to tens of thousands of self healing multitenant nodes for healthcare applications. In addition, cloud native architecture is recently getting focused as an ideal structure for multi-node based healthcare monitoring system due to its high scalability, low latency, and rapid and stable maintainability. In this study, we proposed a cloud native-based rapid, robust, and productive digital healthcare platform which allows to manage and care for a large number of patient groups. 
To validate our platform, we simulated our Cloud Native-based Healthcare Monitoring Platform (CN-HMP) with real-time setup and evaluated the performance in terms of request response time, data packets delivery, and end-to-end latency. We found it showing less than 0.1 ms response time in at least 92.5% of total requests up to 3K requests, and no data packet loss along with more than 28% of total data packets with no latency and only ≈ 0.6% of those with maximum latency (3 ms) in 24-hour observation. Clinical Relevance- This study and relevant experiment demonstrate the suitability of the CN-HMP to support providers and nurses for elderly patients healthcare with regular monitoring in older adult facilities.}, } @article {pmid36082003, year = {2021}, author = {Aghababaei, M and Ebrahimi, A and Naghipour, AA and Asadi, E and Verrelst, J}, title = {Vegetation Types Mapping Using Multi-Temporal Landsat Images in the Google Earth Engine Platform.}, journal = {Remote sensing}, volume = {13}, number = {22}, pages = {4683}, pmid = {36082003}, issn = {2072-4292}, support = {755617/ERC_/European Research Council/International ; }, abstract = {Vegetation Types (VTs) are important managerial units, and their identification serves as essential tools for the conservation of land covers. Despite a long history of Earth observation applications to assess and monitor land covers, the quantitative detection of sparse VTs remains problematic, especially in arid and semiarid areas. This research aimed to identify appropriate multi-temporal datasets to improve the accuracy of VTs classification in a heterogeneous landscape in Central Zagros, Iran. To do so, first the Normalized Difference Vegetation Index (NDVI) temporal profile of each VT was identified in the study area for the period of 2018, 2019, and 2020. This data revealed strong seasonal phenological patterns and key periods of VTs separation. 
It led us to select the optimal time series images to be used in the VTs classification. We then compared single-date and multi-temporal datasets of Landsat 8 images within the Google Earth Engine (GEE) platform as the input to the Random Forest classifier for VTs detection. The single-date classification gave a median Overall Kappa (OK) and Overall Accuracy (OA) of 51% and 64%, respectively. Instead, using multi-temporal images led to an overall kappa accuracy of 74% and an overall accuracy of 81%. Thus, the exploitation of multi-temporal datasets favored accurate VTs classification. In addition, the presented results underline that available open access cloud-computing platforms such as the GEE facilitate identifying optimal periods and multitemporal imagery for VTs classification.}, } @article {pmid36081832, year = {2022}, author = {Estévez, J and Salinero-Delgado, M and Berger, K and Pipia, L and Rivera-Caicedo, JP and Wocher, M and Reyes-Muñoz, P and Tagliabue, G and Boschetti, M and Verrelst, J}, title = {Gaussian processes retrieval of crop traits in Google Earth Engine based on Sentinel-2 top-of-atmosphere data.}, journal = {Remote sensing of environment}, volume = {273}, number = {}, pages = {112958}, pmid = {36081832}, issn = {0034-4257}, support = {755617/ERC_/European Research Council/International ; }, abstract = {The unprecedented availability of optical satellite data in cloud-based computing platforms, such as Google Earth Engine (GEE), opens new possibilities to develop crop trait retrieval models from the local to the planetary scale. Hybrid retrieval models are of interest to run in these platforms as they combine the advantages of physically-based radiative transfer models (RTM) with the flexibility of machine learning regression algorithms. Previous research with GEE primarily relied on processing bottom-of-atmosphere (BOA) reflectance data, which requires atmospheric correction. 
In the present study, we implemented hybrid models directly into GEE for processing Sentinel-2 (S2) Level-1C (L1C) top-of-atmosphere (TOA) reflectance data into crop traits. To achieve this, a training dataset was generated using the leaf-canopy RTM PROSAIL in combination with the atmospheric model 6SV. Gaussian process regression (GPR) retrieval models were then established for eight essential crop traits namely leaf chlorophyll content, leaf water content, leaf dry matter content, fractional vegetation cover, leaf area index (LAI), and upscaled leaf variables (i.e., canopy chlorophyll content, canopy water content and canopy dry matter content). An important pre-requisite for implementation into GEE is that the models are sufficiently light in order to facilitate efficient and fast processing. Successful reduction of the training dataset by 78% was achieved using the active learning technique Euclidean distance-based diversity (EBD). With the EBD-GPR models, highly accurate validation results of LAI and upscaled leaf variables were obtained against in situ field data from the validation study site Munich-North-Isar (MNI), with normalized root mean square errors (NRMSE) from 6% to 13%. Using an independent validation dataset of similar crop types (Italian Grosseto test site), the retrieval models showed moderate to good performances for canopy-level variables, with NRMSE ranging from 14% to 50%, but failed for the leaf-level estimates. Obtained maps over the MNI site were further compared against Sentinel-2 Level 2 Prototype Processor (SL2P) vegetation estimates generated from the ESA Sentinels' Application Platform (SNAP) Biophysical Processor, proving high consistency of both retrievals (R [2] from 0.80 to 0.94). Finally, thanks to the seamless GEE processing capability, the TOA-based mapping was applied over the entirety of Germany at 20 m spatial resolution including information about prediction uncertainty. 
The obtained maps provided confidence of the developed EBD-GPR retrieval models for integration in the GEE framework and national scale mapping from S2-L1C imagery. In summary, the proposed retrieval workflow demonstrates the possibility of routine processing of S2 TOA data into crop traits maps at any place on Earth as required for operational agricultural applications.}, } @article {pmid36081813, year = {2021}, author = {Salinero-Delgado, M and Estévez, J and Pipia, L and Belda, S and Berger, K and Gómez, VP and Verrelst, J}, title = {Monitoring Cropland Phenology on Google Earth Engine Using Gaussian Process Regression.}, journal = {Remote sensing}, volume = {14}, number = {1}, pages = {146}, pmid = {36081813}, issn = {2072-4292}, support = {755617/ERC_/European Research Council/International ; }, abstract = {Monitoring cropland phenology from optical satellite data remains a challenging task due to the influence of clouds and atmospheric artifacts. Therefore, measures need to be taken to overcome these challenges and gain better knowledge of crop dynamics. The arrival of cloud computing platforms such as Google Earth Engine (GEE) has enabled us to propose a Sentinel-2 (S2) phenology end-to-end processing chain. To achieve this, the following pipeline was implemented: (1) the building of hybrid Gaussian Process Regression (GPR) retrieval models of crop traits optimized with active learning, (2) implementation of these models on GEE (3) generation of spatiotemporally continuous maps and time series of these crop traits with the use of gap-filling through GPR fitting, and finally, (4) calculation of land surface phenology (LSP) metrics such as the start of season (SOS) or end of season (EOS). Overall, from good to high performance was achieved, in particular for the estimation of canopy-level traits such as leaf area index (LAI) and canopy chlorophyll content, with normalized root mean square errors (NRMSE) of 9% and 10%, respectively. 
By means of the GPR gap-filling time series of S2, entire tiles were reconstructed, and resulting maps were demonstrated over an agricultural area in Castile and Leon, Spain, where crop calendar data were available to assess the validity of LSP metrics derived from crop traits. In addition, phenology derived from the normalized difference vegetation index (NDVI) was used as reference. NDVI not only proved to be a robust indicator for the calculation of LSP metrics, but also served to demonstrate the good phenology quality of the quantitative trait products. Thanks to the GEE framework, the proposed workflow can be realized anywhere in the world and for any time window, thus representing a shift in the satellite data processing paradigm. We anticipate that the produced LSP metrics can provide meaningful insights into crop seasonal patterns in a changing environment that demands adaptive agricultural production.}, } @article {pmid36081177, year = {2022}, author = {Kum, S and Oh, S and Yeom, J and Moon, J}, title = {Optimization of Edge Resources for Deep Learning Application with Batch and Model Management.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {17}, pages = {}, pmid = {36081177}, issn = {1424-8220}, support = {2021-0-01578//Institute for Information and Communications Technology Promotion/ ; }, mesh = {*Deep Learning ; Workload ; }, abstract = {As deep learning technology paves its way, real-world applications that make use of it become popular these days. Edge computing architecture is one of the service architectures to realize the deep learning based service, which makes use of the resources near the data source or client. In Edge computing architecture it becomes important to manage resource usage, and there is research on optimization of deep learning, such as pruning or binarization, which makes deep learning models more lightweight, along with the research for the efficient distribution of workloads on cloud or edge resources. 
Those are to reduce the workload on edge resources. In this paper, a usage optimization method with batch and model management is proposed. The proposed method is to increase the utilization of GPU resource by modifying the batch size of the input of an inference application. To this end, the inference pipelines are identified to see how the different kinds of resources are used, and then the effect of batch inference on GPU is measured. The proposed method consists of a few modules, including a tool for batch size management which is able to change a batch size with respect to the available resources, and another one for model management which supports on-the-fly update of a model. The proposed methods are implemented on a real-time video analysis application and deployed in the Kubernetes cluster as a Docker container. The result shows that the proposed method can optimize the usage of edge resources for real-time video analysis deep learning applications.}, } @article {pmid36081143, year = {2022}, author = {Strigaro, D and Cannata, M and Lepori, F and Capelli, C and Lami, A and Manca, D and Seno, S}, title = {Open and Cost-Effective Digital Ecosystem for Lake Water Quality Monitoring.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {17}, pages = {}, pmid = {36081143}, issn = {1424-8220}, support = {523544//European Commission/ ; 523544//Repubblica e Cantone Ticino/ ; }, mesh = {Cost-Benefit Analysis ; *Ecosystem ; *Lakes ; Software ; Water Quality ; }, abstract = {In some sectors of the water resources management, the digital revolution process is slowed by some blocking factors such as costs, lack of digital expertise, resistance to change, etc. In addition, in the era of Big Data, many are the sources of information available in this field, but they are often not fully integrated. 
The adoption of different proprietary solutions to sense, collect and manage data is one of the main problems that hampers the availability of a fully integrated system. In this context, the aim of the project is to verify if a fully open, cost-effective and replicable digital ecosystem for lake monitoring can fill this gap and help the digitalization process using cloud based technology and an Automatic High-Frequency Monitoring System (AHFM) built using open hardware and software components. Once developed, the system is tested and validated in a real case scenario by integrating the historical databases and by checking the performance of the AHFM system. The solution applied the edge computing paradigm in order to move some computational work from server to the edge and fully exploiting the potential offered by low power consuming devices.}, } @article {pmid36081126, year = {2022}, author = {Azamuddin, WMH and Aman, AHM and Hassan, R and Mansor, N}, title = {Comparison of Named Data Networking Mobility Methodology in a Merged Cloud Internet of Things and Artificial Intelligence Environment.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {17}, pages = {}, pmid = {36081126}, issn = {1424-8220}, support = {FRGS/1/2019/ICT03/UKM/02/1//National University of Malaysia/ ; }, mesh = {Artificial Intelligence ; Cloud Computing ; *Internet of Things ; Technology ; }, abstract = {In-network caching has evolved into a new paradigm, paving the way for the creation of Named Data Networking (NDN). Rather than simply being typical Internet technology, NDN serves a range of functions, with a focus on consumer-driven network architecture. The NDN design has been proposed as a method for replacing Internet Protocol (IP) addresses with identified content. This study adds to current research on NDN, artificial intelligence (AI), cloud computing, and the Internet of Things (IoT). 
The core contribution of this paper is the merging of cloud IoT (C-IoT) and NDN-AI-IoT. To be precise, this study provides possible methodological and parameter explanations of the technologies via three methods: KITE, a producer mobility support scheme (PMSS), and hybrid network mobility (hybrid NeMO). KITE uses the indirection method to transmit content using simple NDN communication; the PMSS improves producer operation by reducing handover latency; and hybrid NeMO provides a binding information table to replace the base function of forwarding information. This study also describes mathematical equations for signaling cost and handover latency. Using the network simulator ndnSIM NS-3, this study highlights producer mobility operation. Mathematical equations for each methodology are developed based on the mobility scenario to measure handover latency and signaling cost. The results show that the efficiency of signaling cost for hybrid NeMO is approximately 4% better than that of KITE and the PMSS, while the handover latency for hybrid NeMO is 46% lower than that of KITE and approximately 60% lower than that of the PMSS.}, } @article {pmid36080827, year = {2022}, author = {McRae, MP and Rajsri, KS and Alcorn, TM and McDevitt, JT}, title = {Smart Diagnostics: Combining Artificial Intelligence and In Vitro Diagnostics.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {17}, pages = {}, pmid = {36080827}, issn = {1424-8220}, support = {R01DE024392/NH/NIH HHS/United States ; 5U54EB027690-04/NH/NIH HHS/United States ; 5 U01 DE017793-2/NH/NIH HHS/United States ; N/A//Renaissance Health Services Corporation/ ; R01DE031319-01/NH/NIH HHS/United States ; R01 DE031319/DE/NIDCR NIH HHS/United States ; N/A//Delta Dental of Michigan/ ; 4R44DE 025798-02/NH/NIH HHS/United States ; N/A//Cancer Prevention and Research Institute of Texas/ ; 3 U01 DE017793-02S1/NH/NIH HHS/United States ; 1RC2DE020785-01/NH/NIH HHS/United States ; }, mesh = {Artificial Intelligence ; 
*Biosensing Techniques ; *COVID-19/diagnosis ; COVID-19 Testing ; Humans ; Microfluidics ; Point-of-Care Systems ; }, abstract = {We are beginning a new era of Smart Diagnostics-integrated biosensors powered by recent innovations in embedded electronics, cloud computing, and artificial intelligence (AI). Universal and AI-based in vitro diagnostics (IVDs) have the potential to exponentially improve healthcare decision making in the coming years. This perspective covers current trends and challenges in translating Smart Diagnostics. We identify essential elements of Smart Diagnostics platforms through the lens of a clinically validated platform for digitizing biology and its ability to learn disease signatures. This platform for biochemical analyses uses a compact instrument to perform multiclass and multiplex measurements using fully integrated microfluidic cartridges compatible with the point of care. Image analysis digitizes biology by transforming fluorescence signals into inputs for learning disease/health signatures. The result is an intuitive Score reported to the patients and/or providers. This AI-linked universal diagnostic system has been validated through a series of large clinical studies and used to identify signatures for early disease detection and disease severity in several applications, including cardiovascular diseases, COVID-19, and oral cancer. The utility of this Smart Diagnostics platform may extend to multiple cell-based oncology tests via cross-reactive biomarkers spanning oral, colorectal, lung, bladder, esophageal, and cervical cancers, and is well-positioned to improve patient care, management, and outcomes through deployment of this resilient and scalable technology. 
Lastly, we provide a future perspective on the direction and trajectory of Smart Diagnostics and the transformative effects they will have on health care.}, } @article {pmid36079676, year = {2022}, author = {Shi, F and Zhou, B and Zhou, H and Zhang, H and Li, H and Li, R and Guo, Z and Gao, X}, title = {Spatial Autocorrelation Analysis of Land Use and Ecosystem Service Value in the Huangshui River Basin at the Grid Scale.}, journal = {Plants (Basel, Switzerland)}, volume = {11}, number = {17}, pages = {}, pmid = {36079676}, issn = {2223-7747}, support = {2019QZKK0105//the Second Qinghai-Tibet Plateau Scientific Expedition and Research Program/ ; U21A2021//the National Natural Science Foundation of China/ ; 2021-ZJ-913//the Natural Science Foundation of Qinghai Province of China/ ; }, abstract = {The Huangshui River Basin is one of the most densely populated areas on the Qinghai-Tibet Plateau and is characterized by a high level of human activity. The contradiction between ecological protection and socioeconomic development has become increasingly prominent; determining how to achieve the balanced and coordinated development of the Huangshui River Basin is an important task. Thus, this study used the Google Earth Engine (GEE) cloud-computing platform and Sentinel-1/2 data, supplemented with an ALOS digital elevation model (ALOS DEM) and field survey data, and combined a remote sensing classification method, grid method, and ecosystem service value (ESV) evaluation method to study the spatial correlation and interaction between land use (LU) and ESV in the Huangshui River Basin. The following results were obtained: (1) on the GEE platform, Sentinel-1/2 active and passive remote sensing data, combined with the gradient tree-boosting algorithm, can efficiently produce highly accurate LU data with a spatial resolution of 10 m in the Huangshui River Basin; the overall accuracy (OA) reached 88%. 
(2) The total ESV in the Huangshui River Basin in 2020 was CNY 33.18 billion (USD 4867.2 million), of which woodland and grassland were the main contributors to ESV. In the Huangshui River Basin, the LU type, LU degree, and ESV have significant positive spatial correlations, with urban and agricultural areas showing an H-H agglomeration in terms of LU degree, with woodlands, grasslands, reservoirs, and wetlands showing an H-H agglomeration in terms of ESV. (3) There is a significant negative spatial correlation between the LU degree and ESV in the Huangshui River Basin, indicating that the enhancement of the LU degree in the basin could have a negative spatial spillover effect on the ESV of surrounding areas. Thus, green development should be the future direction of progress in the Huangshui River Basin, i.e., while maintaining and expanding the land for ecological protection and restoration, and the LU structure should be actively adjusted to ensure ecological security and coordinated and sustainable socioeconomic development in the Basin.}, } @article {pmid36078329, year = {2022}, author = {Feng, H and Wang, F and Song, G and Liu, L}, title = {Digital Transformation on Enterprise Green Innovation: Effect and Transmission Mechanism.}, journal = {International journal of environmental research and public health}, volume = {19}, number = {17}, pages = {}, pmid = {36078329}, issn = {1660-4601}, mesh = {China ; Financing, Government ; *Government ; *Sustainable Development ; }, abstract = {With the development of blockchain, big data, cloud computing and other new technologies, how to achieve innovative development and green sustainable development in digital transformation has become one of the key issues for enterprises to obtain and maintain core competitiveness. However, little of the literature has paid attention to the impact of digital transformation on enterprise green innovation. 
Using the data of Chinese A-share listed companies from 2010 to 2020, this paper empirically analyzes the impact of enterprise digital transformation on green innovation and its transmission mechanism, by constructing double fixed-effect models. The results show that digital transformation has remarkably promoted the green innovation of enterprises. R&D investment, government subsidies, and income tax burden have played a conductive role between digital transformation and enterprise green innovation. Furthermore, digital transformation can significantly promote the high-quality green innovation of enterprises and also plays a more significant role in promoting the green innovation of high-tech enterprises and state-owned enterprises. A robustness test is carried out by using the lag data and changing the measurement methods of the dependent variable and independent variables, and the research conclusions are still valid. Based on resource-based theory and dynamic capability theory, this paper reveals the impact path of digital transformation on enterprise green innovation, further expanding the research field of digital transformation and enriching the research on the influencing factors of enterprise green innovation. 
This paper provides policy suggestions for the government to improve the enterprise green innovation level by increasing government subsidies and providing tax incentives and also provides reference for digital transformation enterprises to accelerate green innovation by increasing R&D investment, obtaining government subsidies, and acquiring tax policy support.}, } @article {pmid36075919, year = {2022}, author = {Sheffield, NC and Bonazzi, VR and Bourne, PE and Burdett, T and Clark, T and Grossman, RL and Spjuth, O and Yates, AD}, title = {From biomedical cloud platforms to microservices: next steps in FAIR data and analysis.}, journal = {Scientific data}, volume = {9}, number = {1}, pages = {553}, pmid = {36075919}, issn = {2052-4463}, support = {201535/Z/16/Z/WT_/Wellcome Trust/United Kingdom ; R35GM128636//U.S. Department of Health & Human Services | NIH | National Institute of General Medical Sciences (NIGMS)/ ; }, abstract = {The biomedical research community is investing heavily in biomedical cloud platforms. Cloud computing holds great promise for addressing challenges with big data and ensuring reproducibility in biology. However, despite their advantages, cloud platforms in and of themselves do not automatically support FAIRness. The global push to develop biomedical cloud platforms has led to new challenges, including platform lock-in, difficulty integrating across platforms, and duplicated effort for both users and developers. Here, we argue that these difficulties are systemic and emerge from incentives that encourage development effort on self-sufficient platforms and data repositories instead of interoperable microservices. We argue that many of these issues would be alleviated by prioritizing microservices and access to modular data in smaller chunks or summarized form. We propose that emphasizing modularity and interoperability would lead to a more powerful Unix-like ecosystem of web services for biomedical analysis and data retrieval. 
We challenge funders, developers, and researchers to support a vision to improve interoperability through microservices as the next generation of cloud-based bioinformatics.}, } @article {pmid36072746, year = {2022}, author = {Cheng, Q and Dang, CN}, title = {Using GIS Remote Sensing Image Data for Wetland Monitoring and Environmental Simulation.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {7886358}, pmid = {36072746}, issn = {1687-5273}, mesh = {Artificial Intelligence ; Environmental Monitoring/methods ; Geographic Information Systems ; *Remote Sensing Technology ; *Wetlands ; }, abstract = {Through a comprehensive theoretical basis and actual test analysis of the application system design and functional efficiency of the cloud platform, this paper puts forward an artificial intelligence environmental data monitoring and wetland environmental simulation method based on GIS remote sensing images. First, the basic storage and computing functions have been enhanced at the physical layer. Second, the middleware layer is more flexible in the use of management methods and strategies. There are many strategies and methods that can be used in combination. Finally, based on this, the application system design framework is more convenient and faster so that you can focus on business logic, and the strategic advantages of certain functions are very obvious. The method of object-oriented classification and visual interpretation using UAV image data and satellite remote sensing images from the typical recovery area and treatment area of wetland from 2016 to 2020 is given in detail together to extract wetland information and use GIS software for dynamic calculation. 
Using the wetland transmission matrix method, the distribution map of the characteristic types of the survey areas in the four periods and the conversion status of the characteristic types at each stage were obtained, and the effect of wetland treatment was quantitatively studied.}, } @article {pmid36072717, year = {2022}, author = {Aggarwal, A and Kumar, S and Bhatt, A and Shah, MA}, title = {Solving User Priority in Cloud Computing Using Enhanced Optimization Algorithm in Workflow Scheduling.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {7855532}, pmid = {36072717}, issn = {1687-5273}, mesh = {*Algorithms ; *Cloud Computing ; Workflow ; }, abstract = {Cloud computing is a procedure of stockpiling as well as retrieval of data or computer services over the Internet that allows all its users to remotely access the data centers. Cloud computing provides all required services to the users, but every platform has its share of pros and cons, and another major problem in the cloud is task scheduling or workflow scheduling. Multiple factors are becoming a challenge for scheduling in cloud computing namely the heterogeneity of resources, tasks, and user priority. User priority has been encountered as the most challenging problem during the last decade as the number of users is increasing worldwide. This issue has been resolved by an advanced encryption standard (AES) algorithm, which decreases the response time and execution delay of the user-request. There are multifarious tasks, for instance, deploying the data on the cloud, that will be executed according to first come first serve (FCFS) and not on the payment basis, which provides an ease to the users. These investigated techniques are 30.21%, 25.20%, 25.30%, 30.25%, 24.26%, and 36.98% improved in comparison with the traditional FFOA, DE, ABC, PSO, GA, and ETC, respectively. 
Moreover, during iteration number 5, this approach is 15.20%, 20.22%, 30.56%, 26.30%, and 36.23% improved than that of the traditional techniques FFOA, DE, ABC, PSO, GA, and ETC, respectively. This investigated method is more efficient and applicable in certain arenas where user priority is the primary concern and can offer all the required services to the users without any interruption.}, } @article {pmid36065132, year = {2022}, author = {Feser, M and König, P and Fiebig, A and Arend, D and Lange, M and Scholz, U}, title = {On the way to plant data commons - a genotyping use case.}, journal = {Journal of integrative bioinformatics}, volume = {19}, number = {4}, pages = {}, pmid = {36065132}, issn = {1613-4516}, mesh = {*Ecosystem ; Genotype ; *Computational Biology ; Software ; }, abstract = {Over the last years it has been observed that the progress in data collection in life science has created increasing demand and opportunities for advanced bioinformatics. This includes data management as well as the individual data analysis and often covers the entire data life cycle. A variety of tools have been developed to store, share, or reuse the data produced in the different domains such as genotyping. Especially imputation, as a subfield of genotyping, requires good Research Data Management (RDM) strategies to enable use and re-use of genotypic data. To aim for sustainable software, it is necessary to develop tools and surrounding ecosystems, which are reusable and maintainable. Reusability in the context of streamlined tools can e.g. be achieved by standardizing the input and output of the different tools and adapting to open and broadly used file formats. By using such established file formats, the tools can also be connected with others, improving the overall interoperability of the software. Finally, it is important to build strong communities that maintain the tools by developing and contributing new features and maintenance updates. 
In this article, concepts for this will be presented for an imputation service.}, } @article {pmid36062125, year = {2022}, author = {Guan, J and Xu, H and Wang, Y and Ma, Y and Wang, Y and Gao, R and Yu, K}, title = {Digital Economy and Health: A Case Study of a Leading Enterprise's Value Mining Mode in the Global Big Health Market.}, journal = {Frontiers in public health}, volume = {10}, number = {}, pages = {904186}, pmid = {36062125}, issn = {2296-2565}, mesh = {Aged ; Bayes Theorem ; *COVID-19/epidemiology ; *Ecosystem ; Humans ; Industry ; }, abstract = {Coronavirus disease 2019 (COVID-19) swept across the world and posed a serious threat to human health. Health and elderly care enterprises are committed to continuously improving people's health. With the rapid development of the digital economy, many enterprises have established digital product-service ecosystems after combining "Internet +," big data, cloud computing, and the big health industry. This paper uses the case study method to analyze the overseas market value mining mode of health and elderly care enterprises through in-depth research on leading health and elderly care enterprises. This study explores the value mining mode of the leading enterprise's global big health market using a cluster analysis and Bayesian model with the support of data on geographical characteristics, users' sleep habits, and national big health. 
This paper theoretically summarizes the successful cases of health and elderly care enterprises through digital transformation, which provides a useful reference for the intelligent transformation of the health and elderly care industry.}, } @article {pmid36062066, year = {2022}, author = {Rufin, P and Bey, A and Picoli, M and Meyfroidt, P}, title = {Large-area mapping of active cropland and short-term fallows in smallholder landscapes using PlanetScope data.}, journal = {International journal of applied earth observation and geoinformation : ITC journal}, volume = {112}, number = {}, pages = {102937}, pmid = {36062066}, issn = {1569-8432}, abstract = {Cropland mapping in smallholder landscapes is challenged by complex and fragmented landscapes, labor-intensive and unmechanized land management causing high within-field variability, rapid dynamics in shifting cultivation systems, and substantial proportions of short-term fallows. To overcome these challenges, we here present a large-area mapping framework to identify active cropland and short-term fallows in smallholder landscapes for the 2020/2021 growing season at 4.77 m spatial resolution. Our study focuses on Northern Mozambique, an area comprising 381,698 km[2]. The approach is based on Google Earth Engine and time series of PlanetScope mosaics made openly available through Norway's International Climate and Forest Initiative (NICFI) data program. We conducted multi-temporal coregistration of the PlanetScope data using seasonal Sentinel-2 base images and derived consistent and gap-free seasonal time series metrics to classify active cropland and short-term fallows. An iterative active learning framework based on Random Forest class probabilities was used for training rare classes and uncertain regions. The map was accurate (area-adjusted overall accuracy 88.6% ± 1.5%), with the main error type being the commission of active cropland. 
Error-adjusted area estimates of active cropland extent (61,799.5 km[2] ± 4,252.5 km[2]) revealed that existing global and regional land cover products tend to under-, or over-estimate active cropland extent, respectively. Short-term fallows occupied 28.9% of the cropland in our reference sample (13% of the mapped cropland), with consolidated agricultural regions showing the highest shares of short-term fallows. Our approach relies on openly available PlanetScope data and cloud-based processing in Google Earth Engine, which minimizes financial constraints and maximizes replicability of the methods. All code and maps were made available for further use.}, } @article {pmid36061493, year = {2022}, author = {Zhou, D}, title = {Mobility and interlinkage: the transformation and new approaches for anthropological research.}, journal = {International journal of anthropology and ethnology}, volume = {6}, number = {1}, pages = {13}, doi = {10.1186/s41257-022-00072-x}, pmid = {36061493}, issn = {2366-1003}, abstract = {Mobility and interlinkage have become the most important characteristics of our time. The mobility and interlinkage of people, material and information constitute the way and rules of the operation of today's world. Internet links, cloud computing, complex database and human computation have changed the way people relate to the world, thus the anthropology for understanding and interpretation of human cultures have changed correspondingly. 
Cultures in the state of mobility and interlinkage, such as spatial changes, the evolution of interpersonal relationships and the new cultural order, have become a new subject.}, } @article {pmid36060618, year = {2023}, author = {Katal, A and Dahiya, S and Choudhury, T}, title = {Energy efficiency in cloud computing data centers: a survey on software technologies.}, journal = {Cluster computing}, volume = {26}, number = {3}, pages = {1845-1875}, pmid = {36060618}, issn = {1386-7857}, abstract = {Cloud computing is a commercial and economic paradigm that has gained traction since 2006 and is presently the most significant technology in IT sector. From the notion of cloud computing to its energy efficiency, cloud has been the subject of much discussion. The energy consumption of data centres alone will rise from 200 TWh in 2016 to 2967 TWh in 2030. The data centres require a lot of power to provide services, which increases CO2 emissions. In this survey paper, software-based technologies that can be used for building green data centers and include power management at individual software level has been discussed. The paper discusses the energy efficiency in containers and problem-solving approaches used for reducing power consumption in data centers. Further, the paper also gives details about the impact of data centers on environment that includes the e-waste and the various standards opted by different countries for giving rating to the data centers. This article goes beyond just demonstrating new green cloud computing possibilities. Instead, it focuses the attention and resources of academia and society on a critical issue: long-term technological advancement. The article covers the new technologies that can be applied at the individual software level that includes techniques applied at virtualization level, operating system level and application level. 
It clearly defines different measures at each level to reduce the energy consumption that clearly adds value to the current environmental problem of pollution reduction. This article also addresses the difficulties, concerns, and needs that cloud data centres and cloud organisations must grasp, as well as some of the factors and case studies that influence green cloud usage.}, } @article {pmid36059591, year = {2022}, author = {Moqurrab, SA and Tariq, N and Anjum, A and Asheralieva, A and Malik, SUR and Malik, H and Pervaiz, H and Gill, SS}, title = {A Deep Learning-Based Privacy-Preserving Model for Smart Healthcare in Internet of Medical Things Using Fog Computing.}, journal = {Wireless personal communications}, volume = {126}, number = {3}, pages = {2379-2401}, pmid = {36059591}, issn = {0929-6212}, abstract = {With the emergence of COVID-19, smart healthcare, the Internet of Medical Things, and big data-driven medical applications have become even more important. The biomedical data produced is highly confidential and private. Unfortunately, conventional health systems cannot support such a colossal amount of biomedical data. Hence, data is typically stored and shared through the cloud. The shared data is then used for different purposes, such as research and discovery of unprecedented facts. Typically, biomedical data appear in textual form (e.g., test reports, prescriptions, and diagnosis). Unfortunately, such data is prone to several security threats and attacks, for example, privacy and confidentiality breach. Although significant progress has been made on securing biomedical data, most existing approaches yield long delays and cannot accommodate real-time responses. This paper proposes a novel fog-enabled privacy-preserving model called δ r sanitizer, which uses deep learning to improve the healthcare system. The proposed model is based on a Convolutional Neural Network with Bidirectional-LSTM and effectively performs Medical Entity Recognition. 
The experimental results show that δ r sanitizer outperforms the state-of-the-art models with 91.14% recall, 92.63% in precision, and 92% F1-score. The sanitization model shows 28.77% improved utility preservation as compared to the state-of-the-art.}, } @article {pmid36059392, year = {2022}, author = {Srivastava, DK and Tiwari, PK and Srivastava, M and Dawadi, BR}, title = {An Energy-Efficient Strategy and Secure VM Placement Algorithm in Cloud Computing.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {5324202}, pmid = {36059392}, issn = {1687-5273}, abstract = {One of the important and challenging tasks in cloud computing is to obtain the usefulness of cloud by implementing several specifications for our needs, to meet the present growing demands, and to minimize energy consumption as much as possible and ensure proper utilization of computing resources. An excellent mapping scheme has been derived which maps virtual machines (VMs) to physical machines (PMs), which is also known as virtual machine (VM) placement, and this needs to be implemented. The tremendous diversity of computing resources, tasks, and virtualization processes in the cloud causes the consolidation method to be more complex, tedious, and problematic. An algorithm for reducing energy use and resource allocation is proposed for implementation in this article. This algorithm was developed with the help of a Cloud System Model, which enables mapping between VMs and PMs and among tasks of VMs. The methodology used in this algorithm also supports lowering the number of PMs that are in an active state and optimizes the total time taken to process a set of tasks (also known as makespan time). Using the CloudSim Simulator tool, we evaluated and assessed the energy consumption and makespan time. 
The results are compiled and then compared graphically with respect to other existing energy-efficient VM placement algorithms.}, } @article {pmid36052034, year = {2022}, author = {Gan, B and Zhang, C}, title = {An Improved Model of Product Classification Feature Extraction and Recognition Based on Intelligent Image Recognition.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {2926669}, pmid = {36052034}, issn = {1687-5273}, mesh = {*Algorithms ; Cloud Computing ; Commerce ; Humans ; *Software ; }, abstract = {With the development of the new generation of technological revolution, the manufacturing industry has entered the era of intelligent manufacturing, and people have higher and higher requirements for the technology, industry, and application of product manufacturing. At present, some factories have introduced intelligent image recognition technology into the production process in order to meet the needs of customers' personalized customization. However, the current image recognition technology has limited capabilities. When faced with many special customized products or complex types of small batch products in the market, it is still impossible to perfectly analyze the product requirements and put them into production. Therefore, this paper conducts in-depth research on the improved model of product classification feature extraction and recognition based on intelligent image recognition: 3D modeling of the target product is carried out, and various data of the model are analyzed and recorded to facilitate subsequent work. Use the tools and the established 3D model to simulate the parameters of the product in the real scene, and record them. At the same time, various methods such as image detection and edge analysis are used to maximize the accuracy of the obtained parameters, and various algorithms are used for cross-validation to obtain the correct rate of the obtained data, and the standard is 90% and above. 
Build a data platform, compare simulated data with display data by software and algorithm, and check by cloud computing force, so that the model data can be as close to the parameters of the real product as possible. Experimental results show that the algorithm has high accuracy and can meet the requirements of different classification prospects in actual production.}, } @article {pmid36048352, year = {2022}, author = {Jiang, F and Deng, M and Tang, J and Fu, L and Sun, H}, title = {Integrating spaceborne LiDAR and Sentinel-2 images to estimate forest aboveground biomass in Northern China.}, journal = {Carbon balance and management}, volume = {17}, number = {1}, pages = {12}, pmid = {36048352}, issn = {1750-0680}, support = {CX20210852//the Postgraduate Scientific Research Innovation Project of Hunan Province/ ; XLK201986//Scientific Research Fund of Hunan Provincial Forestry Department/ ; 31971578//the project of the National Natural Science Foundation of China/ ; }, abstract = {BACKGROUND: Fast and accurate forest aboveground biomass (AGB) estimation and mapping is the basic work of forest management and ecosystem dynamic investigation, which is of great significance to evaluate forest quality, resource assessment, and carbon cycle and management. The Ice, Cloud, and Land Elevation Satellite-2 (ICESat-2), as one of the latest launched spaceborne light detection and ranging (LiDAR) sensors, can penetrate the forest canopy and has the potential to obtain accurate forest vertical structure parameters on a large scale. However, the along-track segments of canopy height provided by ICESat-2 cannot be used to obtain comprehensive AGB spatial distribution. To make up for the deficiency of spaceborne LiDAR, the Sentinel-2 images provided by google earth engine (GEE) were used as the medium to integrate with ICESat-2 for continuous AGB mapping in our study. Ensemble learning can summarize the advantages of estimation models and achieve better estimation results. 
A stacking algorithm consisting of four non-parametric base models which are the backpropagation (BP) neural network, k-nearest neighbor (kNN), support vector machine (SVM), and random forest (RF) was proposed for AGB modeling and estimating in Saihanba forest farm, northern China.

RESULTS: The results show that stacking achieved the best AGB estimation accuracy among the models, with an R[2] of 0.71 and a root mean square error (RMSE) of 45.67 Mg/ha. The stacking resulted in the lowest estimation error with the decreases of RMSE by 22.6%, 27.7%, 23.4%, and 19.0% compared with those from the BP, kNN, SVM, and RF, respectively.

CONCLUSION: Compared with using Sentinel-2 alone, the estimation errors of all models have been significantly reduced after adding the LiDAR variables of ICESat-2 in AGB estimation. The research demonstrated that ICESat-2 has the potential to improve the accuracy of AGB estimation and provides a reference for dynamic forest resources management and monitoring.}, } @article {pmid36048148, year = {2022}, author = {Krissinel, E and Lebedev, AA and Uski, V and Ballard, CB and Keegan, RM and Kovalevskiy, O and Nicholls, RA and Pannu, NS and Skubák, P and Berrisford, J and Fando, M and Lohkamp, B and Wojdyr, M and Simpkin, AJ and Thomas, JMH and Oliver, C and Vonrhein, C and Chojnowski, G and Basle, A and Purkiss, A and Isupov, MN and McNicholas, S and Lowe, E and Triviño, J and Cowtan, K and Agirre, J and Rigden, DJ and Uson, I and Lamzin, V and Tews, I and Bricogne, G and Leslie, AGW and Brown, DG}, title = {CCP4 Cloud for structure determination and project management in macromolecular crystallography.}, journal = {Acta crystallographica. 
Section D, Structural biology}, volume = {78}, number = {Pt 9}, pages = {1079-1089}, pmid = {36048148}, issn = {2059-7983}, support = {BB/L007037/1/BB_/Biotechnology and Biological Sciences Research Council/United Kingdom ; BB/S007040/1/BB_/Biotechnology and Biological Sciences Research Council/United Kingdom ; BB/S007083/1/BB_/Biotechnology and Biological Sciences Research Council/United Kingdom ; BB/S005099/1/BB_/Biotechnology and Biological Sciences Research Council/United Kingdom ; BB/S007105/1/BB_/Biotechnology and Biological Sciences Research Council/United Kingdom ; BBF020384/1/BB_/Biotechnology and Biological Sciences Research Council/United Kingdom ; MC_UP_A025_1012/MRC_/Medical Research Council/United Kingdom ; MC_U105184325/MRC_/Medical Research Council/United Kingdom ; 349-2013-597//Röntgen-Ångström Cluster/ ; TKI 16219//Nederlandse Wetenschappelijke Organisatie/ ; }, mesh = {*Cloud Computing ; Crystallography, X-Ray ; Macromolecular Substances/chemistry ; *Software ; }, abstract = {Nowadays, progress in the determination of three-dimensional macromolecular structures from diffraction images is achieved partly at the cost of increasing data volumes. This is due to the deployment of modern high-speed, high-resolution detectors, the increased complexity and variety of crystallographic software, the use of extensive databases and high-performance computing. This limits what can be accomplished with personal, offline, computing equipment in terms of both productivity and maintainability. There is also an issue of long-term data maintenance and availability of structure-solution projects as the links between experimental observations and the final results deposited in the PDB. In this article, CCP4 Cloud, a new front-end of the CCP4 software suite, is presented which mitigates these effects by providing an online, cloud-based environment for crystallographic computation. 
CCP4 Cloud was developed for the efficient delivery of computing power, database services and seamless integration with web resources. It provides a rich graphical user interface that allows project sharing and long-term storage for structure-solution projects, and can be linked to data-producing facilities. The system is distributed with the CCP4 software suite version 7.1 and higher, and an online publicly available instance of CCP4 Cloud is provided by CCP4.}, } @article {pmid36046635, year = {2022}, author = {Nickel, S and Bremer, K and Dierks, ML and Haack, M and Wittmar, S and Borgetto, B and Kofahl, C}, title = {Digitization in health-related self-help - Results of an online survey among self-help organizations in Germany.}, journal = {Digital health}, volume = {8}, number = {}, pages = {20552076221120726}, pmid = {36046635}, issn = {2055-2076}, abstract = {BACKGROUND: Nowadays, much hope and expectations are associated with digitization in the health sector. The digital change also affects health-related self-help. A nationwide survey of self-help organizations (SHOs) aimed to show chances and limitations in the use of interactive IT tools like webforums, online meetings or social media as well as digital infrastructures for their organizational management. In this survey, we also determined whether SHO staff themselves have support and qualification needs with regard to this topic.

DESIGN: The online survey was conducted between 14 November and 8 December 2019, i.e., immediately before the outbreak of the Covid-19 pandemic. The questionnaire consisted of 50 questions consisting of 180 single items which could be answered in 30-40 min. After two reminder letters, 119 questionnaires of the SHOs were gathered and analysed.

RESULTS: SHOs already have a lot of experience with digital media/tools (e.g., own homepage, social media, cloud computing). Some tools are attested a "high" or "very high" benefit by more than 80% of users. Perceived benefits, however, are also facing a number of problems, ranging from lack of resources to data protection issues. Despite, or even because of the limits of digitization, there is great desire and need for support and further training in SHOs (and self-help groups).

CONCLUSIONS: At many points in the survey it was shown that digital media can be a useful extension of "traditional" collective self-help. Taking into account the risks and limitations associated with digital tools, SHOs can be central stakeholders in digitization in health-related self-help.

The study was financially supported by the Federal Ministry of Health, Germany. A detailed representation of the results is publicly available at: https://www.uke.de/dish.}, } @article {pmid36035822, year = {2022}, author = {Zala, K and Thakkar, HK and Jadeja, R and Dholakia, NH and Kotecha, K and Jain, DK and Shukla, M}, title = {On the Design of Secured and Reliable Dynamic Access Control Scheme of Patient E-Healthcare Records in Cloud Environment.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {3804553}, pmid = {36035822}, issn = {1687-5273}, mesh = {*Computer Security ; Confidentiality ; Delivery of Health Care ; Humans ; Privacy ; *Telemedicine ; }, abstract = {Traditional healthcare services have changed into modern ones in which doctors can diagnose patients from a distance. All stakeholders, including patients, ward boy, life insurance agents, physicians, and others, have easy access to patients' medical records due to cloud computing. The cloud's services are very cost-effective and scalable, and provide various mobile access options for a patient's electronic health records (EHRs). EHR privacy and security are critical concerns despite the many benefits of the cloud. Patient health information is extremely sensitive and important, and sending it over an unencrypted wireless media raises a number of security hazards. This study suggests an innovative and secure access system for cloud-based electronic healthcare services storing patient health records in a third-party cloud service provider. The research considers the remote healthcare requirements for maintaining patient information integrity, confidentiality, and security. There will be fewer attacks on e-healthcare records now that stakeholders will have a safe interface and data on the cloud will not be accessible to them. 
End-to-end encryption is ensured by using multiple keys generated by the key conclusion function (KCF), and access to cloud services is granted based on a person's identity and the relationship between the parties involved, which protects their personal information that is the methodology used in the proposed scheme. The proposed scheme is best suited for cloud-based e-healthcare services because of its simplicity and robustness. Using different Amazon EC2 hosting options, we examine how well our cloud-based web application service works when the number of requests linearly increases. The performance of our web application service that runs in the cloud is based on how many requests it can handle per second while keeping its response time constant. The proposed secure access scheme for cloud-based web applications was compared to the Ethereum blockchain platform, which uses internet of things (IoT) devices in terms of execution time, throughput, and latency.}, } @article {pmid36033780, year = {2022}, author = {Deng, C and Yu, Q and Luo, G and Zhao, Z and Li, Y}, title = {Big data-driven intelligent governance of college students' physical health: System and strategy.}, journal = {Frontiers in public health}, volume = {10}, number = {}, pages = {924025}, pmid = {36033780}, issn = {2296-2565}, mesh = {*Artificial Intelligence ; *Big Data ; Exercise ; Humans ; Students ; Surveys and Questionnaires ; }, abstract = {With the development of information technology, the application of a new generation of information technologies, such as big data, Internet Plus, and artificial intelligence, in the sports field is an emerging, novel trend. This paper examined the relevant research results and literature on physical education, computer science, pedagogy, management, and other disciplines, then used a self-made questionnaire to investigate the physical health status of Chinese college students. 
The big data were subsequently analyzed, which provided a scientific basis for the construction of an intelligent governance system for college students' physical health. Intelligent devices may be used to obtain big data resources, master the physical sports development and psychological status of college students, and push personalized sports prescriptions to solve the problems existing in college students' physical health. Research shows that there are four reasons for the continuous decline in Chinese college students' physical health levels. These are students' lack of positive exercise consciousness and healthy sports values (85.43%), a weak family sports concept and lack of physical exercise habits (62.76%), poor implementation of school sports policies (55.35%), and people's distorted sports value orientation (42.27%). Through the connecting effect of data, we can bring together the positive role of the government, school, society, family, and students so as to create an interlinked impact to promote students' physical health. The problems of insufficient platform utilization, lack of teaching resources, lagging research, and insufficient combination with big data in the intelligent governance of physical health of Chinese college students can be solved by building an intelligent governance system of physical health. Such a system would be composed of school infrastructure, data resources and technology processing, and intelligent service applications. Among these, school infrastructure refers to the material foundation and technical support. The material foundation includes perceptions, storage, computing, networks, and other equipment, and the technical support includes cloud computing, mobile Internet, the Internet of Things, artificial intelligence, and deep learning. 
Data resources refer to smart data, such as stadium data, physical health management data, and students' sports behavior data, which are mined from data resources such as students' physical development, physical health, and sports through big data technology and intelligent wearable devices. Intelligent managers provide efficient, intelligent, accurate, and personalized intelligent sports services for college students through data resource value mining, venue space-time optimization, health knowledge discovery, sports prescription pushes, etc. Finally, we put forward the development strategy for further deepening and improving the big data-driven intelligent governance system for college students' physical health. The intelligent governance system of physical health driven by big data and its development strategy can not only accurately guide and improve the physical health level of college students but also realize integrated teaching inside and outside physical education classes.}, } @article {pmid36033031, year = {2022}, author = {Liu, Y and Chen, L and Yao, Z}, title = {The application of artificial intelligence assistant to deep learning in teachers' teaching and students' learning processes.}, journal = {Frontiers in psychology}, volume = {13}, number = {}, pages = {929175}, pmid = {36033031}, issn = {1664-1078}, abstract = {With the emergence of big data, cloud computing, and other technologies, artificial intelligence (AI) technology has set off a new wave in the field of education. The application of AI technology to deep learning in university teachers' teaching and students' learning processes is an innovative way to promote the quality of teaching and learning. This study proposed the deep learning-based assessment to measure whether students experienced an improvement in terms of their mastery of knowledge, development of abilities, and emotional experiences. 
It also used comparative analysis of pre-tests and post-tests through online questionnaires to test the results. It examined the impact of technology on teachers' teaching and students' learning processes, identified the problems in the teaching and learning processes in the context of the application of AI technology, and proposed strategies for reforming and optimizing teaching and learning. It recommends the application of software and platforms, such as Watson and Knewton, under the orientation of AI technology to improve efficiency in teaching and learning, optimize course design, and engage students in deep learning. The contribution of this research is that the teaching and learning processes will be enhanced by the use of intelligent and efficient teaching models on the teachers' side and personalized and in-depth learning on the students' side. On the one hand, the findings are helpful for teachers to better grasp the actual conditions of in-class teaching in real time, carry out intelligent lesson preparations, enrich teaching methods, improve teaching efficiency, and achieve personalized and precision teaching. 
On the other hand, it also provides a space of intelligent support for students with different traits in terms of learning and effectively improves students' innovation ability, ultimately achieving the purpose of "artificial intelligence + education."}, } @article {pmid36032802, year = {2022}, author = {Mi, J and Sun, X and Zhang, S and Liu, N}, title = {Residential Environment Pollution Monitoring System Based on Cloud Computing and Internet of Things.}, journal = {International journal of analytical chemistry}, volume = {2022}, number = {}, pages = {1013300}, pmid = {36032802}, issn = {1687-8760}, abstract = {In order to solve the problems of single monitoring factor, weak comprehensive analysis ability, and poor real time performance in traditional environmental monitoring systems, a research method of residential environment pollution monitoring system based on cloud computing and Internet of Things is proposed. The method mainly includes two parts: an environmental monitoring terminal and an environmental pollution monitoring and management platform. Through the Wi-Fi module, the data is sent to the environmental pollution monitoring and management platform in real time. The environmental monitoring management platform is mainly composed of environmental pollution monitoring server, web server, and mobile terminal. The results are as follows. The data measured by the system is close to the data measured by the instrument, and the overall error is small. The measurement error of harmful gases is about 6%. PM 2.5 is about 6.5%. Noise is about 1%. The average time for sensor data update is 0.762 s. The average alarm response time is 2 s. The average data transfer time is 2 s. 
Practice has proved that the environmental pollution monitoring and alarm system operates stably and can realize real-time collection and transmission of data such as noise, PM 2.5, harmful gas concentration, illumination, GPS, and video images, providing a reliable guarantee for timely environmental pollution control.}, } @article {pmid36017455, year = {2022}, author = {Venkateswarlu, Y and Baskar, K and Wongchai, A and Gauri Shankar, V and Paolo Martel Carranza, C and Gonzáles, JLA and Murali Dharan, AR}, title = {An Efficient Outlier Detection with Deep Learning-Based Financial Crisis Prediction Model in Big Data Environment.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {4948947}, pmid = {36017455}, issn = {1687-5273}, mesh = {Algorithms ; *Big Data ; Cloud Computing ; *Deep Learning ; Machine Learning ; }, abstract = {As Big Data, Internet of Things (IoT), cloud computing (CC), and other ideas and technologies are combined for social interactions. Big data technologies improve the treatment of financial data for businesses. At present, an effective tool can be used to forecast the financial failures and crises of small and medium-sized enterprises. Financial crisis prediction (FCP) plays a major role in the country's economic phenomenon. Accurate forecasting of the number and probability of failure is an indication of the development and strength of national economies. Normally, distinct approaches are planned for an effective FCP. Conversely, classifier efficiency and predictive accuracy and data legality could not be optimal for practical application. In this view, this study develops an oppositional ant lion optimizer-based feature selection with a machine learning-enabled classification (OALOFS-MLC) model for FCP in a big data environment. For big data management in the financial sector, the Hadoop MapReduce tool is used. 
In addition, the presented OALOFS-MLC model designs a new OALOFS algorithm to choose an optimal subset of features which helps to achieve improved classification results. In addition, the deep random vector functional links network (DRVFLN) model is used to perform the grading process. Experimental validation of the OALOFS-MLC approach was conducted using a baseline dataset and the results demonstrated the supremacy of the OALOFS-MLC algorithm over recent approaches.}, } @article {pmid36016907, year = {2022}, author = {Reyes-Muñoz, P and Pipia, L and Salinero-Delgado, M and Belda, S and Berger, K and Estévez, J and Morata, M and Rivera-Caicedo, JP and Verrelst, J}, title = {Quantifying Fundamental Vegetation Traits over Europe Using the Sentinel-3 OLCI Catalogue in Google Earth Engine.}, journal = {Remote sensing}, volume = {14}, number = {6}, pages = {1347}, pmid = {36016907}, issn = {2072-4292}, support = {755617/ERC_/European Research Council/International ; }, abstract = {Thanks to the emergence of cloud-computing platforms and the ability of machine learning methods to solve prediction problems efficiently, this work presents a workflow to automate spatiotemporal mapping of essential vegetation traits from Sentinel-3 (S3) imagery. The traits included leaf chlorophyll content (LCC), leaf area index (LAI), fraction of absorbed photosynthetically active radiation (FAPAR), and fractional vegetation cover (FVC), being fundamental for assessing photosynthetic activity on Earth. The workflow involved Gaussian process regression (GPR) algorithms trained on top-of-atmosphere (TOA) radiance simulations generated by the coupled canopy radiative transfer model (RTM) SCOPE and the atmospheric RTM 6SV. 
The retrieval models, named S3-TOA-GPR-1.0, were directly implemented in Google Earth Engine (GEE) to enable the quantification of the traits from TOA data as acquired from the S3 Ocean and Land Colour Instrument (OLCI) sensor. Following good to high theoretical validation results with normalized root mean square error (NRMSE) ranging from 5% (FAPAR) to 19% (LAI), a three-fold evaluation approach over diverse sites and land cover types was pursued: (1) temporal comparison against LAI and FAPAR products obtained from Moderate Resolution Imaging Spectroradiometer (MODIS) for the time window 2016-2020, (2) spatial difference mapping with Copernicus Global Land Service (CGLS) estimates, and (3) direct validation using interpolated in situ data from the VALERI network. For all three approaches, promising results were achieved. Selected sites demonstrated coherent seasonal patterns compared to LAI and FAPAR MODIS products, with differences between spatially averaged temporal patterns of only 6.59%. In respect of the spatial mapping comparison, estimates provided by the S3-TOA-GPR-1.0 models indicated highest consistency with FVC and FAPAR CGLS products. Moreover, the direct validation of our S3-TOA-GPR-1.0 models against VALERI estimates indicated good retrieval performance for LAI, FAPAR and FVC. 
We conclude that our retrieval workflow of spatiotemporal S3 TOA data processing into GEE opens the path towards global monitoring of fundamental vegetation traits, accessible to the whole research community.}, } @article {pmid36016060, year = {2022}, author = {Thilakarathne, NN and Bakar, MSA and Abas, PE and Yassin, H}, title = {A Cloud Enabled Crop Recommendation Platform for Machine Learning-Driven Precision Farming.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {16}, pages = {}, pmid = {36016060}, issn = {1424-8220}, mesh = {*Agriculture ; *Artificial Intelligence ; Crops, Agricultural ; Farms ; Machine Learning ; }, abstract = {Modern agriculture incorporated a portfolio of technologies to meet the current demand for agricultural food production, in terms of both quality and quantity. In this technology-driven farming era, this portfolio of technologies has aided farmers to overcome many of the challenges associated with their farming activities by enabling precise and timely decision making on the basis of data that are observed and subsequently converged. In this regard, Artificial Intelligence (AI) holds a key place, whereby it can assist key stakeholders in making precise decisions regarding the conditions on their farms. Machine Learning (ML), which is a branch of AI, enables systems to learn and improve from their experience without explicitly being programmed, by imitating intelligent behavior in solving tasks in a manner that requires low computational power. For the time being, ML is involved in a variety of aspects of farming, assisting ranchers in making smarter decisions on the basis of the observed data. In this study, we provide an overview of AI-driven precision farming/agriculture with related work and then propose a novel cloud-based ML-powered crop recommendation platform to assist farmers in deciding which crops need to be harvested based on a variety of known parameters. 
Moreover, in this paper, we compare five predictive ML algorithms-K-Nearest Neighbors (KNN), Decision Tree (DT), Random Forest (RF), Extreme Gradient Boosting (XGBoost) and Support Vector Machine (SVM)-to identify the best-performing ML algorithm on which to build our recommendation platform as a cloud-based service with the intention of offering precision farming solutions that are free and open source, which will lead to the growth and adoption of precision farming solutions in the long run.}, } @article {pmid36016017, year = {2022}, author = {Rocha Filho, GP and Brandão, AH and Nobre, RA and Meneguette, RI and Freitas, H and Gonçalves, VP}, title = {HOsT: Towards a Low-Cost Fog Solution via Smart Objects to Deal with the Heterogeneity of Data in a Residential Environment.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {16}, pages = {}, pmid = {36016017}, issn = {1424-8220}, mesh = {*Environment ; }, abstract = {With the fast and unstoppable development of technology, the amount of available technological devices and the data they produce is overwhelming. In analyzing the context of a smart home, a diverse group of intelligent devices generating constant reports of its environment information is needed for the proper control of the house. Due to this demand, many possible solutions have been developed in the literature to assess the need for processing power and storage capacity. This work proposes HOsT (home-context-aware fog-computing solution)-a solution that addresses the problems of data heterogeneity and the interoperability of smart objects in the context of a smart home. HOsT was modeled to compose a set of intelligent objects to form a computational infrastructure in fog. A publish/subscribe communication module was implemented to abstract the details of communication between objects to disseminate heterogeneous information. A performance evaluation was carried out to validate HOsT. 
The results show evidence of efficiency in the communication infrastructure; and in the impact of HOsT compared with a cloud infrastructure. Furthermore, HOsT provides scalability about the number of devices acting simultaneously and demonstrates its ability to work with different devices.}, } @article {pmid36016014, year = {2022}, author = {Bemani, A and Björsell, N}, title = {Aggregation Strategy on Federated Machine Learning Algorithm for Collaborative Predictive Maintenance.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {16}, pages = {}, pmid = {36016014}, issn = {1424-8220}, support = {20202943//Region Gavleborg/ ; 20203291//EU - Tillvax Verket/ ; }, mesh = {*Algorithms ; Computer Simulation ; *Machine Learning ; Privacy ; Support Vector Machine ; }, abstract = {Industry 4.0 lets the industry build compact, precise, and connected assets and also has made modern industrial assets a massive source of data that can be used in process optimization, defining product quality, and predictive maintenance (PM). Large amounts of data are collected from machines, processed, and analyzed by different machine learning (ML) algorithms to achieve effective PM. These machines, assumed as edge devices, transmit their data readings to the cloud for processing and modeling. Transmitting massive amounts of data between edge and cloud is costly, increases latency, and causes privacy concerns. To address this issue, efforts have been made to use edge computing in PM applications., reducing data transmission costs and increasing processing speed. Federated learning (FL) has been proposed a mechanism that provides the ability to create a model from distributed data in edge, fog, and cloud layers without violating privacy and offers new opportunities for a collaborative approach to PM applications. 
However, FL has challenges in confronting asset management in the industry, especially in the PM applications, which need to be considered in order to be fully compatible with these applications. This study describes distributed ML for PM applications and proposes two federated algorithms: Federated support vector machine (FedSVM) with memory for anomaly detection and federated long-short term memory (FedLSTM) for remaining useful life (RUL) estimation that enables factories at the fog level to maximize their PM models' accuracy without compromising their privacy. A global model at the cloud level has also been generated based on these algorithms. We have evaluated the approach using the Commercial Modular Aero-Propulsion System Simulation (CMAPSS) dataset to predict engines' RUL. Experimental results demonstrate the advantage of FedSVM and FedLSTM in terms of model accuracy, model convergence time, and network usage resources.}, } @article {pmid36015736, year = {2022}, author = {Chen, YS and Cheng, KH and Hsu, CS and Zhang, HL}, title = {MiniDeep: A Standalone AI-Edge Platform with a Deep Learning-Based MINI-PC and AI-QSR System.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {16}, pages = {}, pmid = {36015736}, issn = {1424-8220}, support = {MOST 109-2221-E-305-004-MY3//Ministry of Science and Technology, Taiwan/ ; }, mesh = {*Artificial Intelligence ; *Deep Learning ; Software ; }, abstract = {In this paper, we present a new AI (Artificial Intelligence) edge platform, called "MiniDeep", which provides a standalone deep learning platform based on the cloud-edge architecture. This AI-Edge platform provides developers with a whole deep learning development environment to set up their deep learning life cycle processes, such as model training, model evaluation, model deployment, model inference, ground truth collecting, data pre-processing, and training data management.
To the best of our knowledge, such a whole deep learning development environment has not been built before. MiniDeep uses Amazon Web Services (AWS) as the backend platform of a deep learning tuning management model. In the edge device, the OpenVino enables deep learning inference acceleration at the edge. To perform a deep learning life cycle job, MiniDeep proposes a mini deep life cycle (MDLC) system which is composed of several microservices from the cloud to the edge. MiniDeep provides Train Job Creator (TJC) for training dataset management and the models' training schedule and Model Packager (MP) for model package management. All of them are based on several AWS cloud services. On the edge device, MiniDeep provides Inference Handler (IH) to handle deep learning inference by hosting RESTful API (Application Programming Interface) requests/responses from the end device. Data Provider (DP) is responsible for ground truth collection and dataset synchronization for the cloud. With the deep learning ability, this paper uses the MiniDeep platform to implement a recommendation system for AI-QSR (Quick Service Restaurant) KIOSK (interactive kiosk) application. AI-QSR uses the MiniDeep platform to train an LSTM (Long Short-Term Memory)-based recommendation system. The LSTM-based recommendation system converts KIOSK UI (User Interface) flow to the flow sequence and performs sequential recommendations with food suggestions. At the end of this paper, the efficiency of the proposed MiniDeep is verified through real experiments. 
The experiment results have demonstrated that the proposed LSTM-based scheme performs better than the rule-based scheme in terms of purchase hit accuracy, categorical cross-entropy, precision, recall, and F1 score.}, } @article {pmid36015727, year = {2022}, author = {Alzahrani, A and Alyas, T and Alissa, K and Abbas, Q and Alsaawy, Y and Tabassum, N}, title = {Hybrid Approach for Improving the Performance of Data Reliability in Cloud Storage Management.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {16}, pages = {}, pmid = {36015727}, issn = {1424-8220}, mesh = {*Cloud Computing ; Computers ; *Information Storage and Retrieval ; Reproducibility of Results ; }, abstract = {The digital transformation disrupts the various professional domains in different ways, though one aspect is common: the unified platform known as cloud computing. Corporate solutions, IoT systems, analytics, business intelligence, and numerous tools, solutions and systems use cloud computing as a global platform. The migrations to the cloud are increasing, causing it to face new challenges and complexities. One of the essential segments is related to data storage. Data storage on the cloud is neither simplistic nor conventional; rather, it is becoming more and more complex due to the versatility and volume of data. The inspiration of this research is based on the development of a framework that can provide a comprehensive solution for cloud computing storage in terms of replication, and instead of using formal recovery channels, erasure coding has been proposed for this framework, which in the past proved itself as a trustworthy mechanism for the job. The proposed framework provides a hybrid approach to combine the benefits of replication and erasure coding to attain the optimal solution for storage, specifically focused on reliability and recovery. Learning and training mechanisms were developed to provide dynamic structure building in the future and test the data model. 
RAID architecture is used to formulate different configurations for the experiments. RAID-1 to RAID-6 are divided into two groups, with RAID-1 to 4 in the first group while RAID-5 and 6 are in the second group, further categorized based on FTT, parity, failure range and capacity. Reliability and recovery are evaluated on the rest of the data on the server side, and for the data in transit at the virtual level. The overall results show the significant impact of the proposed hybrid framework on cloud storage performance. RAID-6c at the server side came out as the best configuration for optimal performance. The mirroring for replication using RAID-6 and erasure coding for recovery work in complete coherence provide good results for the current framework while highlighting the interesting and challenging paths for future research.}, } @article {pmid36015699, year = {2022}, author = {Lakhan, A and Mohammed, MA and Abdulkareem, KH and Jaber, MM and Nedoma, J and Martinek, R and Zmij, P}, title = {Delay Optimal Schemes for Internet of Things Applications in Heterogeneous Edge Cloud Computing Networks.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {16}, pages = {}, pmid = {36015699}, issn = {1424-8220}, support = {SP2022/18 and No. SP2022/34//Ministry of Education Youth and Sports/ ; CZ.02.1.01/0.0/0.0/17049/0008425//European Regional Development Fund in Research Platform focused on Industry 4.0 and Robotics in Ostrava/ ; }, mesh = {*Cloud Computing ; Delivery of Health Care ; *Internet of Things ; }, abstract = {Over the last decade, the usage of Internet of Things (IoT) enabled applications, such as healthcare, intelligent vehicles, and smart homes, has increased progressively. These IoT applications generate delay-sensitive data and require quick resources for execution. Recently, software-defined networks (SDN) offer an edge computing paradigm (e.g., fog computing) to run these applications with minimum end-to-end delays.
Offloading and scheduling are promising schemes of edge computing to run delay-sensitive IoT applications while satisfying their requirements. However, in the dynamic environment, existing offloading and scheduling techniques are not ideal and decrease the performance of such applications. This article formulates joint offloading and scheduling problems into combinatorial integer linear programming (CILP). We propose a joint task offloading and scheduling (JTOS) framework based on the problem. JTOS consists of task offloading, sequencing, scheduling, searching, and failure components. The study's goal is to minimize the hybrid delay of all applications. The performance evaluation shows that JTOS outperforms all existing baseline methods in hybrid delay for all applications in the dynamic environment. The performance evaluation shows that JTOS reduces the processing delay by 39% and the communication delay by 35% for IoT applications compared to existing schemes.}, } @article {pmid36009026, year = {2022}, author = {Lin, PC and Tsai, YS and Yeh, YM and Shen, MR}, title = {Cutting-Edge AI Technologies Meet Precision Medicine to Improve Cancer Care.}, journal = {Biomolecules}, volume = {12}, number = {8}, pages = {}, pmid = {36009026}, issn = {2218-273X}, mesh = {Artificial Intelligence ; Computational Biology/methods ; Data Mining ; Genomics/methods ; Humans ; *Neoplasms/diagnosis/genetics/therapy ; *Precision Medicine/methods ; }, abstract = {To provide precision medicine for better cancer care, researchers must work on clinical patient data, such as electronic medical records, physiological measurements, biochemistry, computerized tomography scans, digital pathology, and the genetic landscape of cancer tissue. To interpret big biodata in cancer genomics, an operational flow based on artificial intelligence (AI) models and medical management platforms with high-performance computing must be set up for precision cancer genomics in clinical practice.
To work in the fast-evolving fields of patient care, clinical diagnostics, and therapeutic services, clinicians must understand the fundamentals of the AI tool approach. Therefore, the present article covers the following five themes: (i) computational prediction of pathogenic variants of cancer susceptibility genes; (ii) AI model for mutational analysis; (iii) single-cell genomics and computational biology; (iv) text mining for identifying gene targets in cancer; and (v) the NVIDIA graphics processing units, DRAGEN field programmable gate arrays systems and AI medical cloud platforms in clinical next-generation sequencing laboratories. Based on AI medical platforms and visualization, large amounts of clinical biodata can be rapidly copied and understood using an AI pipeline. The use of innovative AI technologies can deliver more accurate and rapid cancer therapy targets.}, } @article {pmid35996679, year = {2023}, author = {Alsalemi, A and Amira, A and Malekmohamadi, H and Diao, K}, title = {Lightweight Gramian Angular Field classification for edge internet of energy applications.}, journal = {Cluster computing}, volume = {26}, number = {2}, pages = {1375--1387}, pmid = {35996679}, issn = {1386-7857}, abstract = {UNLABELLED: With adverse industrial effects on the global landscape, climate change is imploring the global economy to adopt sustainable solutions. The ongoing evolution of energy efficiency targets massive data collection and Artificial Intelligence (AI) for big data analytics. Besides, emerging on the Internet of Energy (IoE) paradigm, edge computing is playing a rising role in liberating private data from cloud centralization. In this direction, a creative visual approach to understanding energy data is introduced.
Building upon micro-moments, which are timeseries of small contextual data points, the power of pictorial representations to encapsulate rich information in a small two-dimensional (2D) space is harnessed through a novel Gramian Angular Fields (GAF) classifier for energy micro-moments. Designed with edge computing efficiency in mind, current testing results on the ODROID-XU4 can classify up to 7 million GAF-converted datapoints with ~ 90% accuracy in less than 30 s, paving the path towards industrial adoption of edge IoE.

SUPPLEMENTARY INFORMATION: The online version contains supplementary material available at 10.1007/s10586-022-03704-1.}, } @article {pmid35994872, year = {2022}, author = {Yeung, S and Kim, HK and Carleton, A and Munro, J and Ferguson, D and Monk, AP and Zhang, J and Besier, T and Fernandez, J}, title = {Integrating wearables and modelling for monitoring rehabilitation following total knee joint replacement.}, journal = {Computer methods and programs in biomedicine}, volume = {225}, number = {}, pages = {107063}, doi = {10.1016/j.cmpb.2022.107063}, pmid = {35994872}, issn = {1872-7565}, mesh = {*Arthroplasty, Replacement, Knee/rehabilitation ; Biomechanical Phenomena ; Gait ; Humans ; Knee Joint/surgery ; *Knee Prosthesis ; *Wearable Electronic Devices ; }, abstract = {BACKGROUND AND OBJECTIVE: Wearable inertial devices integrated with modelling and cloud computing have been widely adopted in the sports sector, however, their use in the health and medical field has yet to be fully realised. To date, there have been no reported studies concerning the use of wearables as a surrogate tool to monitor knee joint loading during recovery following a total knee joint replacement. The objective of this study is to firstly evaluate if peak tibial acceleration from wearables during gait is a good surrogate metric for computer modelling predicted functional knee loading; and secondly evaluate if traditional clinical patient related outcomes measures are consistent with wearable predictions.

METHODS: Following ethical approval, four healthy participants were used to establish the relationship between computer modelling predicted knee joint loading and wearable measured tibial acceleration. Following this, ten patients who had total knee joint replacements were then followed during their 6-week rehabilitation. Gait analysis, wearable acceleration, computer models of knee joint loading, and patient related outcomes measures including the Oxford knee score and range of motion were recorded.

RESULTS: A linear correlation (R[2] of 0.7-0.97) was observed between peak tibial acceleration (from wearables) and musculoskeletal model predicted knee joint loading during gait in healthy participants first. Whilst patient related outcome measures (Oxford knee score and patient range of motion) were observed to improve consistently during rehabilitation, this was not consistent with all patient's tibial acceleration. Only those patients that exhibited increasing peak tibial acceleration over 6-weeks rehabilitation were positively correlated with the Oxford knee score (R[2] of 0.51 to 0.97). Wearable predicted tibial acceleration revealed three patients with a consistent knee loading, five patients with improving knee loading, and two patients with declining knee loading during recovery. Hence, 20% of patients did not present with satisfactory joint loading following total knee joint replacement and this was not detected with current patient related outcome measures.

CONCLUSIONS: The use of inertial measurement units or wearables in this study provided additional insight into patients who were not exhibiting functional improvements in joint loading, and offers clinicians an 'off-site' early warning metric to identify potential complications during recovery and provide the opportunity for early intervention. This study has important implications for improving patient outcomes, equity, and for those who live in rural regions.}, } @article {pmid35992348, year = {2022}, author = {Xu, J and Xu, Z and Shi, B}, title = {Deep Reinforcement Learning Based Resource Allocation Strategy in Cloud-Edge Computing System.}, journal = {Frontiers in bioengineering and biotechnology}, volume = {10}, number = {}, pages = {908056}, pmid = {35992348}, issn = {2296-4185}, abstract = {The rapid development of mobile device applications put tremendous pressure on edge nodes with limited computing capabilities, which may cause poor user experience. To solve this problem, collaborative cloud-edge computing is proposed. In the cloud-edge computing, an edge node with limited local resources can rent more resources from a cloud node. According to the nature of cloud service, cloud service can be divided into private cloud and public cloud. In a private cloud environment, the edge node must allocate resources between the cloud node and the edge node. In a public cloud environment, since public cloud service providers offer various pricing modes for users' different computing demands, the edge node also must select the appropriate pricing mode of cloud service; which is a sequential decision problem. 
In this study, we model it as a Markov decision process and parameterized action Markov decision process, and we propose a resource allocation algorithm cost efficient resource allocation with private cloud (CERAI) and cost efficient resource allocation with public cloud (CERAU) in the collaborative cloud-edge environment based on the deep reinforcement learning algorithm deep deterministic policy gradient and P-DQN. Next, we evaluated CERAI and CERAU against three typical resource allocation algorithms based on synthetic and real data of Google datasets. The experimental results demonstrate that CERAI and CERAU can effectively reduce the long-term operating cost of collaborative cloud-side computing in various demanding settings. Our analysis can provide some useful insights for enterprises to design the resource allocation strategy in the collaborative cloud-side computing system.}, } @article {pmid35991356, year = {2022}, author = {de Oliveira, MEG and da Silva, MV and de Almeida, GLP and Pandorfi, H and Oliveira Lopes, PM and Manrique, DRC and Dos Santos, A and Jardim, AMDRF and Giongo, PR and Montenegro, AAA and da Silva Junior, CA and de Oliveira-Júnior, JF}, title = {Investigation of pre and post environmental impact of the lockdown (COVID-19) on the water quality of the Capibaribe and Tejipió rivers, Recife metropolitan region, Brazil.}, journal = {Journal of South American earth sciences}, volume = {118}, number = {}, pages = {103965}, pmid = {35991356}, issn = {0895-9811}, abstract = {The coronavirus pandemic has seriously affected human health, although some improvements on environmental indexes have temporarily occurred, due to changes on socio-cultural and economic standards.
The objective of this study was to evaluate the impacts of the coronavirus and the influence of the lockdown associated with rainfall on the water quality of the Capibaribe and Tejipió rivers, Recife, Northeast Brazil, using cloud remote sensing on the Google Earth Engine (GEE) platform. The study was carried out based on eight representative images from Sentinel-2. Among the selected images, two refer to the year 2019 (before the pandemic), three refer to 2020 (during a pandemic), two from the lockdown period (2020), and one for the year 2021. The land use and land cover (LULC) and slope of the study region were determined and classified. Water turbidity data were subjected to descriptive and multivariate statistics. When analyzing the data on LULC for the riparian margin of the Capibaribe and Tejipió rivers, a low permanent preservation area was found, with a predominance of almost 100% of the urban area to which the deposition of soil particles in rivers are minimal. The results indicated that turbidity values in the water bodies varied from 6 mg. L[-1] up to 40 mg. L[-1]. Overall, the reduction in human-based activities generated by the lockdown enabled improvements in water quality of these urban rivers.}, } @article {pmid35990146, year = {2022}, author = {Li, J and Liu, L}, title = {The Reform of University Education Teaching Based on Cloud Computing and Big Data Background.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {8169938}, pmid = {35990146}, issn = {1687-5273}, mesh = {*Big Data ; *Cloud Computing ; Humans ; Teaching ; Universities ; }, abstract = {In the era of big data and cloud computing, traditional college teaching model needs to be revolutionized in order to adapt to the needs of the present generation. The traditional college teaching model is currently facing unprecedented severe challenges which could be optimistically considered as a huge scope of development opportunity. 
In order to promote the gradual transformation of college teaching toward digitization, intelligence, and modernization, this paper comprehensively analyzes the impact of science and technology on college teaching. It further encourages the omnidirectional and multifaceted amalgamation of education with big data and cloud computing technology with an objective to improve the overall teaching level of colleges and universities. In order to realize the accurate evaluation of university teaching reform and improve teaching quality, the study presents an evaluation method of university teaching reform based on in-depth research network. Then, it further analyzes the main contents of university teaching reform, establishes the evaluation department of university teaching reform, and then establishes the evaluation model of university education reform. This is achieved by analyzing the relationship between university education reform and indicators using in-depth learning network followed by the development of simulation experiments pertinent to evaluation of university education reform. The results show that this method is helpful in improving the teaching quality.}, } @article {pmid35990138, year = {2022}, author = {Zhao, J and Zhang, L and Zhao, Y}, title = {Informatization of Accounting Systems in Small- and Medium-Sized Enterprises Based on Artificial Intelligence-Enabled Cloud Computing.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {6089195}, pmid = {35990138}, issn = {1687-5273}, mesh = {*Artificial Intelligence ; *Cloud Computing ; }, abstract = {Against the backdrop of China's growing market economy, small- and medium-sized enterprises (SMEs) have taken advantage of this opportunity to develop rapidly. At present, SMEs have become an important part of the market economy. 
Accounting system information management system is an advanced form of management, and improving the degree of accounting information is the key to improving the management mode of SMEs. This study applies cloud computing to enterprise accounting management systems. The results show that realizing SME accounting information management can effectively improve economic settlements. With the development of cloud computing, its improvement of accounting management efficiency cannot be ignored. Besides, to address the risks of accounting informatization, enterprises can safeguard their development by establishing a secure network protection wall and relying on strict relevant laws and regulations.}, } @article {pmid35989835, year = {2022}, author = {Datta, PK and Chowdhury, SR and Aravindan, A and Nath, S and Sen, P}, title = {Looking for a Silver Lining to the Dark Cloud: A Google Trends Analysis of Contraceptive Interest in the United States Post Roe vs. Wade Verdict.}, journal = {Cureus}, volume = {14}, number = {7}, pages = {e27012}, pmid = {35989835}, issn = {2168-8184}, abstract = {Background In the wake of the recent Roe vs. Wade judgment, we performed a Google Trends analysis to identify the impact of this decision on the interests regarding contraceptive choices in the United States. Methods A Google Trends search between April 6 and July 5, 2022, with the United States as the area of interest, was performed using the five most popular contraception choices. In addition, a second trend search was performed using oral and injectable hormonal birth control measures. Results Trends showed a spike in interest regarding various contraceptive methods immediately following the verdict. The highest increase in interest was noted for "vasectomy," followed by "tubal ligation." With respect to oral and injectable birth control measures, "morning after pill" showed a marked spike in interest.
Conclusion This verdict has triggered increased interest in contraceptive practices, which can be translated into better reproductive health with proper public health initiatives.}, } @article {pmid35978910, year = {2022}, author = {Tang, H and Jiang, G and Wang, Q}, title = {Prediction of College Students' Sports Performance Based on Improved BP Neural Network.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {5872384}, pmid = {35978910}, issn = {1687-5273}, mesh = {Algorithms ; *Athletic Performance ; Humans ; Neural Networks, Computer ; *Students ; Universities ; }, abstract = {Sports performance prediction has gradually become a research hotspot in various colleges and universities, and colleges and universities pay more and more attention to the development of college students' comprehensive quality. Aiming at the problems of low accuracy and slow convergence of the existing college students' sports performance prediction models, a method of college students' sports performance prediction based on improved BP neural network is proposed. First, preprocess the student's sports performance data, then use the BP neural network to train the data samples, optimize the selection of weights and thresholds in the neural network through the DE algorithm, and establish an optimal college student's sports performance prediction model, and then based on cloud computing, the platform implements and runs the sports performance prediction model, which speeds up the prediction of sports performance. 
The results show that the model can improve the accuracy of college students' sports performance prediction, provide more reliable prediction results, and provide valuable information for sports training.}, } @article {pmid37954189, year = {2023}, author = {Possik, J and Asgary, A and Solis, AO and Zacharewicz, G and Shafiee, MA and Najafabadi, MM and Nadri, N and Guimaraes, A and Iranfar, H and Ma, P and Lee, CM and Tofighi, M and Aarabi, M and Gorecki, S and Wu, J}, title = {An Agent-Based Modeling and Virtual Reality Application Using Distributed Simulation: Case of a COVID-19 Intensive Care Unit.}, journal = {IEEE transactions on engineering management}, volume = {70}, number = {8}, pages = {2931-2943}, pmid = {37954189}, issn = {0018-9391}, abstract = {Hospitals and other healthcare settings use various simulation methods to improve their operations, management, and training. The COVID-19 pandemic, with the resulting necessity for rapid and remote assessment, has highlighted the critical role of modeling and simulation in healthcare, particularly distributed simulation (DS). DS enables integration of heterogeneous simulations to further increase the usability and effectiveness of individual simulations. This article presents a DS system that integrates two different simulations developed for a hospital intensive care unit (ICU) ward dedicated to COVID-19 patients. AnyLogic has been used to develop a simulation model of the ICU ward using agent-based and discrete event modeling methods. This simulation depicts and measures physical contacts between healthcare providers and patients. The Unity platform has been utilized to develop a virtual reality simulation of the ICU environment and operations. The high-level architecture, an IEEE standard for DS, has been used to build a cloud-based DS system by integrating and synchronizing the two simulation platforms. 
While enhancing the capabilities of both simulations, the DS system can be used for training purposes and assessment of different managerial and operational decisions to minimize contacts and disease transmission in the ICU ward by enabling data exchange between the two simulations.}, } @article {pmid35974742, year = {2022}, author = {Deumer, J and Pauw, BR and Marguet, S and Skroblin, D and Taché, O and Krumrey, M and Gollwitzer, C}, title = {Small-angle X-ray scattering: characterization of cubic Au nanoparticles using Debye's scattering formula.}, journal = {Journal of applied crystallography}, volume = {55}, number = {Pt 4}, pages = {993-1001}, pmid = {35974742}, issn = {0021-8898}, abstract = {A versatile software package in the form of a Python extension, named CDEF (computing Debye's scattering formula for extraordinary form factors), is proposed to calculate approximate scattering profiles of arbitrarily shaped nanoparticles for small-angle X-ray scattering (SAXS). CDEF generates a quasi-randomly distributed point cloud in the desired particle shape and then applies the open-source software DEBYER for efficient evaluation of Debye's scattering formula to calculate the SAXS pattern (https://github.com/j-from-b/CDEF). If self-correlation of the scattering signal is not omitted, the quasi-random distribution provides faster convergence compared with a true-random distribution of the scatterers, especially at higher momentum transfer. The usage of the software is demonstrated for the evaluation of scattering data of Au nanocubes with rounded edges, which were measured at the four-crystal monochromator beamline of PTB at the synchrotron radiation facility BESSY II in Berlin. The implementation is fast enough to run on a single desktop computer and perform model fits within minutes. 
The accuracy of the method was analyzed by comparison with analytically known form factors and verified with another implementation, the SPONGE, based on a similar principle with fewer approximations. Additionally, the SPONGE coupled to McSAS3 allows one to retrieve information on the uncertainty of the size distribution using a Monte Carlo uncertainty estimation algorithm.}, } @article {pmid35972790, year = {2022}, author = {Ngu, AH and Metsis, V and Coyne, S and Srinivas, P and Salad, T and Mahmud, U and Chee, KH}, title = {Personalized Watch-Based Fall Detection Using a Collaborative Edge-Cloud Framework.}, journal = {International journal of neural systems}, volume = {32}, number = {12}, pages = {2250048}, doi = {10.1142/S0129065722500484}, pmid = {35972790}, issn = {1793-6462}, mesh = {Humans ; Aged ; *Accidental Falls/prevention & control ; *Smartphone ; Automation ; Software ; }, abstract = {The majority of current smart health applications are deployed on a smartphone paired with a smartwatch. The phone is used as the computation platform or the gateway for connecting to the cloud while the watch is used mainly as the data sensing device. In the case of fall detection applications for older adults, this kind of setup is not very practical since it requires users to always keep their phones in proximity while doing the daily chores. When a person falls, in a moment of panic, it might be difficult to locate the phone in order to interact with the Fall Detection App for the purpose of indicating whether they are fine or need help. This paper demonstrates the feasibility of running a real-time personalized deep-learning-based fall detection system on a smartwatch device using a collaborative edge-cloud framework. 
In particular, we present the software architecture we used for the collaborative framework, demonstrate how we automate the fall detection pipeline, design an appropriate UI on the small screen of the watch, and implement strategies for the continuous data collection and automation of the personalization process with the limited computational and storage resources of a smartwatch. We also present the usability of such a system with nine real-world older adult participants.}, } @article {pmid35972192, year = {2022}, author = {Poolman, TM and Townsend-Nicholson, A and Cain, A}, title = {Teaching genomics to life science undergraduates using cloud computing platforms with open datasets.}, journal = {Biochemistry and molecular biology education : a bimonthly publication of the International Union of Biochemistry and Molecular Biology}, volume = {50}, number = {5}, pages = {446-449}, pmid = {35972192}, issn = {1539-3429}, mesh = {*COVID-19/epidemiology ; *Cloud Computing ; Genomics ; Humans ; Software ; Students ; }, abstract = {The final year of a biochemistry degree is usually a time to experience research. However, laboratory-based research projects were not possible during COVID-19. Instead, we used open datasets to provide computational research projects in metagenomics to biochemistry undergraduates (80 students with limited computing experience). We aimed to give the students a chance to explore any dataset, rather than use a small number of artificial datasets (~60 published datasets were used). To achieve this, we utilized Google Colaboratory (Colab), a virtual computing environment. Colab was used as a framework to retrieve raw sequencing data (analyzed with QIIME2) and generate visualizations. Setting up the environment requires no prior experience; all students have the same drive structure and notebooks can be shared (for synchronous sessions). 
We also used the platform to combine multiple datasets, perform a meta-analysis, and allowed the students to analyze large datasets with 1000s of subjects and factors. Projects that required increased computational resources were integrated with Google Cloud Compute. In future, all research projects can include some aspects of reanalyzing public data, providing students with data science experience. Colab is also an excellent environment in which to develop data skills in multiple languages (e.g., Perl, Python, Julia).}, } @article {pmid35970834, year = {2022}, author = {Kim, M and Jiang, X and Lauter, K and Ismayilzada, E and Shams, S}, title = {Secure human action recognition by encrypted neural network inference.}, journal = {Nature communications}, volume = {13}, number = {1}, pages = {4799}, pmid = {35970834}, issn = {2041-1723}, support = {R13 HG009072/HG/NHGRI NIH HHS/United States ; R01 AG066749/AG/NIA NIH HHS/United States ; }, mesh = {*Activities of Daily Living ; Algorithms ; Cloud Computing ; *Computer Security ; Humans ; Neural Networks, Computer ; Pattern Recognition, Automated ; }, abstract = {Advanced computer vision technology can provide near real-time home monitoring to support "aging in place" by detecting falls and symptoms related to seizures and stroke. Affordable webcams, together with cloud computing services (to run machine learning algorithms), can potentially bring significant social benefits. However, it has not been deployed in practice because of privacy concerns. In this paper, we propose a strategy that uses homomorphic encryption to resolve this dilemma, which guarantees information confidentiality while retaining action detection. Our protocol for secure inference can distinguish falls from activities of daily living with 86.21% sensitivity and 99.14% specificity, with an average inference latency of 1.2 seconds and 2.4 seconds on real-world test datasets using small and large neural nets, respectively. 
We show that our method enables a 613x speedup over the latency-optimized LoLa and achieves an average of 3.1x throughput increase in secure inference compared to the throughput-optimized nGraph-HE2.}, } @article {pmid35968406, year = {2023}, author = {Gupta, YP and Mukul, and Gupta, N}, title = {Deep learning model based multimedia retrieval and its optimization in augmented reality applications.}, journal = {Multimedia tools and applications}, volume = {82}, number = {6}, pages = {8447-8466}, pmid = {35968406}, issn = {1380-7501}, abstract = {With the uproar of touchless technology, the Virtual Continuum has seen some spark in the upcoming products. Today numerous gadgets support the use of Mixed Reality / Augmented Reality (AR)/ Virtual Reality. The Head Mounted Displays (HMDs) like that of Hololens, Google Lens, Jio Glass manifested reality into virtuality. Other than the HMDs many organizations tend to develop mobile AR applications to support umpteen number of industries like medicine, education, construction. Currently, the major issue lies in the performance parameters of these applications, while deploying for mobile application's graphics performance, latency, and CPU functioning. Many industries pose real-time computation requirements in AR but do not implement an efficient algorithm in their frameworks. Offloading the computation of deep learning models involved in the application to the cloud servers will highly affect the processing parameters. For our use case, we will be using Multi-Task Cascaded Convolutional Neural Network (MTCNN) which is a modern tool for face detection, using a 3-stage neural network detector. Therefore, the optimization of communication between local application and cloud computing frameworks needs to be optimized. 
The proposed framework defines how the parameters involving the complete deployment of a mobile AR application can be optimized in terms of retrieval of multimedia, its processing, and augmentation of graphics, eventually enhancing the performance. To implement the proposed algorithm a mobile application is created in Unity3D. The mobile application virtually augments a 3D model of a skeleton on a target face. After the mentioned experimentation, it is found that average Media Retrieval Time (1.1471 μ s) and Client Time (1.1207 μ s) in the local application are extremely low than the average API process time (288.934ms). The highest time latency is achieved at the frame rate higher than 80fps.}, } @article {pmid35968403, year = {2022}, author = {Finnegan, A and Potenziani, DD and Karutu, C and Wanyana, I and Matsiko, N and Elahi, C and Mijumbi, N and Stanley, R and Vota, W}, title = {Deploying machine learning with messy, real world data in low- and middle-income countries: Developing a global health use case.}, journal = {Frontiers in big data}, volume = {5}, number = {}, pages = {553673}, pmid = {35968403}, issn = {2624-909X}, abstract = {The rapid emergence of machine learning in the form of large-scale computational statistics and accumulation of data offers global health implementing partners an opportunity to adopt, adapt, and apply these techniques and technologies to low- and middle-income country (LMIC) contexts where we work. These benefits reside just out of the reach of many implementing partners because they lack the experience and specific skills to use them. Yet the growth of available analytical systems and exponential growth of data require the global digital health community to become conversant in this technology to continue to make contributions to help fulfill our missions. In this community case study, we describe the approach we took at IntraHealth International to inform the use case for machine learning in global health and development. 
We found that the data needed to take advantage of machine learning were plentiful and that an international, interdisciplinary team can be formed to collect, clean, and analyze the data at hand using cloud-based (e.g., Dropbox, Google Drive) and open source tools (e.g., R). We organized our work as a "sprint" lasting roughly 10 weeks in length so that we could rapidly prototype these approaches in order to achieve institutional buy in. Our initial sprint resulted in two requests in subsequent workplans for analytics using the data we compiled and directly impacted program implementation.}, } @article {pmid35967636, year = {2022}, author = {Liu, S}, title = {Anti-monopoly supervision model of platform economy based on big data and sentiment.}, journal = {Frontiers in psychology}, volume = {13}, number = {}, pages = {953271}, pmid = {35967636}, issn = {1664-1078}, abstract = {With the advent of the cloud computing era, big data technology has also developed rapidly. Due to the huge volume, variety, fast processing speed and low value density of big data, traditional data storage, extraction, transformation and analysis technologies are not suitable, so new solutions for big data application technologies are needed. However, with the development of economic theory and the practice of market economy, some links in the industrial chain of natural monopoly industries already have a certain degree of competitiveness. In this context, the article conducts a research on the anti-monopoly supervision mode of platform economy based on big data and sentiment analysis. This paper introduces the main idea of MapReduce, the current software implementation specifies a Map function that maps a set of key-value pairs into a new set of key-value pairs. It specifies a concurrent Reduce function that guarantees that each of all mapped key-value pairs share the same set of keys. It establishes a vector space model, and basically realizes the extraction of text emotional elements. 
It introduces the theoretical controversy of antitrust regulation of predatory pricing behavior of third-party payment platforms, and conducts model experiments. The experimental results show that the throughput of 40 test users in 1 h of test is determined by two factors, QPS and the number of concurrent, where QPS = 40/(60*60) transactions/second. The time for each test user to log in to the system is 10 min, and the average response time is 10*60 s, then the number of concurrency = QPS*average response time = 40/(60*60)*10*60 = 6.66. This paper has successfully completed the research on the anti-monopoly supervision model of platform economy based on big data and sentiment analysis.}, } @article {pmid35966392, year = {2022}, author = {Berisha, B and Mëziu, E and Shabani, I}, title = {Big data analytics in Cloud computing: an overview.}, journal = {Journal of cloud computing (Heidelberg, Germany)}, volume = {11}, number = {1}, pages = {24}, pmid = {35966392}, issn = {2192-113X}, abstract = {Big Data and Cloud Computing as two mainstream technologies, are at the center of concern in the IT field. Every day a huge amount of data is produced from different sources. This data is so big in size that traditional processing tools are unable to deal with them. Besides being big, this data moves fast and has a lot of variety. Big Data is a concept that deals with storing, processing and analyzing large amounts of data. Cloud computing on the other hand is about offering the infrastructure to enable such processes in a cost-effective and efficient manner. Many sectors, including among others businesses (small or large), healthcare, education, etc. are trying to leverage the power of Big Data. In healthcare, for example, Big Data is being used to reduce costs of treatment, predict outbreaks of pandemics, prevent diseases etc. This paper presents an overview of Big Data Analytics as a crucial process in many fields and sectors. 
We start by a brief introduction to the concept of Big Data, the amount of data that is generated on a daily basis, features and characteristics of Big Data. We then delve into Big Data Analytics where we discuss issues such as analytics cycle, analytics benefits and the movement from ETL to ELT paradigm as a result of Big Data analytics in Cloud. As a case study we analyze Google's BigQuery which is a fully-managed, serverless data warehouse that enables scalable analysis over petabytes of data. As a Platform as a Service (PaaS), it supports querying using ANSI SQL. We use the tool to perform different experiments such as average read, average compute, average write, on different sizes of datasets.}, } @article {pmid35965760, year = {2022}, author = {Sadad, T and Bukhari, SAC and Munir, A and Ghani, A and El-Sherbeeny, AM and Rauf, HT}, title = {Detection of Cardiovascular Disease Based on PPG Signals Using Machine Learning with Cloud Computing.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {1672677}, pmid = {35965760}, issn = {1687-5273}, mesh = {Bayes Theorem ; *COVID-19/diagnosis ; *Cardiovascular Diseases/diagnosis ; Cloud Computing ; Humans ; Machine Learning ; Pandemics ; Photoplethysmography/methods ; }, abstract = {Hypertension is the main cause of blood pressure (BP), which further causes various cardiovascular diseases (CVDs). The recent COVID-19 pandemic raised the burden on the healthcare system and also limits the resources to these patients only. The treatment of chronic patients, especially those who suffer from CVD, has fallen behind, resulting in increased deaths from CVD around the world. Regular monitoring of BP is crucial to prevent CVDs as it can be controlled and diagnosed through constant monitoring. To find an effective and convenient procedure for the early diagnosis of CVDs, photoplethysmography (PPG) is recognized as a low-cost technology. 
Through PPG technology, various cardiovascular parameters, including blood pressure, heart rate, blood oxygen saturation, etc., are detected. Merging the healthcare domain with information technology (IT) is a demanding area to reduce the rehospitalization of CVD patients. In the proposed model, PPG signals from the Internet of things (IoT)-enabled wearable patient monitoring (WPM) devices are used to monitor the heart rate (HR), etc., of the patients remotely. This article investigates various machine learning techniques such as decision tree (DT), naïve Bayes (NB), and support vector machine (SVM) and the deep learning model one-dimensional convolutional neural network-long short-term memory (1D CNN-LSTM) to develop a system that assists physicians during continuous monitoring, which achieved an accuracy of 99.5% using PPG-BP data set. The proposed system provides cost-effective, efficient, and fully connected monitoring systems for cardiac patients.}, } @article {pmid35963375, year = {2022}, author = {Palomeque-Mangut, S and Meléndez, F and Gómez-Suárez, J and Frutos-Puerto, S and Arroyo, P and Pinilla-Gil, E and Lozano, J}, title = {Wearable system for outdoor air quality monitoring in a WSN with cloud computing: Design, validation and deployment.}, journal = {Chemosphere}, volume = {307}, number = {Pt 3}, pages = {135948}, doi = {10.1016/j.chemosphere.2022.135948}, pmid = {35963375}, issn = {1879-1298}, mesh = {*Air Pollutants/analysis ; *Air Pollution/analysis ; Cloud Computing ; Environmental Monitoring/methods ; Humans ; Oxides ; *Wearable Electronic Devices ; }, abstract = {Breathing poor-quality air is a global threat at the same level as unhealthy diets or tobacco smoking, so the availability of affordable instrument for the measurement of air pollutant levels is highly relevant for human and environmental protection. 
We developed an air quality monitoring platform that comprises a wearable device embedding low-cost metal oxide semiconductor (MOS) gas sensors, a PM sensor, and a smartphone for collecting the data using Bluetooth Low Energy (BLE) communication. Our own developed app displays information about the air surrounding the user and sends the gathered geolocalized data to a cloud, where the users can map the air quality levels measured in the network. The resulting device is small-sized, light-weighted, compact, and belt-worn, with a user-friendly interface and a low cost. The data collected by the sensor array are validated in two experimental setups, first in laboratory-controlled conditions and then against referential pollutant concentrations measured by standard instruments in an outdoor environment. The performance of our air quality platform was tested in a field testing campaign in Barcelona with six moving devices acting as wireless sensor nodes. Devices were trained by means of machine learning algorithms to differentiate between air quality index (AQI) referential concentration values (97% success in the laboratory, 82.3% success in the field). Humidity correction was applied to all data.}, } @article {pmid35958753, year = {2022}, author = {Qi, W and Wang, H and Chen, T}, title = {Multimedia System Design and Data Storage Optimization Based on Machine Learning Algorithm.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {6426551}, pmid = {35958753}, issn = {1687-5273}, mesh = {Algorithms ; *Information Storage and Retrieval ; Machine Learning ; *Multimedia ; Reproducibility of Results ; }, abstract = {With the advancement of science and technology, digital technology and Internet of Things network technology have been developed rapidly, and multimedia technology has also been widely used. Multimedia formats such as digital TV and elevator posters are shaking up traditional media. 
At the same time, many media operation models and multimedia technologies are combined to plan operational strategies, determine operational goals, and change the traditional media structure to achieve commercial profits and society benefit. However, due to limitations in the existing operating model or unreasonable technical solutions, it is not easy to maximize the value of multimedia technology. The XML-based database has been submitted, and it will carry out the business requirements of the transaction network and the business platform of the transaction network. Integrated management mechanism is analyzed and applied. The framework design includes parallel quota processing module, update processing module, result processing module, and storage library and database connection management module. The department runs multiple parts of the system together and completes the database. The development of cloud database is based on cloud computing. It can effectively fill the shortcomings and gaps of traditional database storage and processing, and it can also provide high-reciprocity databases to provide storage and management services. It has high reliability. Cloud servers use fair weighted rounding algorithms to achieve load balancing and use the in-memory database Redis to realize terminal data caching. 
After a comprehensive test of the system, the system can perform all functions normally, and it has good performance and stable operation.}, } @article {pmid35958748, year = {2022}, author = {Rahman, AU and Asif, RN and Sultan, K and Alsaif, SA and Abbas, S and Khan, MA and Mosavi, A}, title = {ECG Classification for Detecting ECG Arrhythmia Empowered with Deep Learning Approaches.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {6852845}, pmid = {35958748}, issn = {1687-5273}, mesh = {Arrhythmias, Cardiac/diagnosis ; Cloud Computing ; *Deep Learning ; Electrocardiography/methods ; Humans ; Machine Learning ; }, abstract = {According to the World Health Organization (WHO) report, heart disease is spreading throughout the world very rapidly and the situation is becoming alarming in people aged 40 or above (Xu, 2020). Different methods and procedures are adopted to detect and diagnose heart abnormalities. Data scientists are working on finding the different methods with the required accuracy (Strodthoff et al., 2021). Electrocardiogram (ECG) is the procedure to find the heart condition in the waveform. For ages, the machine learning techniques, which are feature based, played a vital role in the medical sciences and centralized the data in cloud computing and having access throughout the world. Furthermore, deep learning or transfer learning widens the vision and introduces different transfer learning methods to ensure accuracy and time management to detect the ECG in a better way in comparison to the previous and machine learning methods. Hence, it is said that transfer learning has turned world research into more appropriate and innovative research. Here, the proposed comparison and accuracy analysis of different transfer learning methods by using ECG classification for detecting ECG Arrhythmia (CAA-TL). The CAA-TL model has the multiclassification of the ECG dataset, which has been taken from Kaggle. 
Some of the healthy and unhealthy datasets have been taken in real-time, augmented, and fused with the Kaggle dataset, i.e., Massachusetts Institute of Technology-Beth Israel Hospital (MIT-BIH dataset). The CAA-TL worked on the accuracy of heart problem detection by using different methods like ResNet50, AlexNet, and SqueezeNet. All three deep learning methods showed remarkable accuracy, which is improved from the previous research. The comparison of different deep learning approaches with respect to layers widens the research and gives the more clarity and accuracy and at the same time finds it time-consuming while working with multiclassification with massive dataset of ECG. The implementation of the proposed method showed an accuracy of 98.8%, 90.08%, and 91% for AlexNet, SqueezeNet, and ResNet50, respectively.}, } @article {pmid35958385, year = {2022}, author = {Jiang, S}, title = {Hotspot Mining in the Field of Library and Information Science under the Environment of Big Data.}, journal = {Journal of environmental and public health}, volume = {2022}, number = {}, pages = {2802835}, pmid = {35958385}, issn = {1687-9813}, mesh = {*Artificial Intelligence ; *Big Data ; Data Mining/methods ; }, abstract = {Currently, with the implementation of big data strategies in countries all over the world, big data has achieved vigorous development in various fields. Big data research and application practices have also rapidly attracted the attention of the library and information field. Objective. The study explored the current state of research and research hotspots of big data in the library and information field and further discussed the future research trends. Methods. In the CNKI database, 16 CSSCI source journals in the discipline of library information and digital library were selected as data sources, and the relevant literature was retrieved with the theme of "big data." The collected literature was excluded and expanded according to the citation relationship. 
Then, with the help of Bicomb and SPSS, co-word analysis and cluster analysis would be carried out on these literature results. Results. According to the findings of the data analysis, the research hotspots on the topic mainly focus on five major research themes, namely, big data and smart library, big data and intelligence research, data mining and cloud computing, big data and information analysis, and library innovation and services. Limitations. At present, the research scope and coverage on this topic are wide, which leads to the research still staying at the macro level. Conclusions. Big data research will remain one of the hotspots in the future. However, the most study is still limited to the perspective of library and information and has not yet analyzed the research status, research hotspots, and development trends in this field from the perspective of big data knowledge structure. Moreover, machine learning, artificial intelligence, knowledge services, AR, and VR may be new directions for future attention and development.}, } @article {pmid35957481, year = {2022}, author = {Foroughimehr, N and Vilagosh, Z and Yavari, A and Wood, A}, title = {The Impact of Base Cell Size Setup on the Finite Difference Time Domain Computational Simulation of Human Cornea Exposed to Millimeter Wave Radiation at Frequencies above 30 GHz.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {15}, pages = {}, pmid = {35957481}, issn = {1424-8220}, support = {APP1042464//National Health and Medical Research Council/ ; }, mesh = {Cell Size ; Computer Simulation ; *Cornea ; Electricity ; *Electromagnetic Fields ; Humans ; }, abstract = {Mobile communication has achieved enormous technology innovations over many generations of progression. New cellular technology, including 5G cellular systems, is being deployed and making use of higher frequencies, including the Millimetre Wave (MMW) range (30-300 GHz) of the electromagnetic spectrum. 
Numerical computational techniques such as the Finite Difference Time Domain (FDTD) method have been used extensively as an effective approach for assessing electromagnetic fields' biological impacts. This study demonstrates the variation of the accuracy of the FDTD computational simulation system when different meshing sizes are used, by using the interaction of the critically sensitive human cornea with EM in the 30 to 100 GHz range. Different approaches of base cell size specifications were compared. The accuracy of the computation is determined by applying planar sensors showing the detail of electric field distribution as well as the absolute values of electric field collected by point sensors. It was found that manually defining the base cell sizes reduces the model size as well as the computation time. However, the accuracy of the computation decreases in an unpredictable way. The results indicated that using a cloud computing capacity plays a crucial role in minimizing the computation time.}, } @article {pmid35957453, year = {2022}, author = {Bahache, M and Tahari, AEK and Herrera-Tapia, J and Lagraa, N and Calafate, CT and Kerrache, CA}, title = {Towards an Accurate Faults Detection Approach in Internet of Medical Things Using Advanced Machine Learning Techniques.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {15}, pages = {}, pmid = {35957453}, issn = {1424-8220}, mesh = {Humans ; Internet ; *Machine Learning ; *Wireless Technology ; }, abstract = {Remotely monitoring people's healthcare is still among the most important research topics for researchers from both industry and academia. In addition, with the Wireless Body Networks (WBANs) emergence, it becomes possible to supervise patients through an implanted set of body sensors that can communicate through wireless interfaces. 
These body sensors are characterized by their tiny sizes, and limited resources (power, computing, and communication capabilities), which makes these devices prone to have faults and sensible to be damaged. Thus, it is necessary to establish an efficient system to detect any fault or anomalies when receiving sensed data. In this paper, we propose a novel, optimized, and hybrid solution between machine learning and statistical techniques, for detecting faults in WBANs that do not affect the devices' resources and functionality. Experimental results illustrate that our approach can detect unwanted measurement faults with a high detection accuracy ratio that exceeds the 99.62%, and a low mean absolute error of 0.61%, clearly outperforming the existing state-of-art solutions.}, } @article {pmid35957452, year = {2022}, author = {Kim, M and Joo, S}, title = {Time-Constrained Adversarial Defense in IoT Edge Devices through Kernel Tensor Decomposition and Multi-DNN Scheduling.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {15}, pages = {}, pmid = {35957452}, issn = {1424-8220}, support = {NRF-2020R1G1A1012170//National Research Foundation of Korea/ ; }, abstract = {The development of deep learning technology has resulted in great contributions in many artificial intelligence services, but adversarial attack techniques on deep learning models are also becoming more diverse and sophisticated. IoT edge devices take cloud-independent on-device DNN (deep neural network) processing technology to exhibit a fast response time. However, if the computational complexity of the denoizer for adversarial noises is high, or if a single embedded GPU is shared by multiple DNN models, adversarial defense at the on-device level is bound to represent a long latency. To solve this problem, eDenoizer is proposed in this paper. First, it applies Tucker decomposition to reduce the computational amount required for convolutional kernel tensors in the denoizer. 
Second, eDenoizer effectively orchestrates both the denoizer and the model defended by the denoizer simultaneously. In addition, the priority of the CPU side can be projected onto the GPU which is completely priority-agnostic, so that the delay can be minimized when the denoizer and the defense target model are assigned a high priority. As a result of confirming through extensive experiments, the reduction of classification accuracy was very marginal, up to 1.78%, and the inference speed accompanied by adversarial defense was improved up to 51.72%.}, } @article {pmid35957450, year = {2022}, author = {Liutkevičius, A and Morkevičius, N and Venčkauskas, A and Toldinas, J}, title = {Distributed Agent-Based Orchestrator Model for Fog Computing.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {15}, pages = {}, pmid = {35957450}, issn = {1424-8220}, support = {830892//European Union's Horizon 2020 research and innovation program/ ; }, mesh = {*Algorithms ; *Cloud Computing ; Delivery of Health Care ; }, abstract = {Fog computing is an extension of cloud computing that provides computing services closer to user end-devices at the network edge. One of the challenging topics in fog networks is the placement of tasks on fog nodes to obtain the best performance and resource usage. The process of mapping tasks for resource-constrained devices is known as the service or fog application placement problem (SPP, FAPP). The highly dynamic fog infrastructures with mobile user end-devices and constantly changing fog nodes resources (e.g., battery life, security level) require distributed/decentralized service placement (orchestration) algorithms to ensure better resilience, scalability, and optimal real-time performance. However, recently proposed service placement algorithms rarely support user end-device mobility, constantly changing the resource availability of fog nodes and the ability to recover from fog node failures at the same time. 
In this article, we propose a distributed agent-based orchestrator model capable of flexible service provisioning in a dynamic fog computing environment by considering the constraints on the central processing unit (CPU), memory, battery level, and security level of fog nodes. Distributing the decision-making to multiple orchestrator fog nodes instead of relying on the mapping of a single central entity helps to spread the load and increase scalability and, most importantly, resilience. The prototype system based on the proposed orchestrator model was implemented and tested with real hardware. The results show that the proposed model is efficient in terms of response latency and computational overhead, which are minimal compared to the placement algorithm itself. The research confirms that the proposed orchestrator approach is suitable for various fog network applications when scalability, mobility, and fault tolerance must be guaranteed.}, } @article {pmid35957307, year = {2022}, author = {Ismail, L and Buyya, R}, title = {Artificial Intelligence Applications and Self-Learning 6G Networks for Smart Cities Digital Ecosystems: Taxonomy, Challenges, and Future Directions.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {15}, pages = {}, pmid = {35957307}, issn = {1424-8220}, support = {31R215//National Water and Energy Center of the United Arab Emirates University/ ; }, mesh = {*Artificial Intelligence ; Cities ; *Ecosystem ; Technology/methods ; Wireless Technology ; }, abstract = {The recent upsurge of smart cities' applications and their building blocks in terms of the Internet of Things (IoT), Artificial Intelligence (AI), federated and distributed learning, big data analytics, blockchain, and edge-cloud computing has urged the design of the upcoming 6G network generation, due to their stringent requirements in terms of the quality of services (QoS), availability, and dependability to satisfy a Service-Level-Agreement (SLA) for the end users. 
Industries and academia have started to design 6G networks and propose the use of AI in its protocols and operations. Published papers on the topic discuss either the requirements of applications via a top-down approach or the network requirements in terms of agility, performance, and energy saving using a down-top perspective. In contrast, this paper adopts a holistic outlook, considering the applications, the middleware, the underlying technologies, and the 6G network systems towards an intelligent and integrated computing, communication, coordination, and decision-making ecosystem. In particular, we discuss the temporal evolution of the wireless network generations' development to capture the applications, middleware, and technological requirements that led to the development of the network generation systems from 1G to AI-enabled 6G and its employed self-learning models. We provide a taxonomy of the technology-enabled smart city applications' systems and present insights into those systems for the realization of a trustworthy and efficient smart city ecosystem. We propose future research directions in 6G networks for smart city applications.}, } @article {pmid35957281, year = {2022}, author = {Alwaheidi, MKS and Islam, S}, title = {Data-Driven Threat Analysis for Ensuring Security in Cloud Enabled Systems.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {15}, pages = {}, pmid = {35957281}, issn = {1424-8220}, mesh = {*Cloud Computing ; Computer Security ; *Ecosystem ; }, abstract = {Cloud computing offers many benefits including business flexibility, scalability and cost savings but despite these benefits, there exist threats that require adequate attention for secure service delivery. Threats in a cloud-based system need to be considered from a holistic perspective that accounts for data, application, infrastructure and service, which can pose potential risks. 
Data certainly plays a critical role within the whole ecosystem and organisations should take account of and protect data from any potential threats. Due to the variation of data types, status, and location, understanding the potential security concerns in cloud-based infrastructures is more complex than in a traditional system. The existing threat modeling approaches lack the ability to analyse and prioritise data-related threats. The main contribution of the paper is a novel data-driven threat analysis (d-TM) approach for the cloud-based systems. The main motivation of d-TM is the integration of data from three levels of abstractions, i.e., management, control, and business and three phases, i.e., storage, process and transmittance, within each level. The d-TM provides a systematic flow of attack surface analysis from the user agent to the cloud service provider based on the threat layers in cloud computing. Finally, a cloud-based use case scenario was used to demonstrate the applicability of the proposed approach. The result shows that d-TM revealed four critical threats out of the seven threats based on the identified assets. The threats targeted management and business data in general, while targeting data in process and transit more specifically.}, } @article {pmid35945076, year = {2022}, author = {Jones, HE and Wilson, PB}, title = {Progress and opportunities through use of genomics in animal production.}, journal = {Trends in genetics : TIG}, volume = {38}, number = {12}, pages = {1228-1252}, doi = {10.1016/j.tig.2022.06.014}, pmid = {35945076}, issn = {0168-9525}, mesh = {Animals ; Humans ; *Animal Husbandry ; *Livestock/genetics ; Animal Welfare ; Genomics ; Genome/genetics ; }, abstract = {The rearing of farmed animals is a vital component of global food production systems, but its impact on the environment, human health, animal welfare, and biodiversity is being increasingly challenged. 
Developments in genetic and genomic technologies have had a key role in improving the productivity of farmed animals for decades. Advances in genome sequencing, annotation, and editing offer a means not only to continue that trend, but also, when combined with advanced data collection, analytics, cloud computing, appropriate infrastructure, and regulation, to take precision livestock farming (PLF) and conservation to an advanced level. Such an approach could generate substantial additional benefits in terms of reducing use of resources, health treatments, and environmental impact, while also improving animal health and welfare.}, } @article {pmid35942755, year = {2022}, author = {Chiang, TW and Chiang, DL and Chen, TS and Lin, FY and Shen, VRL and Wang, MC}, title = {Novel Lagrange interpolation polynomials for dynamic access control in a healthcare cloud system.}, journal = {Mathematical biosciences and engineering : MBE}, volume = {19}, number = {9}, pages = {9200-9219}, doi = {10.3934/mbe.2022427}, pmid = {35942755}, issn = {1551-0018}, mesh = {Algorithms ; Cloud Computing ; *Computer Security ; *Confidentiality ; Delivery of Health Care ; }, abstract = {The authority of user personal health records (PHRs) is usually determined by the owner of a cloud computing system. When a PHR file is accessed, a dynamic access control algorithm must be used to authenticate the users. The proposed dynamic access control algorithm is based on a novel Lagrange interpolation polynomial with timestamps, mainly functioning to authenticate the users with key information. Moreover, the inclusion of timestamps allows user access within an approved time slot to enhance the security of the healthcare cloud system. According to the security analysis results, this healthcare cloud system can effectively resist common attacks, including external attacks, internal attacks, collaborative attacks and equation-based attacks. 
Furthermore, the overall computational complexity of establishing and updating the polynomials is $O(n \cdot m \cdot (\log m)^2)$, which is a promising result, where m denotes the degree of the polynomial $G\left(x, y\right)$ and n denotes the number of secure users in the hierarchy.}, } @article {pmid35942754, year = {2022}, author = {Cui, D and Huang, H and Peng, Z and Li, Q and He, J and Qiu, J and Luo, X and Ou, J and Fan, C}, title = {Next-generation 5G fusion-based intelligent health-monitoring platform for ethylene cracking furnace tube.}, journal = {Mathematical biosciences and engineering : MBE}, volume = {19}, number = {9}, pages = {9168-9199}, doi = {10.3934/mbe.2022426}, pmid = {35942754}, issn = {1551-0018}, mesh = {*Artificial Intelligence ; Automation ; Ethylenes ; *Intelligence ; }, abstract = {This study aimed to develop a 5G + "mixed computing" + deep learning-based next-generation intelligent health-monitoring platform for an ethylene cracking furnace tube based on 5G communication technology, with the goal of improving the health management level of the key component of ethylene production, that is, the cracking furnace tube, and focusing on the key common technical difficulties of ethylene production of tube outer-surface temperature sensing and tube slagging diagnosis. It also integrated the edge-fog-cloud "mixed computing" technology and deep learning technology in artificial intelligence, which had a higher degree in the research and development of automation and intelligence, and was more versatile in an industrial environment. The platform included a 5G-based tube intelligent temperature-measuring device, a 5G-based intelligent peep door gearing, a 5G-based edge-fog-cloud collaboration mechanism, and a mixed deep learning-related application.
The platform enhanced the automation and intelligence of the enterprise, which could not only promote the quality and efficiency of the enterprise but also protect the safe operation of the cracking furnace device and lead the technological progress and transformation and upgrading of the industry through the application.}, } @article {pmid35942147, year = {2022}, author = {Zhang, T and Han, Q and Zhang, Z}, title = {Sport Resource Classification Algorithm for Health Promotion Based on Cloud Computing: Rhythmic Gymnastics' Example.}, journal = {Journal of environmental and public health}, volume = {2022}, number = {}, pages = {2587169}, pmid = {35942147}, issn = {1687-9813}, mesh = {Algorithms ; *Cloud Computing ; *Gymnastics ; Health Promotion ; }, abstract = {In the processing of rhythmic gymnastics resources, there are inefficiency problems such as confusion of teaching resources and lack of individuation. To improve the health access to teaching resource data, such as videos and documents, this study proposes a cloud computing-based personalized rhythmic gymnastics teaching resource classification algorithm for health promotion. First, personalized rhythmic gymnastics teaching resource database is designed based on cloud computing technology, and the teaching resources in the database are preprocessed to obtain a meta-sample set. Then, the characteristics of teaching resources are selected by the information acquisition method, and a vector space model is established to calculate the similarity of teaching resources. Finally, the distance-weighted k-NN method is used to classify the teaching resources for health promotion. 
The experimental results show that the classification accuracy of the proposed algorithm is high, the recall rate is high, and the F-measure value is high, which verifies the effectiveness of the algorithm.}, } @article {pmid35937323, year = {2022}, author = {Huang, C and Li, W and Zhang, Z and Hua, X and Yang, J and Ye, J and Duan, L and Liang, X and Yang, W}, title = {An Intelligent Rice Yield Trait Evaluation System Based on Threshed Panicle Compensation.}, journal = {Frontiers in plant science}, volume = {13}, number = {}, pages = {900408}, pmid = {35937323}, issn = {1664-462X}, abstract = {High-throughput phenotyping of yield-related traits is meaningful and necessary for rice breeding and genetic study. The conventional method for rice yield-related trait evaluation faces the problems of rice threshing difficulties, measurement process complexity, and low efficiency. To solve these problems, a novel intelligent system, which includes an integrated threshing unit, grain conveyor-imaging units, threshed panicle conveyor-imaging unit, and specialized image analysis software has been proposed to achieve rice yield trait evaluation with high throughput and high accuracy. To improve the threshed panicle detection accuracy, the Region of Interest Align, Convolution Batch normalization activation with Leaky Relu module, Squeeze-and-Excitation unit, and optimal anchor size have been adopted to optimize the Faster-RCNN architecture, termed 'TPanicle-RCNN,' and the new model achieved F1 score 0.929 with an increase of 0.044, which was robust to indica and japonica varieties. Additionally, AI cloud computing was adopted, which dramatically reduced the system cost and improved flexibility. To evaluate the system accuracy and efficiency, 504 panicle samples were tested, and the total spikelet measurement error decreased from 11.44 to 2.99% with threshed panicle compensation. 
The average measuring efficiency was approximately 40 s per sample, which was approximately twenty times more efficient than manual measurement. In this study, an automatic and intelligent system for rice yield-related trait evaluation was developed, which would provide an efficient and reliable tool for rice breeding and genetic research.}, } @article {pmid35931501, year = {2022}, author = {Kumon, RE}, title = {Teaching an advanced undergraduate acoustics laboratory without a laboratory: Course developments enabling teaching during the COVID-19 pandemic.}, journal = {The Journal of the Acoustical Society of America}, volume = {152}, number = {1}, pages = {9}, doi = {10.1121/10.0011808}, pmid = {35931501}, issn = {1520-8524}, mesh = {Acoustics ; *COVID-19/epidemiology ; Humans ; Learning ; Pandemics ; Students ; Teaching ; }, abstract = {This paper describes ongoing developments to an advanced laboratory course at Kettering University, which is targeted to students in engineering and engineering physics and emphasizes theoretical, computational, and experimental components in the context of airborne acoustics and modal testing [cf. D. A. Russell and D. O. Ludwigsen, J. Acoust. Soc. Am. 131, 2515-2524 (2012)]. These developments have included a transition to electronic laboratory notebooks and cloud-based computing resources, incorporation of updated hardware and software, and creation and testing of a multiple-choice assessment instrument for the course. When Kettering University suddenly shifted to exclusively remote teaching in March 2020 due to the COVID-19 pandemic, many of these changes proved to be essential for enabling rapid adaptation to a situation in which a laboratory was not available for the course. Laboratory activities were rewritten by crowdsourcing archived data, videos were incorporated to illustrate dynamic phenomena, and computer simulations were used to retain student interactivity. 
The comparison of multiple measures, including the assessment instrument, team-based grades on project papers, and individual grades on final exams, indicates that most students were successful at learning the course material and adapting to work on team-based projects in the midst of challenging remote learning conditions.}, } @article {pmid35930042, year = {2023}, author = {Mokhtarzadeh, H and Jiang, F and Zhao, S and Malekipour, F}, title = {OpenColab project: OpenSim in Google colaboratory to explore biomechanics on the web.}, journal = {Computer methods in biomechanics and biomedical engineering}, volume = {26}, number = {9}, pages = {1055-1063}, doi = {10.1080/10255842.2022.2104607}, pmid = {35930042}, issn = {1476-8259}, mesh = {*User-Computer Interface ; Biomechanical Phenomena ; *Search Engine ; Software ; Internet ; }, abstract = {OpenSim is an open-source biomechanical package with a variety of applications. It is available for many users with bindings in MATLAB, Python, and Java via its application programming interfaces (APIs). Although the developers described well the OpenSim installation on different operating systems (Windows, Mac, and Linux), it is time-consuming and complex since each operating system requires a different configuration. This project aims to demystify the development of neuro-musculoskeletal modeling in OpenSim with zero configuration on any operating system for installation (thus cross-platform), easy to share models while accessing free graphical processing units (GPUs) on a web-based platform of Google Colab. To achieve this, OpenColab was developed where OpenSim source code was used to build a Conda package that can be installed on the Google Colab with only one block of code in less than 7 min. To use OpenColab, one requires a connection to the internet and a Gmail account. Moreover, OpenColab accesses vast libraries of machine learning methods available within free Google products, e.g. TensorFlow. 
Next, we performed an inverse problem in biomechanics and compared OpenColab results with OpenSim graphical user interface (GUI) for validation. The outcomes of OpenColab and GUI matched well (r≥0.82). OpenColab takes advantage of the zero-configuration of cloud-based platforms, accesses GPUs, and enables users to share and reproduce modeling approaches for further validation, innovative online training, and research applications. Step-by-step installation processes and examples are available at: https://simtk.org/projects/opencolab.}, } @article {pmid35928494, year = {2022}, author = {Amanat, A and Rizwan, M and Maple, C and Zikria, YB and Almadhor, AS and Kim, SW}, title = {Blockchain and cloud computing-based secure electronic healthcare records storage and sharing.}, journal = {Frontiers in public health}, volume = {10}, number = {}, pages = {938707}, pmid = {35928494}, issn = {2296-2565}, mesh = {*Blockchain ; Cloud Computing ; Delivery of Health Care ; Electronic Health Records ; Electronics ; Humans ; }, abstract = {Healthcare information is essential for both service providers and patients. Further secure sharing and maintenance of Electronic Healthcare Records (EHR) are imperative. EHR systems in healthcare have traditionally relied on a centralized system (e.g., cloud) to exchange health data across healthcare stakeholders, which may expose private and sensitive patient information. EHR has struggled to meet the demands of several stakeholders and systems in terms of safety, isolation, and other regulatory constraints. Blockchain is a distributed, decentralized ledger technology that can provide secured, validated, and immutable data sharing facilities. Blockchain creates a distributed ledger system using techniques of cryptography (hashes) that are consistent and permit actions to be carried out in a distributed manner without needing a centralized authority. Data exploitation is difficult and evident in a blockchain network due to its immutability. 
We propose an architecture based on blockchain technology that authenticates the user identity using a Proof of Stake (POS) cryptography consensus mechanism and Secure Hash Algorithm (SHA256) to secure EHR sharing among different electronic healthcare systems. An Elliptic Curve Digital Signature Algorithm (ECDSA) is used to verify EHR sensors to assemble and transmit data to cloud infrastructure. Results indicate that the proposed solution performs exceptionally well when compared with existing solutions, which include Proof-Of-Work (POW), Secure Hash Algorithm (SHA-1), and Message Digest (MD5) in terms of power consumption, authenticity, and security of healthcare records.}, } @article {pmid35923220, year = {2022}, author = {Qi, L and Wu, F and Ge, Z and Sun, Y}, title = {DeepMatch: Toward Lightweight in Point Cloud Registration.}, journal = {Frontiers in neurorobotics}, volume = {16}, number = {}, pages = {891158}, pmid = {35923220}, issn = {1662-5218}, abstract = {From source to target, point cloud registration solves for a rigid body transformation that aligns the two point clouds. IterativeClosest Point (ICP) and other traditional algorithms require a long registration time and are prone to fall into local optima. Learning-based algorithms such as Deep ClosestPoint (DCP) perform better than those traditional algorithms and escape from local optimality. However, they are still not perfectly robust and rely on the complex model design due to the extracted local features are susceptible to noise. In this study, we propose a lightweight point cloud registration algorithm, DeepMatch. DeepMatch extracts a point feature for each point, which is a spatial structure composed of each point itself, the center point of the point cloud, and the farthest point of each point. 
Because of the superiority of this per-point feature, the computing resources and time required by DeepMatch to complete the training are less than one-tenth of other learning-based algorithms with similar performance. In addition, experiments show that our algorithm achieves state-of-the-art (SOTA) performance on both clean, with Gaussian noise and unseen category datasets. Among them, on the unseen categories, compared to the previous best learning-based point cloud registration algorithms, the registration error of DeepMatch is reduced by two orders of magnitude, achieving the same performance as on the categories seen in training, which proves DeepMatch is generalizable in point cloud registration tasks. Finally, only our DeepMatch completes 100% recall on all three test sets.}, } @article {pmid35922695, year = {2022}, author = {Pouya, S and Aghlmand, M}, title = {Evaluation of urban green space per capita with new remote sensing and geographic information system techniques and the importance of urban green space during the COVID-19 pandemic.}, journal = {Environmental monitoring and assessment}, volume = {194}, number = {9}, pages = {633}, pmid = {35922695}, issn = {1573-2959}, mesh = {*COVID-19/epidemiology ; Cities ; Environmental Monitoring/methods ; *Geographic Information Systems ; Humans ; Pandemics ; Parks, Recreational ; Remote Sensing Technology ; Urbanization ; }, abstract = {A recently conducted study by the Centers for Disease Control and Prevention encouraged access to urban green space for the public over the prevalence of COVID-19 in that exposure to urban green space can positively affect the physical and mental health, including the reduction rate of heart disease, obesity, stress, stroke, and depression. COVID-19 has foregrounded the inadequacy of green space in populated cities. It has also highlighted the extant inequities so as to unequal access to urban green space both quantitatively and qualitatively. 
In this regard, it seems that one of the problems related to Malatya is the uncoordinated distribution of green space in different parts of the city. Therefore, knowing the quantity and quality of these spaces in each region can play an effective role in urban planning. The aim of the present study has been to evaluate urban green space per capita and to investigate its distribution based on the population of the districts of Battalgazi county in Malatya city through developing an integrated methodology (remote sensing and geographic information system). Accordingly, in Google Earth Engine by images of Sentinel-1 and PlanetScope satellites, it was calculated different indexes (NDVI, EVI, PSSR, GNDVI, and NDWI). The data set was prepared and then by combining different data, classification was performed according to support vector machine algorithm. From the landscaping maps obtained, the map was selected with the highest accuracy (overall accuracy: 94.43; and kappa coefficient: 90.5). Finally, by the obtained last map, the distribution of urban green space per capita and their functions in Battalgazi county and its districts were evaluated. The results of the study showed that the existing urban green spaces in the Battalgazi/Malatya were not distributed evenly on the basis of the districts. The per capita urban green space in twenty-four regions is more than 9m[2], and in twenty-three regions it is less than 9m[2]. The recommendation of this study was that Türkiye city planners and landscape designers should replan and redesign the quality and equal distribution of urban green spaces, especially during and following COVID-19 pandemic. Additionally, drawing on the Google Earth Engine cloud system, which has revolutionized GIS and remote sensing, is recommended to be used in land use land cover modeling. It is straightforward to access information and analyze them quickly in Google Earth Engine. 
The published code in this study makes it possible to conduct further relevant studies.}, } @article {pmid35920716, year = {2022}, author = {Petrović, D and Scott, JS and Bodnarchuk, MS and Lorthioir, O and Boyd, S and Hughes, GM and Lane, J and Wu, A and Hargreaves, D and Robinson, J and Sadowski, J}, title = {Virtual Screening in the Cloud Identifies Potent and Selective ROS1 Kinase Inhibitors.}, journal = {Journal of chemical information and modeling}, volume = {62}, number = {16}, pages = {3832-3843}, doi = {10.1021/acs.jcim.2c00644}, pmid = {35920716}, issn = {1549-960X}, mesh = {*Carcinoma, Non-Small-Cell Lung ; Cloud Computing ; Drug Evaluation, Preclinical ; Humans ; *Lung Neoplasms ; Molecular Docking Simulation ; Prospective Studies ; Protein Kinase Inhibitors/chemistry/pharmacology ; Protein-Tyrosine Kinases ; Proto-Oncogene Proteins ; Receptor Protein-Tyrosine Kinases ; }, abstract = {ROS1 rearrangements account for 1-2% of non-small cell lung cancer patients, yet there are no specifically designed, selective ROS1 therapies in the clinic. Previous knowledge of potent ROS1 inhibitors with selectivity over TrkA, a selected antitarget, enabled virtual screening as a hit finding approach in this project. The ligand-based virtual screening was focused on identifying molecules with a similar 3D shape and pharmacophore to the known actives. To that end, we turned to the AstraZeneca virtual library, estimated to cover $10^{15}$ synthesizable make-on-demand molecules. We used cloud computing-enabled FastROCS technology to search the enumerated $10^{10}$ subset of the full virtual space. A small number of specific libraries were prioritized based on the compound properties and a medicinal chemistry assessment and further enumerated with available building blocks. Following the docking evaluation to the ROS1 structure, the most promising hits were synthesized and tested, resulting in the identification of several potent and selective series. 
The best among them gave a nanomolar ROS1 inhibitor with over 1000-fold selectivity over TrkA and, from the preliminary established SAR, these have the potential to be further optimized. Our prospective study describes how conceptually simple shape-matching approaches can identify potent and selective compounds by searching ultralarge virtual libraries, demonstrating the applicability of such workflows and their importance in early drug discovery.}, } @article {pmid35912308, year = {2022}, author = {Qie, D}, title = {The Relevance of Virtual-Assisted Early Childhood Education and Occupational Psychotherapy Based on Emotional Interaction.}, journal = {Occupational therapy international}, volume = {2022}, number = {}, pages = {2785987}, pmid = {35912308}, issn = {1557-0703}, mesh = {Child, Preschool ; Emotions ; Health Education ; Humans ; *Occupational Therapy ; Psychotherapy ; School Teachers ; }, abstract = {This paper presents an in-depth study and analysis of the relevance of early childhood education to occupational psychotherapy using a virtual-assisted affective interaction approach. Starting from the educational theory of interactive cognitive psychology, the theoretical basis for parent-child picture book education for interactive learning is explored, as well as the theoretical development after the introduction of AR technology. Firstly, the analysis of young children's emotions involves massive image processing, and the use of cloud computing architecture leads to high latency, while young children's safety is a latency-sensitive service. Secondly, face recognition accuracy based on static images is not high due to problems such as inconspicuous facial features of toddlers and low-quality kindergarten surveillance videos. In this paper, a face identity correction model based on location features is proposed and the superiority of the model is demonstrated through experiments. Finally, this paper analyzes and mines the emotional data of young children. 
The level of kindergarten teachers' awareness of early childhood mental health education generally showed an upward trend as their titles rose, and there were significant differences in the seven dimensions of early childhood mental health, the purpose and meaning of early childhood mental health education, implementers, targets, content, pathways, and effects; significant differences existed between teachers of different kindergarten natures, and there were significant differences in the purpose and meaning of early childhood mental health education, implementers, targets, content, pathways, effects, and mental health education for young children. Therefore, this paper proposes a face identity correction model based on position information, which considers both the correlation between pixel values in the spatial domain and the correlation between frames in the temporal domain. This paper has developed an emotion analysis system for kindergartens and put it into use in kindergartens to meet the needs of monitoring the safety of young children and evaluating early childhood education and has received good feedback from users, demonstrating the effectiveness of the system.}, } @article {pmid35910077, year = {2022}, author = {Lutnick, B and Manthey, D and Becker, JU and Zuckerman, JE and Rodrigues, L and Jen, KY and Sarder, P}, title = {A tool for federated training of segmentation models on whole slide images.}, journal = {Journal of pathology informatics}, volume = {13}, number = {}, pages = {100101}, pmid = {35910077}, issn = {2229-5089}, abstract = {The largest bottleneck to the development of convolutional neural network (CNN) models in the computational pathology domain is the collection and curation of diverse training datasets. Training CNNs requires large cohorts of image data, and model generalizability is dependent on training data heterogeneity. 
Including data from multiple centers enhances the generalizability of CNN-based models, but this is hindered by the logistical challenges of sharing medical data. In this paper, we explore the feasibility of training our recently developed cloud-based segmentation tool (Histo-Cloud) using federated learning. Using a dataset of renal tissue biopsies we show that federated training to segment interstitial fibrosis and tubular atrophy (IFTA) using datasets from three institutions is not found to be different from a training by pooling the data on one server when tested on a fourth (holdout) institution's data. Further, training a model to segment glomeruli for a federated dataset (split by staining) demonstrates similar performance.}, } @article {pmid35909867, year = {2022}, author = {Zhang, H and Feng, Y and Wang, L}, title = {Cloud Computing to Tourism Economic Data Scheduling Algorithm under the Background of Image and Video.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {3948221}, pmid = {35909867}, issn = {1687-5273}, mesh = {Algorithms ; *Cloud Computing ; *Tourism ; }, abstract = {With the rapid development of image video and tourism economy, tourism economic data are gradually becoming big data. Therefore, how to schedule between data has become a hot topic. This paper first summarizes the research results on image video, cloud computing, tourism economy, and data scheduling algorithms. Secondly, the origin, structure, development, and service types of cloud computing are expounded in detail. And in order to solve the problem of tourism economic data scheduling, this paper regards the completion time and cross-node transmission delay as the constraints of tourism economic data scheduling. 
The constraint model of data scheduling is established, the fitness function is improved on the basis of an artificial immune algorithm combined with the constraint model, and the directional recombination of excellent antibodies is carried out by using the advantages of gene recombination so as to obtain the optimal solution to the problem more appropriately. When the resource node scale is 100, the response time of EDSA is 107.92 seconds.}, } @article {pmid35909865, year = {2022}, author = {Yan, S and Shi, L and Wang, L}, title = {Influence of the Urban Built Environment on Physical and Mental Health of the Elderly under the Background of Big Data.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {4266723}, pmid = {35909865}, issn = {1687-5273}, mesh = {Aged ; *Big Data ; Built Environment ; Cities ; City Planning ; Humans ; Male ; *Mental Health ; }, abstract = {With the advent of the information technology revolution and the Internet era, information technology is gradually occupying an important position and becoming an important strategic factor in economic development. As an emerging technology that has been developing continuously in recent years, big data is becoming an important industry to improve the innovation and development of the urban economy. Like AI technology, cloud computing, and the Internet, big data has become an important application technology for economic growth and economic efficiency improvement in today's world. It is an effective means of progress and development in a region and an important strategic resource. As a new technology, big data has attracted more and more attention from all walks of life. Many companies have turned their attention to developing big data for economic benefits. "Enjoy your old age" is the yearning of every old man and his family. 
In recent years, the national level has been committed to "creating an urban built environment for the elderly to achieve healthy aging." From the perspective of promoting the physical and mental health of the elderly, this paper analyzes the impact of the urban built environment on the physical and mental health of the elderly based on the needs of the elderly and puts forward countermeasures and suggestions based on the current status and existing problems of the urban built environment for the elderly. Based on the combined data analysis method and technology in big data, this paper conducted a field questionnaire survey on a total of 4,000 elderly people in urban and rural areas by means of the questionnaire survey. It is found that the existing problems of the built environment in the old cities include scattered content, one-sided understanding, and rigid design. According to the problems, the solutions of building consensus, paying attention to planning, combining urban characteristics, and the joint efforts of all sectors of society are put forward. And programming tools are used to combine formulas and analyze related data in detail. The analysis results show that the physical and mental health index of the elderly is highly correlated with factors such as changes in the consensus degree of the urban built environment, urban built environment planning, urban built environment policy support, and multiparty efforts in the urban built environment. 
These changes show a positive trend.}, } @article {pmid35903800, year = {2022}, author = {Mishra, N and Singh, RK and Yadav, SK}, title = {Detection of DDoS Vulnerability in Cloud Computing Using the Perplexed Bayes Classifier.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {9151847}, pmid = {35903800}, issn = {1687-5273}, mesh = {*Algorithms ; Bayes Theorem ; *Cloud Computing ; Machine Learning ; }, abstract = {Cloud computing security has been a critical issue with its increase in demand. One of the most challenging problems in cloud computing is detecting distributed denial-of-service (DDoS) attacks. The attack detection framework for the DDoS attack is tricky because of its nonlinear nature of interruption activities, atypical system traffic behaviour, and many features in the problem space. As a result, creating defensive solutions against these attacks is critical for mainstream cloud computing adoption. In this novel research, by using performance parameters, perplexed-based classifiers with and without feature selection will be compared with the existing machine learning algorithms such as naïve Bayes and random forest to prove the efficacy of the perplexed-based classification algorithm. Comparing the performance parameters like accuracy, sensitivity, and specificity, the proposed algorithm has an accuracy of 99%, which is higher than the existing algorithms, proving that the proposed algorithm is highly efficient in detecting the DDoS attacks in cloud computing systems. 
To extend our research in the area of nature-inspired computing, we compared our perplexed Bayes classifier feature selection with nature-inspired feature selection like genetic algorithm (GA) and particle swarm optimization (PSO) and found that our classifier is highly efficient in comparison with GA and PSO and their accuracies are 2% and 8%, respectively, less than those of perplexed Bayes classifier.}, } @article {pmid35901084, year = {2022}, author = {Ali-Eldin, AMT}, title = {A hybrid trust computing approach for IoT using social similarity and machine learning.}, journal = {PloS one}, volume = {17}, number = {7}, pages = {e0265658}, pmid = {35901084}, issn = {1932-6203}, mesh = {Algorithms ; Humans ; Machine Learning ; *Privacy ; *Trust ; }, abstract = {Every year, millions of new devices are added to the Internet of things, which has both great benefits and serious security risks for user data privacy. It is the device owners' responsibility to ensure that the ownership settings of Internet of things devices are maintained, allowing them to communicate with other user devices autonomously. The ultimate goal of the future Internet of Things is for it to be able to make decisions on its own, without the need for human intervention. Therefore, trust computing and prediction have become more vital in the processing and handling of data as well as in the delivery of services. In this paper, we compute trust in social IoT scenarios using a hybrid approach that combines a distributed computation technique and a global machine learning approach. The approach considers social similarity while assessing other users' ratings and utilize a cloud-based architecture. Further, we propose a dynamic way to aggregate the different computed trust values. According to the results of the experimental work, it is shown that the proposed approaches outperform related work. 
Besides, it is shown that the use of machine learning provides slightly better performance than the computing model. Both proposed approaches were found successful in degrading malicious ratings without the need for more complex algorithms.}, } @article {pmid35898787, year = {2022}, author = {Lin, K}, title = {Big Data Technology in the Macrodecision-Making Model of Regional Industrial Economic Information Applied Research.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {7400797}, pmid = {35898787}, issn = {1687-5273}, mesh = {*Artificial Intelligence ; *Big Data ; Data Mining ; Industry ; Technology ; }, abstract = {In the era of Internet +, modern industry has developed rapidly, the network economy has promoted the great development of the industrial economy, and the traditional industrial economic statistics method has not been suitable for the development needs of modern enterprises. In today's society, it can be described as the era of big data, the use of big data technology for industrial economic statistics is needed for the development of industrial modernization, and it is also a new requirement for industrial economic statistics put forward by social development. With the wide application of Internet of Things, cloud computing, mobile Internet, remote sensing, and geographic information technology in the economic field, precise economic policies have gradually developed and matured. Especially for different industries in the regional economy, according to the big data in the region, the big data mining technology and analysis technology can be used to obtain the development situation and future trend of the industrial economy in a timely and effective manner. Applying big data technology to macrodecision of regional economic information is an effective way to make macrodecision of current economy. 
Based on this background, this paper proposes a macroeconomic decision-making method for regional industries based on big data technology. Using data mining technology and time series data analysis methods combined with artificial intelligence analysis, the development trend of regional industries is obtained, and macroeconomic decisions are made based on this development trend. Taking agriculture as an example, the most popular analysis of the price trend of a certain agricultural product provides an effective reference for the development strategy of this agricultural product. The results show that the method proposed in this paper can effectively apply big data technology to the macrodecision-making of regional industrial economy. And it has better promotion significance.}, } @article {pmid35898480, year = {2022}, author = {Gao, S}, title = {Network Security Problems and Countermeasures of Hospital Information System after Going to the Cloud.}, journal = {Computational and mathematical methods in medicine}, volume = {2022}, number = {}, pages = {9725741}, pmid = {35898480}, issn = {1748-6718}, mesh = {*Cloud Computing ; *Computer Security ; Delivery of Health Care ; *Hospital Information Systems ; Hospitals ; Humans ; }, abstract = {In the current social context, information technology, network technology, and cloud computing have been widely used in all walks of life. The analysis of the specific application results of progressive technology shows that the use of technology has changed the working state of various industries and improved the work efficiency and quality of the industry. It should be noted that although the application of some technologies will bring many positive effects, the potential risks brought by them cannot be ignored. 
As far as the hospital is concerned, the information system using cloud computing technology can make better use of the hospital's information data, but after the information system is on the cloud, new problems will appear in network security, resulting in the leakage of hospital patient information or research information. Based on this, in practice, it is necessary to analyze the network security problems after the hospital information system goes to the cloud and build and implement the corresponding strategies. The author analyzes and discusses the corresponding contents through work practice and combined with previous articles, in order to provide guidance and help for peers.}, } @article {pmid35897994, year = {2022}, author = {Wang, B and Ben, K and Lin, H and Zuo, M and Zhang, F}, title = {EP-ADTA: Edge Prediction-Based Adaptive Data Transfer Algorithm for Underwater Wireless Sensor Networks (UWSNs).}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {15}, pages = {}, pmid = {35897994}, issn = {1424-8220}, support = {52071153//National Natural Science Foundation of China/ ; }, abstract = {The underwater wireless sensor network is an important component of the underwater three-dimensional monitoring system. Due to the high bit error rate, high delay, low bandwidth, limited energy, and high dynamic of underwater networks, it is very difficult to realize efficient and reliable data transmission. Therefore, this paper posits that it is not enough to design the routing algorithm only from the perspective of the transmission environment; the comprehensive design of the data transmission algorithm should also be combined with the application. An edge prediction-based adaptive data transmission algorithm (EP-ADTA) is proposed that can dynamically adapt to the needs of underwater monitoring applications and the changes in the transmission environment. EP-ADTA uses the end-edge-cloud architecture to define the underwater wireless sensor networks. 
The algorithm uses communication nodes as the agents, realizes the monitoring data prediction and compression according to the edge prediction, dynamically selects the transmission route, and controls the data transmission accuracy based on reinforcement learning. The simulation results show that EP-ADTA can meet the accuracy requirements of underwater monitoring applications, dynamically adapt to the changes in the transmission environment, and ensure efficient and reliable data transmission in underwater wireless sensor networks.}, } @article {pmid35891110, year = {2022}, author = {Qiu, S and Li, A}, title = {Application of Chaos Mutation Adaptive Sparrow Search Algorithm in Edge Data Compression.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {14}, pages = {}, pmid = {35891110}, issn = {1424-8220}, support = {6140002010101,6140001030111//Central Military Commission/ ; }, mesh = {Algorithms ; Cloud Computing ; *Data Compression ; Mutation ; }, abstract = {In view of the large amount of data collected by an edge server, when compression technology is used for data compression, data classification accuracy is reduced and data loss is large. This paper proposes a data compression algorithm based on the chaotic mutation adaptive sparrow search algorithm (CMASSA). Constructing a new fitness function, CMASSA optimizes the hyperparameters of the Convolutional Auto-Encoder Network (CAEN) on the cloud service center, aiming to obtain the optimal CAEN model. The model is sent to the edge server to compress the data at the lower level of edge computing. The effectiveness of CMASSA performance is tested on ten high-dimensional benchmark functions, and the results show that CMASSA outperforms other comparison algorithms. Subsequently, experiments are compared with other literature on the Multi-class Weather Dataset (MWD). 
Experiments show that under the premise of ensuring a certain compression ratio, the proposed algorithm not only has better accuracy in classification tasks than other algorithms but also maintains a high degree of data reconstruction.}, } @article {pmid35891007, year = {2022}, author = {Alatoun, K and Matrouk, K and Mohammed, MA and Nedoma, J and Martinek, R and Zmij, P}, title = {A Novel Low-Latency and Energy-Efficient Task Scheduling Framework for Internet of Medical Things in an Edge Fog Cloud System.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {14}, pages = {}, pmid = {35891007}, issn = {1424-8220}, support = {SP2022/18 and No. SP2022/34//Ministry of Education Youth and Sports/ ; CZ.02.1.01/0.0/0.0/17_049/ 0008425//European Regional Development Fund in Research Platform focused on Industry 4.0 and Robotics in Ostrava project/ ; }, mesh = {*Algorithms ; *Cloud Computing ; Computer Simulation ; Electrocardiography ; Internet ; }, abstract = {In healthcare, there are rapid emergency response systems that necessitate real-time actions where speed and efficiency are critical; this may suffer as a result of cloud latency because of the delay caused by the cloud. Therefore, fog computing is utilized in real-time healthcare applications. There are still limitations in response time, latency, and energy consumption. Thus, a proper fog computing architecture and good task scheduling algorithms should be developed to minimize these limitations. In this study, an Energy-Efficient Internet of Medical Things to Fog Interoperability of Task Scheduling (EEIoMT) framework is proposed. This framework schedules tasks in an efficient way by ensuring that critical tasks are executed in the shortest possible time within their deadline while balancing energy consumption when processing other tasks. In our architecture, Electrocardiogram (ECG) sensors are used to monitor heart health at home in a smart city. 
ECG sensors send the sensed data continuously to the ESP32 microcontroller through Bluetooth (BLE) for analysis. ESP32 is also linked to the fog scheduler via Wi-Fi to send the results data of the analysis (tasks). The appropriate fog node is carefully selected to execute the task by giving each node a special weight, which is formulated on the basis of the expected amount of energy consumed and latency in executing this task and choosing the node with the lowest weight. Simulations were performed in iFogSim2. The simulation outcomes show that the suggested framework has a superior performance in reducing the usage of energy, latency, and network utilization when weighed against CHTM, LBS, and FNPA models.}, } @article {pmid35890918, year = {2022}, author = {Khanna, A and Sah, A and Bolshev, V and Burgio, A and Panchenko, V and Jasiński, M}, title = {Blockchain-Cloud Integration: A Survey.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {14}, pages = {}, pmid = {35890918}, issn = {1424-8220}, mesh = {*Blockchain ; Cloud Computing ; Computer Security ; Data Management ; Technology ; }, abstract = {Over the last couple of years, Blockchain technology has emerged as a game-changer for various industry domains, ranging from FinTech and the supply chain to healthcare and education, thereby enabling them to meet the competitive market demands and end-user requirements. Blockchain technology gained its popularity after the massive success of Bitcoin, of which it constitutes the backbone technology. While blockchain is still emerging and finding its foothold across domains, Cloud computing is comparatively well defined and established. Organizations such as Amazon, IBM, Google, and Microsoft have extensively invested in Cloud and continue to provide a plethora of related services to a wide range of customers. 
The pay-per-use policy and easy access to resources are some of the biggest advantages of Cloud, but it continues to face challenges like data security, compliance, interoperability, and data management. In this article, we present the advantages of integrating Cloud and blockchain technology along with applications of Blockchain-as-a-Service. The article presents itself with a detailed survey illustrating recent works combining the amalgamation of both technologies. The survey also talks about blockchain-cloud services being offered by existing Cloud Service providers.}, } @article {pmid35890848, year = {2022}, author = {Khalil, U and Malik, OA and Uddin, M and Chen, CL}, title = {A Comparative Analysis on Blockchain versus Centralized Authentication Architectures for IoT-Enabled Smart Devices in Smart Cities: A Comprehensive Review, Recent Advances, and Future Research Directions.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {14}, pages = {}, pmid = {35890848}, issn = {1424-8220}, support = {51808474//National Natural Science Foundation of China/ ; }, mesh = {Artificial Intelligence ; *Blockchain ; Cities ; Computer Security ; *Internet of Things ; }, abstract = {Smart devices have become an essential part of the architectures such as the Internet of Things (IoT), Cyber-Physical Systems (CPSs), and Internet of Everything (IoE). In contrast, these architectures constitute a system to realize the concept of smart cities and, ultimately, a smart planet. The adoption of these smart devices expands to different cyber-physical systems in smart city architecture, i.e., smart houses, smart healthcare, smart transportation, smart grid, smart agriculture, etc. The edge of the network connects these smart devices (sensors, aggregators, and actuators) that can operate in the physical environment and collects the data, which is further used to make an informed decision through actuation. 
Here, the security of these devices is immensely important, specifically from an authentication standpoint, as in the case of unauthenticated/malicious assets, the whole infrastructure would be at stake. We provide an updated review of authentication mechanisms by categorizing centralized and distributed architectures. We discuss the security issues regarding the authentication of these IoT-enabled smart devices. We evaluate and analyze the study of the proposed literature schemes that pose authentication challenges in terms of computational costs, communication overheads, and models applied to attain robustness. Hence, lightweight solutions in managing, maintaining, processing, and storing authentication data of IoT-enabled assets are an urgent need. From an integration perspective, cloud computing has provided strong support. In contrast, decentralized ledger technology, i.e., blockchain, light-weight cryptosystems, and Artificial Intelligence (AI)-based solutions, are the areas with much more to explore. Finally, we discuss the future research challenges, which will eventually help address the ambiguities for improvement.}, } @article {pmid35890825, year = {2022}, author = {Nakazato, J and Li, Z and Maruta, K and Kubota, K and Yu, T and Tran, GK and Sakaguchi, K and Masuko, S}, title = {MEC/Cloud Orchestrator to Facilitate Private/Local Beyond 5G with MEC and Proof-of-Concept Implementation.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {14}, pages = {}, pmid = {35890825}, issn = {1424-8220}, support = {00101//National Institute of Information and Communications Technology/ ; }, mesh = {*Cloud Computing ; Communication ; *Ecosystem ; }, abstract = {The emergence of 5G-IoT opens up unprecedented connectivity possibilities for new service use cases and players. 
Multi-access edge computing (MEC) is a crucial technology and enabler for Beyond 5G, supporting next-generation communications with service guarantees (e.g., ultra-low latency, high security) from an end-to-end (E2E) perspective. On the other hand, one notable advance is the platform that supports virtualization from RAN to applications. Deploying Radio Access Networks (RAN) and MEC, including third-party applications on virtualization platforms, and renting other equipment from legacy telecom operators will make it easier for new telecom operators, called Private/Local Telecom Operators, to join the ecosystem. Our preliminary studies have discussed the ecosystem for private and local telecom operators regarding business potential and revenue and provided numerical results. What remains is how Private/Local Telecom Operators can manage and deploy their MEC applications. In this paper, we designed the architecture for fully virtualized MEC 5G cellular networks with local use cases (e.g., stadiums, campuses). We propose an MEC/Cloud Orchestrator implementation for intelligent deployment selection. In addition, we provide implementation schemes in several cases held by either existing cloud owners or private and local operators. In order to verify the proposal's feasibility, we designed the system level in E2E and constructed a Beyond 5G testbed at the Ōokayama Campus of the Tokyo Institute of Technology. Through proof-of-concept in the outdoor field, the proposed system's feasibility is verified by E2E performance evaluation. 
The verification results prove that the proposed approach can reduce latency and provide a more stable throughput than conventional cloud services.}, } @article {pmid35890793, year = {2022}, author = {Wang, Q and Jiang, L and Sun, X and Zhao, J and Deng, Z and Yang, S}, title = {An Efficient LiDAR Point Cloud Map Coding Scheme Based on Segmentation and Frame-Inserting Network.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {14}, pages = {}, pmid = {35890793}, issn = {1424-8220}, support = {No. 62001262//National Natural Science Foundation of China/ ; No. 62001263//National Natural Science Foundation of China/ ; No. ZR2020QF008//Nature Science Foundation of Shandong Province/ ; }, abstract = {In this article, we present an efficient coding scheme for LiDAR point cloud maps. As a point cloud map consists of numerous single scans spliced together, by recording the time stamp and quaternion matrix of each scan during map building, we cast the point cloud map compression into the point cloud sequence compression problem. The coding architecture includes two techniques: intra-coding and inter-coding. For intra-frames, a segmentation-based intra-prediction technique is developed. For inter-frames, an interpolation-based inter-frame coding network is explored to remove temporal redundancy by generating virtual point clouds based on the decoded frames. We only need to code the difference between the original LiDAR data and the intra/inter-predicted point cloud data. The point cloud map can be reconstructed according to the decoded point cloud sequence and quaternion matrices. Experiments on the KITTI dataset show that the proposed coding scheme can largely eliminate the temporal and spatial redundancies. The point cloud map can be encoded to 1/24 of its original size with 2 mm-level precision. 
Our algorithm also obtains better coding performance compared with the octree and Google Draco algorithms.}, } @article {pmid35890787, year = {2022}, author = {Hussein, M and Mohammed, YS and Galal, AI and Abd-Elrahman, E and Zorkany, M}, title = {Smart Cognitive IoT Devices Using Multi-Layer Perception Neural Network on Limited Microcontroller.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {14}, pages = {}, pmid = {35890787}, issn = {1424-8220}, mesh = {Algorithms ; *Artificial Intelligence ; Cognition ; *Internet of Things ; Neural Networks, Computer ; }, abstract = {The Internet of Things (IoT) era is mainly dependent on the word "Smart", such as smart cities, smart homes, and smart cars. This aspect can be achieved through the merging of machine learning algorithms with IoT computing models. By adding the Artificial Intelligence (AI) algorithms to IoT, the result is the Cognitive IoT (CIoT). In the automotive industry, many researchers worked on self-diagnosis systems using deep learning, but most of them performed this process on the cloud due to the hardware limitations of the end-devices, and the devices obtain the decision via the cloud servers. Others worked with simple traditional algorithms of machine learning to solve these limitations of the processing capabilities of the end-devices. In this paper, a self-diagnosis smart device is introduced with fast responses and little overhead using the Multi-Layer Perceptron Neural Network (MLP-NN) as a deep learning technique. The MLP-NN learning stage is performed using a Tensorflow framework to generate an MLP model's parameters. Then, the MLP-NN model is implemented using these model's parameters on a low cost end-device such as ARM Cortex-M Series architecture. After implementing the MLP-NN model, the IoT implementation is built to publish the decision results. 
With the proposed implemented method for the smart device, the output decision based on sensors values can be taken by the IoT node itself without returning to the cloud. For comparison, another solution is proposed for the cloud-based architecture, where the MLP-NN model is implemented on Cloud. The results clarify a successful implemented MLP-NN model for little capabilities end-devices, where the smart device solution has a lower traffic and latency than the cloud-based solution.}, } @article {pmid35880010, year = {2022}, author = {Hemalatha, M}, title = {A hybrid random forest deep learning classifier empowered edge cloud architecture for COVID-19 and pneumonia detection.}, journal = {Expert systems with applications}, volume = {210}, number = {}, pages = {118227}, pmid = {35880010}, issn = {0957-4174}, abstract = {COVID-19 is a global pandemic that mostly affects patients' respiratory systems, and the only way to protect oneself against the virus at present moment is to diagnose the illness, isolate the patient, and provide immunization. In the present situation, the testing used to predict COVID-19 is inefficient and results in more false positives. This difficulty can be solved by developing a remote medical decision support system that detects illness using CT scans or X-ray images with less manual interaction and is less prone to errors. The state-of-art techniques mainly used complex deep learning architectures which are not quite effective when deployed in resource-constrained edge devices. To overcome this problem, a multi-objective Modified Heat Transfer Search (MOMHTS) optimized hybrid Random Forest Deep learning (HRFDL) classifier is proposed in this paper. The MOMHTS algorithm mainly optimizes the deep learning model in the HRFDL architecture by optimizing the hyperparameters associated with it to support the resource-constrained edge devices. 
To evaluate the efficiency of this technique, extensive experimentation is conducted on two real-time datasets namely the COVID19 lung CT scan dataset and the Chest X-ray images (Pneumonia) datasets. The proposed methodology mainly offers increased speed for communication between the IoT devices and COVID-19 detection via the MOMHTS optimized HRFDL classifier is modified to support the resources which can only support minimal computation and handle minimum storage. The proposed methodology offers an accuracy of 99% for both the COVID19 lung CT scan dataset and the Chest X-ray images (Pneumonia) datasets with minimal computational time, cost, and storage. Based on the simulation outcomes, we can conclude that the proposed methodology is an appropriate fit for edge computing detection to identify the COVID19 and pneumonia with higher detection accuracy.}, } @article {pmid35879937, year = {2022}, author = {Siriborvornratanakul, T}, title = {Human behavior in image-based Road Health Inspection Systems despite the emerging AutoML.}, journal = {Journal of big data}, volume = {9}, number = {1}, pages = {96}, pmid = {35879937}, issn = {2196-1115}, abstract = {INTRODUCTION: The emergence of automated machine learning or AutoML has raised an interesting trend of no-code and low-code machine learning where most tasks in the machine learning pipeline can possibly be automated without support from human data scientists. While it sounds reasonable that we should leave repetitive trial-and-error tasks of designing complex network architectures and tuning a lot of hyperparameters to AutoML, leading research using AutoML is still scarce. Thereby, the overall purpose of this case study is to investigate the gap between current AutoML frameworks and practical machine learning development.

CASE DESCRIPTION: First, this paper confirms the increasing trend of AutoML via an indirect indicator of the numbers of search results in Google trend, IEEE Xplore, and ACM Digital Library during 2012-2021. Then, the three most popular AutoML frameworks (i.e., Auto-Sklearn, AutoKeras, and Google Cloud AutoML) are inspected as AutoML's representatives; the inspection includes six comparative aspects. Based on the features available in the three AutoML frameworks investigated, our case study continues to observe recent machine learning research regarding the background of image-based machine learning. This is because the field of computer vision spans several levels of machine learning from basic to advanced and it has been one of the most popular fields in studying machine learning and artificial intelligence lately. Our study is specific to the context of image-based road health inspection systems as it has a long history in computer vision, allowing us to observe solution transitions from past to present.

DISCUSSION AND EVALUATION: After confirming the rising numbers of AutoML search results in the three search engines, our study regarding the three AutoML representatives further reveals that there are many features that can be used to automate the development pipeline of image-based road health inspection systems. Nevertheless, we find that recent works in image-based road health inspection have not used any form of AutoML in their works. Digging into these recent works, there are two main problems that best conclude why most researchers do not use AutoML in their image-based road health inspection systems yet. Firstly, it is because AutoML's trial-and-error decision involves much extra computation compared to human-guided decisions. Secondly, using AutoML adds another layer of non-interpretability to a model. As these two problems are the major pain points in modern neural networks and deep learning, they may require years to resolve, delaying the mass adoption of AutoML in image-based road health inspection systems.

CONCLUSIONS: In conclusion, although AutoML's utilization is not mainstream at this moment, we believe that the trend of AutoML will continue to grow. This is because there exists a demand for AutoML currently, and in the future, more demand for no-code or low-code machine learning development alternatives will grow together with the expansion of machine learning solutions. Nevertheless, this case study focuses on selected papers whose authors are researchers who can publish their works in academic conferences and journals. In the future, the study should continue to include observing novice users, non-programmer users, and machine learning practitioners in order to discover more insights from non-research perspectives.}, } @article {pmid35875731, year = {2022}, author = {Hameed Abdulkareem, K and Awad Mutlag, A and Musa Dinar, A and Frnda, J and Abed Mohammed, M and Hasan Zayr, F and Lakhan, A and Kadry, S and Ali Khattak, H and Nedoma, J}, title = {Smart Healthcare System for Severity Prediction and Critical Tasks Management of COVID-19 Patients in IoT-Fog Computing Environments.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {5012962}, pmid = {35875731}, issn = {1687-5273}, mesh = {Algorithms ; *COVID-19 ; Delivery of Health Care ; Humans ; *Internet of Things ; }, abstract = {COVID-19 has depleted healthcare systems around the world. Extreme conditions must be defined as soon as possible so that services and treatment can be deployed and intensified. Many biomarkers are being investigated in order to track the patient's condition. Unfortunately, this may interfere with the symptoms of other diseases, making it more difficult for a specialist to diagnose or predict the severity level of the case. This research develops a Smart Healthcare System for Severity Prediction and Critical Tasks Management (SHSSP-CTM) for COVID-19 patients. 
On the one hand, a machine learning (ML) model is projected to predict the severity of COVID-19 disease. On the other hand, a multi-agent system is proposed to prioritize patients according to the seriousness of the COVID-19 condition and then provide complete network management from the edge to the cloud. Clinical data, including Internet of Medical Things (IoMT) sensors and Electronic Health Record (EHR) data of 78 patients from one hospital in the Wasit Governorate, Iraq, were used in this study. Different data sources are fused to generate a new feature pattern. Also, data mining techniques such as normalization and feature selection are applied. Two models, specifically logistic regression (LR) and random forest (RF), are used as baseline severity predictive models. A multi-agent algorithm (MAA), consisting of a personal agent (PA) and fog node agent (FNA), is used to control the prioritization process of COVID-19 patients. The highest prediction result is achieved based on data fusion and selected features, where all examined classifiers observe a significant increase in accuracy. Furthermore, compared with state-of-the-art methods, the RF model showed a high and balanced prediction performance with 86% accuracy, 85.7% F-score, 87.2% precision, and 86% recall. In addition, as compared to the cloud, the MAA showed very significant performance where the resource usage was 66% in the proposed model and 34% in the traditional cloud, the delay was 19% in the proposed model and 81% in the cloud, and the consumed energy was 31% in the proposed model and 69% in the cloud. 
The findings of this study will allow for the early detection of three severity cases, lowering mortality rates.}, } @article {pmid35875729, year = {2022}, author = {Zhang, L}, title = {B/S-Based Construction of a Big Data Logistics Platform.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {6873062}, pmid = {35875729}, issn = {1687-5273}, mesh = {*Big Data ; *Cloud Computing ; Humans ; }, abstract = {Due to the overwhelming characteristic of the Internet of Things, devices belonging to these networks are utilized in almost every domain of real life in order to improve the lifestyle of humans. However, these networks result in a huge amount of data related to different application domains, leading to another important research aspect, i.e., big data and cloud computing. Big data and cloud computing technologies in the logistics field have experienced initial contact, gradual penetration, and widespread application. Moreover, it supports traditional logistics to upgrade to smart logistics, aiming to achieve the fundamental requirements of today's logistics industry and reduce costs with enhanced efficiency. However, the big data and cloud computing wisdom logistics model still has many problems in the construction of logistics public information platforms, end coordination development, government platform construction, and so on, in order to solve the problems of low efficiency, high cost, and low service satisfaction of traditional logistics. 
In this article, we have designed a new big data-enabled logistics detection system that is based on B/S architecture, constructed a smart logistics model consisting of a supply subsystem, demand subsystem, and supervision subsystem, and finally realized the operation process of the smart logistics model based on big data cloud computing.}, } @article {pmid35875634, year = {2022}, author = {Chen, X and Gao, T and Gao, H and Liu, B and Chen, M and Wang, B}, title = {A multi-stage heuristic method for service caching and task offloading to improve the cooperation between edge and cloud computing.}, journal = {PeerJ. Computer science}, volume = {8}, number = {}, pages = {e1012}, pmid = {35875634}, issn = {2376-5992}, abstract = {Edge-cloud computing has attracted increasing attention recently due to its efficiency on providing services for not only delay-sensitive applications but also resource-intensive requests, by combining low-latency edge resources and abundant cloud resources. A carefully designed strategy of service caching and task offloading helps to improve the user satisfaction and the resource efficiency. Thus, in this article, we focus on joint service caching and task offloading problem in edge-cloud computing environments, to improve the cooperation between edge and cloud resources. First, we formulated the problem into a mix-integer nonlinear programming, which is proofed as NP-hard. Then, we proposed a three-stage heuristic method for solving the problem in polynomial time. In the first stages, our method tried to make full use of abundant cloud resources by pre-offloading as many tasks as possible to the cloud. Our method aimed at making full use of low-latency edge resources by offloading remaining tasks and caching corresponding services on edge resources. In the last stage, our method focused on improving the performance of tasks offloaded to the cloud, by re-offloading some tasks from cloud resources to edge resources. 
The performance of our method was evaluated by extensive simulated experiments. The results show that our method has up to 155%, 56.1%, and 155% better performance in user satisfaction, resource efficiency, and processing efficiency, respectively, compared with several classical and state-of-the-art task scheduling methods.}, } @article {pmid35874097, year = {2022}, author = {Huang, CW and Chuang, WH and Lin, CY and Chen, SH}, title = {Elegancy: Digitizing the wisdom from laboratories to the cloud with free no-code platform.}, journal = {iScience}, volume = {25}, number = {8}, pages = {104710}, pmid = {35874097}, issn = {2589-0042}, abstract = {One of the top priorities in any laboratory is archiving experimental data in the most secure, efficient, and errorless way. It is especially important to those in chemical and biological research, for it is more likely to damage experiment records. In addition, the transmission of experiment results from paper to electronic devices is time-consuming and redundant. Therefore, we introduce an open-source no-code electronic laboratory notebook, Elegancy, a cloud-based/standalone web service distributed as a Docker image. Elegancy fits all laboratories but is specially equipped with several features benefitting biochemical laboratories. It can be accessed via various web browsers, allowing researchers to upload photos or audio recordings directly from their mobile devices. Elegancy also contains a meeting arrangement module, audit/revision control, and laboratory supply management system. 
We believe Elegancy could help the scientific research community gather evidence, share information, reorganize knowledge, and digitize laboratory works with greater ease and security.}, } @article {pmid35873307, year = {2022}, author = {Rodas-Martinez, AK and Altamirano-Yupanqui, JR}, title = {[Mass vaccinations against COVID-19 through the use of technologies for the management of appointment scheduling and data of large volumes of vaccinated].}, journal = {Vacunas}, volume = {23}, number = {}, pages = {S111-S120}, pmid = {35873307}, issn = {1576-9887}, abstract = {Mass vaccination against COVID-19 using technologies to manage appointment scheduling and data in large volumes of vaccinated people Abstract Mass vaccination poses a challenge for health authorities due to the high volume of people who need to be vaccinated in a short period of time. Manual processes in vaccination centres to record and control vaccinations where the data is entered on paper result in delays in the timely input of information rendering the vaccination process inefficient. The proposed prototype, as a strategy for mass COVID-19 vaccination, to generate appointments, record, and control entry to vaccination centres, uses mobile technology, QR codes, and cloud computing to automate these data-driven processes. 
Technology-based processes help people by giving them the flexibility to choose the most convenient vaccination centre and provide health authorities with data-driven tools for management, control, and real-time decision-making.}, } @article {pmid35870448, year = {2022}, author = {Abe, T and Kinsella, I and Saxena, S and Buchanan, EK and Couto, J and Briggs, J and Kitt, SL and Glassman, R and Zhou, J and Paninski, L and Cunningham, JP}, title = {Neuroscience Cloud Analysis As a Service: An open-source platform for scalable, reproducible data analysis.}, journal = {Neuron}, volume = {110}, number = {17}, pages = {2771-2789.e7}, pmid = {35870448}, issn = {1097-4199}, support = {T32 NS064929/NS/NINDS NIH HHS/United States ; UF1 NS107696/NS/NINDS NIH HHS/United States ; RF1 MH120680/MH/NIMH NIH HHS/United States ; U19 NS107613/NS/NINDS NIH HHS/United States ; U19 NS104649/NS/NINDS NIH HHS/United States ; UF1 NS108213/NS/NINDS NIH HHS/United States ; U19 NS123716/NS/NINDS NIH HHS/United States ; U01 NS103489/NS/NINDS NIH HHS/United States ; }, mesh = {Cloud Computing ; *Data Analysis ; *Neurosciences ; Reproducibility of Results ; Software ; }, abstract = {A key aspect of neuroscience research is the development of powerful, general-purpose data analyses that process large datasets. Unfortunately, modern data analyses have a hidden dependence upon complex computing infrastructure (e.g., software and hardware), which acts as an unaddressed deterrent to analysis users. Although existing analyses are increasingly shared as open-source software, the infrastructure and knowledge needed to deploy these analyses efficiently still pose significant barriers to use. In this work, we develop Neuroscience Cloud Analysis As a Service (NeuroCAAS): a fully automated open-source analysis platform offering automatic infrastructure reproducibility for any data analysis. 
We show how NeuroCAAS supports the design of simpler, more powerful data analyses and that many popular data analysis tools offered through NeuroCAAS outperform counterparts on typical infrastructure. Pairing rigorous infrastructure management with cloud resources, NeuroCAAS dramatically accelerates the dissemination and use of new data analyses for neuroscientific discovery.}, } @article {pmid35867406, year = {2022}, author = {Merdan, O and Şişman, AS and Aksoy, SA and Kızıl, S and Tüzemen, NÜ and Yılmaz, E and Ener, B}, title = {Investigation of the Defective Growth Pattern and Multidrug Resistance in a Clinical Isolate of Candida glabrata Using Whole-Genome Sequencing and Computational Biology Applications.}, journal = {Microbiology spectrum}, volume = {10}, number = {4}, pages = {e0077622}, pmid = {35867406}, issn = {2165-0497}, mesh = {*Amphotericin B/metabolism/pharmacology ; Animals ; Antifungal Agents/pharmacology ; Artificial Intelligence ; Azoles/metabolism/pharmacology ; *Candida glabrata/genetics ; Cholesterol/metabolism/pharmacology ; Computational Biology ; Drug Resistance, Fungal/genetics ; Drug Resistance, Multiple ; Ergosterol/metabolism ; Microbial Sensitivity Tests ; Sheep ; }, abstract = {Candida glabrata is increasingly isolated from blood cultures, and multidrug-resistant isolates have important implications for therapy. This study describes a cholesterol-dependent clinical C. glabrata isolate (ML72254) that did not grow without blood (containing cholesterol) on routine mycological media and that showed azole and amphotericin B (AmB) resistance. Matrix-assisted laser desorption ionization-time of flight (MALDI-TOF) and whole-genome sequencing (WGS) were used for species identification. A modified Etest method (Mueller-Hinton agar supplemented with 5% sheep blood) was used for antifungal susceptibility testing. WGS data were processed via the Galaxy platform, and the genomic variations of ML72254 were retrieved. 
A computational biology workflow utilizing web-based applications (PROVEAN, AlphaFold Colab, and Missense3D) was constructed to predict possible deleterious effects of these missense variations on protein functions. The predictive ability of this workflow was tested with previously reported missense variations in ergosterol synthesis genes of C. glabrata. ML72254 was identified as C. glabrata sensu stricto with MALDI-TOF, and WGS confirmed this identification. The MICs of fluconazole, voriconazole, and amphotericin B were >256, >32, and >32 μg/mL, respectively. A novel frameshift mutation in the ERG1 gene (Pro314fs) and many missense variations were detected in the ergosterol synthesis genes. None of the missense variations in the ML72254 ergosterol synthesis genes were deleterious, and the Pro314fs mutation was identified as the causative molecular change for a cholesterol-dependent and multidrug-resistant phenotype. This study verified that web-based computational biology solutions can be powerful tools for examining the possible impacts of missense mutations in C. glabrata. IMPORTANCE In this study, a cholesterol-dependent C. glabrata clinical isolate that confers azole and AmB resistance was investigated using artificial intelligence (AI) technologies and cloud computing applications. This is the first of the known cholesterol-dependent C. glabrata isolate to be found in Turkey. Cholesterol-dependent C. glabrata isolates are rarely isolated in clinical samples; they can easily be overlooked during routine laboratory procedures. Microbiologists therefore need to be alert when discrepancies occur between microscopic examination and growth on routine media. 
In addition, because these isolates confer antifungal resistance, patient management requires extra care.}, } @article {pmid35866176, year = {2021}, author = {Zhou, H and Ouyang, X and Su, J and de Laat, C and Zhao, Z}, title = {Enforcing trustworthy cloud SLA with witnesses: A game theory-based model using smart contracts.}, journal = {Concurrency and computation : practice \& experience}, volume = {33}, number = {14}, pages = {e5511}, doi = {10.1002/cpe.5511}, pmid = {35866176}, issn = {1532-0626}, abstract = {There lacks trust between the cloud customer and provider to enforce traditional cloud SLA (Service Level Agreement) where the blockchain technique seems a promising solution. However, current explorations still face challenges to prove that the off-chain SLO (Service Level Objective) violations really happen before recorded into the on-chain transactions. In this paper, a witness model is proposed implemented with smart contracts to solve this trust issue. The introduced role, "Witness", gains rewards as an incentive for performing the SLO violation report, and the payoff function is carefully designed in a way that the witness has to tell the truth, for maximizing the rewards. This fact that the witness has to be honest is analyzed and proved using the Nash Equilibrium principle of game theory. For ensuring the chosen witnesses are random and independent, an unbiased selection algorithm is proposed to avoid possible collusions. An auditing mechanism is also introduced to detect potential malicious witnesses. Specifically, we define three types of malicious behaviors and propose quantitative indicators to audit and detect these behaviors. 
Moreover, experimental studies based on Ethereum blockchain demonstrate the proposed model is feasible, and indicate that the performance, i.e., transaction fee, of each interface follows the design expectations.}, } @article {pmid35865872, year = {2022}, author = {Zhou, Y}, title = {The Application Trend of Digital Finance and Technological Innovation in the Development of Green Economy.}, journal = {Journal of environmental and public health}, volume = {2022}, number = {}, pages = {1064558}, pmid = {35865872}, issn = {1687-9813}, mesh = {*Artificial Intelligence ; Conservation of Energy Resources ; Economic Development ; *Inventions ; Sustainable Development ; }, abstract = {Based on the perspective of digital finance and technological innovation, this paper analyzes its application in economic development, green economy, and sustainable development. With the continuous development of technological economy, methods such as artificial intelligence, Internet of Things, big data, and cloud computing become increasingly mature. Economic development is inseparable from the empowerment of technology. In this paper, firstly, we introduce the basic concepts and main forms of digital finance and technological economy and list the cutting-edge technologies including blockchain, VR, sharing economy, and other modes. Then, we analyze the application trend of technology economy. Finally, we analyze the examples of digital finance and technological innovation in detail, including tourism economy, digital marketing, sharing economy, smart city, digital healthcare, and personalized education, three hot topics of technology intersection and integration. 
In the end, we put forward prospects for the development of a digital economy, digital finance, and technological innovation.}, } @article {pmid35860795, year = {2022}, author = {Yan, L and Chen, Y and Caixia, G and Jiangying, W and Xiaoying, L and Zhe, L}, title = {Medical Big Data and Postoperative Nursing of Fracture Patients Based on Cloud Computing.}, journal = {BioMed research international}, volume = {2022}, number = {}, pages = {4090235}, pmid = {35860795}, issn = {2314-6141}, mesh = {*Big Data ; Cloud Computing ; *Fractures, Bone/surgery ; Humans ; *Postoperative Care/nursing ; Reproducibility of Results ; Wireless Technology ; }, abstract = {Based on the standards for wireless sensor system identification, the sensor node identity OID identification and the management object OID identification in the SNMP MIB are merged, and a management object OID identification coding mechanism for the SNMP-based wireless sensor system is proposed to make the node management system only. The identity, attributes, and multiple entities of the target sensor node in the wireless sensor network can be identified and managed by the node management object OID. The source of abnormal medical big data generally uses two models of multidimensional data and sliding window for detection and verification. First, the sliding window can be used to detect abnormalities. The result is that under this condition, the detection rate of medical big data is more than 95%; the effect is very good, but in different dimensions, the detection rate of four-dimensional data is 2.9% higher than that of a single-dimensional one. On the basis of the ZigBee wireless network, the terminal signal transmission of fracture treatment can be realized. On this basis, combined with the actual needs of fracture treatment, it can be built with its wireless module. The wireless network has a certain basic function. 
The reform of the nursing system was carried out on the basis of the safety and reliability of the nursing system, the efficiency of the nursing system was improved, and timely and safe nursing services were achieved.}, } @article {pmid35860647, year = {2022}, author = {Li, H}, title = {Computer Security Issues and Legal System Based on Cloud Computing.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {8112212}, pmid = {35860647}, issn = {1687-5273}, mesh = {*Cloud Computing ; *Computer Security ; Confidentiality ; Information Storage and Retrieval ; Software ; }, abstract = {To effectively improve the security and accuracy of computer information storage, a computer security problem and legal system based on cloud computing are proposed. Firstly, this article details the evolution of cloud computing, its characteristics, architecture, and application status of cloud computing in detail. Second, we discussed security strategies to ensure the confidentiality and integrity of cloud computing information, focuses on the data encryption technology of cloud data security, and designs and implements the data backup and recovery system based on the cloud platform. The core layers of the system are the system layer and data operation layer. The system uses multithreading technology based on epoll and thread pool to improve the efficiency of data transmission. At the same time, the basic visual page is realized, and users can use the page to create a convenient operating system. Finally, the system is built in the laboratory environment and tested as a whole. The test results show that through the performance comparison with the current commonly used systems, it is found that the system in this paper has a certain improvement in data transmission rate, but the utilization rate of node CPU is as high as 40%, which leads to certain requirements for node CPU performance. 
Therefore, the system meets the functional requirements proposed in the design. Compared to the existing system, its performance has been found to meet the actual requirements of use, proving that the system is accessible and efficient.}, } @article {pmid35858750, year = {2022}, author = {, }, title = {Diversifying the genomic data science research community.}, journal = {Genome research}, volume = {32}, number = {7}, pages = {1231-1241}, pmid = {35858750}, issn = {1549-5469}, support = {U24 HG010263/HG/NHGRI NIH HHS/United States ; R01 CA230514/CA/NCI NIH HHS/United States ; U54 MD007601/MD/NIMHD NIH HHS/United States ; U24 HG010262/HG/NHGRI NIH HHS/United States ; R21 DC020560/DC/NIDCD NIH HHS/United States ; R01 CA223490/CA/NCI NIH HHS/United States ; P30 CA071789/CA/NCI NIH HHS/United States ; P20 GM103466/GM/NIGMS NIH HHS/United States ; P20 GM139753/GM/NIGMS NIH HHS/United States ; }, abstract = {Over the past 20 years, the explosion of genomic data collection and the cloud computing revolution have made computational and data science research accessible to anyone with a web browser and an internet connection. However, students at institutions with limited resources have received relatively little exposure to curricula or professional development opportunities that lead to careers in genomic data science. To broaden participation in genomics research, the scientific community needs to support these programs in local education and research at underserved institutions (UIs). These include community colleges, historically Black colleges and universities, Hispanic-serving institutions, and tribal colleges and universities that support ethnically, racially, and socioeconomically underrepresented students in the United States. We have formed the Genomic Data Science Community Network to support students, faculty, and their networks to identify opportunities and broaden access to genomic data science. 
These opportunities include expanding access to infrastructure and data, providing UI faculty development opportunities, strengthening collaborations among faculty, recognizing UI teaching and research excellence, fostering student awareness, developing modular and open-source resources, expanding course-based undergraduate research experiences (CUREs), building curriculum, supporting student professional development and research, and removing financial barriers through funding programs and collaborator support.}, } @article {pmid35854299, year = {2022}, author = {Wang, R and Han, J and Liu, C and Wang, L}, title = {Relationship between medical students' perceived instructor role and their approaches to using online learning technologies in a cloud-based virtual classroom.}, journal = {BMC medical education}, volume = {22}, number = {1}, pages = {560}, pmid = {35854299}, issn = {1472-6920}, mesh = {Cloud Computing ; Cross-Sectional Studies ; *Education, Distance/methods ; Humans ; *Students, Medical ; Universities ; }, abstract = {BACKGROUND: Students can take different approaches to using online learning technologies: deep and surface. It is important to understand the relationship between instructor role and student approaches to using online learning technologies in online learning settings supported by cloud computing techniques.

METHODS: A descriptive, cross-sectional study was conducted to analyze the relationships between medical students' perceptions of instructor role (instructor support, instructor-student interaction, and instructor innovation) and students' approaches to using online learning technologies in cloud-based virtual classrooms. A 25-item online questionnaire along with a sheet with basic demographic information was administered to all medical students at Qilu Medical Schools of Shandong University, China. Overall, 213 of 4000 medical students (5.34%) at the medical school participated in the survey.

RESULTS: The results showed high levels of medical students' perceived instructor support, instructor-student interaction and instructor innovation. Most students adopted the deep approaches to using online learning technologies. Instructor support, instructor-student interaction and innovation were positively related to students' deep approaches to using online learning technologies. Instructor support was negatively related to students' surface approaches to using online learning technologies.

CONCLUSIONS: The relationship between instructor role (instructor support, instructor-student interaction and instructor innovation) and students' approaches to using online learning technologies highlight the importance of instructor support and innovation in facilitating students' adoption of desirable approaches to learning from the application of technologies.}, } @article {pmid35850085, year = {2022}, author = {Peng, Y and Sengupta, D and Duan, Y and Chen, C and Tian, B}, title = {Accurate mapping of Chinese coastal aquaculture ponds using biophysical parameters based on Sentinel-2 time series images.}, journal = {Marine pollution bulletin}, volume = {181}, number = {}, pages = {113901}, doi = {10.1016/j.marpolbul.2022.113901}, pmid = {35850085}, issn = {1879-3363}, mesh = {Aquaculture/methods ; *Environmental Monitoring ; *Ponds ; Time Factors ; Water ; }, abstract = {Aquaculture plays a crucial role in the global food security and nutrition supply, where China accounts for the largest market share. Although there are some studies that focus on large-scale extraction of coastal aquaculture ponds from satellite images, they have often variable accuracies and encounter misclassification due to the similar geometric characteristics of various vivid water bodies. This paper proposes an efficient and novel method that integrates the spatial characteristics and three biophysical parameters (Chlorophyll-a, Trophic State Index, and Floating Algae Index) to map coastal aquaculture ponds at a national scale. These parameters are derived from bio-optical models based on the Google Earth Engine (GEE) cloud computing platform and time series of high-resolution Sentinel-2 images. Our proposed method effectively addresses the misclassification issue between the aquaculture ponds and rivers, lakes, reservoirs, and salt pans and achieves an overall accuracy of 91 % and a Kappa coefficient of 0.83 in the Chinese coastal zone. 
Our results indicate that the total area of Chinese coastal aquaculture ponds was 1,039,214 ha in 2019, mainly distributed in the Shandong and Guangdong provinces. The highest aquaculture intensity occurs within the 1 km coastal buffer zone, accounting for 22.4 % of the total area. Furthermore, more than half of the Chinese coastal aquaculture ponds are concentrated in the 0-5 km buffer zone. Our method is of general applicability and thus is suitable for large-scale aquaculture ponds mapping projects. Moreover, the biophysical parameters we employ can be considered as new indicators for the classification of various water bodies even with different aquaculture species.}, } @article {pmid35846728, year = {2022}, author = {Yi, J and Zhang, H and Mao, J and Chen, Y and Zhong, H and Wang, Y}, title = {Review on the COVID-19 pandemic prevention and control system based on AI.}, journal = {Engineering applications of artificial intelligence}, volume = {114}, number = {}, pages = {105184}, pmid = {35846728}, issn = {0952-1976}, abstract = {As a new technology, artificial intelligence (AI) has recently received increasing attention from researchers and has been successfully applied to many domains. Currently, the outbreak of the COVID-19 pandemic has not only put people's lives in jeopardy but has also interrupted social activities and stifled economic growth. Artificial intelligence, as the most cutting-edge science field, is critical in the fight against the pandemic. To respond scientifically to major emergencies like COVID-19, this article reviews the use of artificial intelligence in the combat against the pandemic from COVID-19 large data, intelligent devices and systems, and intelligent robots. 
This article's primary contributions are in two aspects: (1) we summarized the applications of AI in the pandemic, including virus spreading prediction, patient diagnosis, vaccine development, excluding potential virus carriers, telemedicine service, economic recovery, material distribution, disinfection, and health care. (2) We concluded the faced challenges during the AI-based pandemic prevention process, including multidimensional data, sub-intelligent algorithms, and unsystematic, and discussed corresponding solutions, such as 5G, cloud computing, and unsupervised learning algorithms. This article systematically surveyed the applications and challenges of AI technology during the pandemic, which is of great significance to promote the development of AI technology and can serve as a new reference for future emergencies.}, } @article {pmid35845885, year = {2022}, author = {Yao, Y and Li, S}, title = {Design and Analysis of Intelligent Robot Based on Internet of Things Technology.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {7304180}, pmid = {35845885}, issn = {1687-5273}, mesh = {Cloud Computing ; Humans ; Intelligence ; *Internet of Things ; *Robotics ; }, abstract = {This research uses Auto-ID Labs radio frequency identification system to realize the information dissemination from the destination node to the nodes in its neighborhood. The purpose is to forward messages and explore typical applications. Realize the intelligent analysis and management of IoT devices and data. Design a set of edge video CDN system, in the G1 data set A = 9, p = 9, ℤp = 9, lℤp = 8, AES = 5, ES = 9. Distribute some hot content to public wireless hotspots closer to users in advance, A = 9, p = 7, ℤp = 9, lℤp = 9, AES = 9, ES = 8. At present, a large amount of research is mainly to deploy an edge node between the end node of the Internet of Things and the cloud computing center to provide high-quality services. 
By learning a stable dynamic system from human teaching to ensure the robustness of the controller to spatial disturbances. FPP-SCA plan FPP-SCA = 1.99, FPP-SCA = 1.86, FPP-SCA = 1.03, FPP-SCA = 1.18, FPP-SCA = 1.01, FPP-SCA = 1.46, FPP-SCA = 1.61.The more robots work in an unstructured environment, with different scenarios and tasks, the comparison shows that the FPP-SCA scheme is the optimal model F-S0 = 2.52, F-S5 = 2.38, F-S10 = 2.5, F- S15 = 2.09, F-S20 = 2.54, F-S25 = 2.8, F-S30 = 2.98.}, } @article {pmid35843990, year = {2022}, author = {Kölzsch, A and Davidson, SC and Gauggel, D and Hahn, C and Hirt, J and Kays, R and Lang, I and Lohr, A and Russell, B and Scharf, AK and Schneider, G and Vinciguerra, CM and Wikelski, M and Safi, K}, title = {MoveApps: a serverless no-code analysis platform for animal tracking data.}, journal = {Movement ecology}, volume = {10}, number = {1}, pages = {30}, pmid = {35843990}, issn = {2051-3933}, support = {80NSSC21K1182/NASA/NASA/United States ; }, abstract = {BACKGROUND: Bio-logging and animal tracking datasets continuously grow in volume and complexity, documenting animal behaviour and ecology in unprecedented extent and detail, but greatly increasing the challenge of extracting knowledge from the data obtained. A large variety of analysis methods are being developed, many of which in effect are inaccessible to potential users, because they remain unpublished, depend on proprietary software or require significant coding skills.

RESULTS: We developed MoveApps, an open analysis platform for animal tracking data, to make sophisticated analytical tools accessible to a global community of movement ecologists and wildlife managers. As part of the Movebank ecosystem, MoveApps allows users to design and share workflows composed of analysis modules (Apps) that access and analyse tracking data. Users browse Apps, build workflows, customise parameters, execute analyses and access results through an intuitive web-based interface. Apps, coded in R or other programming languages, have been developed by the MoveApps team and can be contributed by anyone developing analysis code. They become available to all users of the platform. To allow long-term and cross-system reproducibility, Apps have public source code and are compiled and run in Docker containers that form the basis of a serverless cloud computing system. To support reproducible science and help contributors document and benefit from their efforts, workflows of Apps can be shared, published and archived with DOIs in the Movebank Data Repository. The platform was beta launched in spring 2021 and currently contains 49 Apps that are used by 316 registered users. We illustrate its use through two workflows that (1) provide a daily report on active tag deployments and (2) segment and map migratory movements.

CONCLUSIONS: The MoveApps platform is meant to empower the community to supply, exchange and use analysis code in an intuitive environment that allows fast and traceable results and feedback. By bringing together analytical experts developing movement analysis methods and code with those in need of tools to explore, answer questions and inform decisions based on data they collect, we intend to increase the pace of knowledge generation and integration to match the huge growth rate in bio-logging data acquisition.}, } @article {pmid35829789, year = {2022}, author = {Mozaffaree Pour, N and Karasov, O and Burdun, I and Oja, T}, title = {Simulation of land use/land cover changes and urban expansion in Estonia by a hybrid ANN-CA-MCA model and utilizing spectral-textural indices.}, journal = {Environmental monitoring and assessment}, volume = {194}, number = {8}, pages = {584}, pmid = {35829789}, issn = {1573-2959}, support = {PRG352//Eesti Teadusagentuur/ ; PEATSPEC//Academy of Finland/ ; decision no 341963//Academy of Finland/ ; }, mesh = {Agriculture ; *Conservation of Natural Resources ; *Environmental Monitoring ; Estonia ; Wetlands ; }, abstract = {Over the recent two decades, land use/land cover (LULC) drastically changed in Estonia. Even though the population decreased by 11%, noticeable agricultural and forest land areas were turned into urban land. In this work, we analyzed those LULC changes by mapping the spatial characteristics of LULC and urban expansion in the years 2000-2019 in Estonia. Moreover, using the revealed spatiotemporal transitions of LULC, we simulated LULC and urban expansion for 2030. Landsat 5 and 8 data were used to estimate 147 spectral-textural indices in the Google Earth Engine cloud computing platform. After that, 19 selected indices were used to model LULC changes by applying the hybrid artificial neural network, cellular automata, and Markov chain analysis (ANN-CA-MCA). 
While determining spectral-textural indices is quite common for LULC classifications, utilization of these continuous indices in LULC change detection and examining these indices at the landscape scale is still in its infancy. This country-wide modeling approach provided the first comprehensive projection of future LULC utilizing spectral-textural indices. In this work, we utilized the hybrid ANN-CA-MCA model for predicting LULC in Estonia for 2030; we revealed that the predicted changes in LULC from 2019 to 2030 were similar to the observed changes from 2011 to 2019. The predicted change in the area of artificial surfaces was an increased rate of 1.33% to reach 787.04 km[2] in total by 2030. Between 2019 and 2030, the other significant changes were the decrease of 34.57 km[2] of forest lands and the increase of agricultural lands by 14.90 km[2] and wetlands by 9.31 km[2]. These findings can develop a proper course of action for long-term spatial planning in Estonia. Therefore, a key policy priority should be to plan for the stable care of forest lands to maintain biodiversity.}, } @article {pmid35816521, year = {2023}, author = {Singh, P and Gaba, GS and Kaur, A and Hedabou, M and Gurtov, A}, title = {Dew-Cloud-Based Hierarchical Federated Learning for Intrusion Detection in IoMT.}, journal = {IEEE journal of biomedical and health informatics}, volume = {27}, number = {2}, pages = {722-731}, doi = {10.1109/JBHI.2022.3186250}, pmid = {35816521}, issn = {2168-2208}, mesh = {Humans ; Cloud Computing ; *COVID-19 ; *Internet of Things ; Internet ; Algorithms ; }, abstract = {The coronavirus pandemic has overburdened medical institutions, forcing physicians to diagnose and treat their patients remotely. Moreover, COVID-19 has made humans more conscious about their health, resulting in the extensive purchase of IoT-enabled medical devices. The rapid boom in the market worth of the internet of medical things (IoMT) captured cyber attackers' attention. 
Like health, medical data is also sensitive and worth a lot on the dark web. However, patients' health details have not been protected appropriately, letting trespassers exploit them. The system administrator is unable to fortify security measures due to the limited storage capacity and computation power of the resource-constrained network devices. Although various supervised and unsupervised machine learning algorithms have been developed to identify anomalies, the primary undertaking is to explore the swift progressing malicious attacks before they deteriorate the wellness system's integrity. In this paper, a Dew-Cloud based model is designed to enable hierarchical federated learning (HFL). The proposed Dew-Cloud model provides a higher level of data privacy with greater availability of IoMT critical application(s). The hierarchical long short-term memory (HLSTM) model is deployed at distributed Dew servers with a backend supported by cloud computing. Data pre-processing feature helps the proposed model achieve high training accuracy (99.31%) with minimum training loss (0.034). 
The experiment results demonstrate that the proposed HFL-HLSTM model is superior to existing schemes in terms of performance metrics such as accuracy, precision, recall, and f-score.}, } @article {pmid35808479, year = {2022}, author = {Romeo, L and Marani, R and Perri, AG and D'Orazio, T}, title = {Microsoft Azure Kinect Calibration for Three-Dimensional Dense Point Clouds and Reliable Skeletons.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {13}, pages = {}, pmid = {35808479}, issn = {1424-8220}, support = {CDS000750//Italian Ministry of Economic Development LAMPO "Leonardo Automated Manufacturing Processes for cOmposites"/ ; }, mesh = {Calibration ; *Gestures ; Humans ; *Skeleton ; }, abstract = {Nowadays, the need for reliable and low-cost multi-camera systems is increasing for many potential applications, such as localization and mapping, human activity recognition, hand and gesture analysis, and object detection and localization. However, a precise camera calibration approach is mandatory for enabling further applications that require high precision. This paper analyzes the available two-camera calibration approaches to propose a guideline for calibrating multiple Azure Kinect RGB-D sensors to achieve the best alignment of point clouds in both color and infrared resolutions, and skeletal joints returned by the Microsoft Azure Body Tracking library. Different calibration methodologies using 2D and 3D approaches, all exploiting the functionalities within the Azure Kinect devices, are presented. Experiments demonstrate that the best results are returned by applying 3D calibration procedures, which give an average distance between all couples of corresponding points of point clouds in color or an infrared resolution of 21.426 mm and 9.872 mm for a static experiment and of 20.868 mm and 7.429 mm while framing a dynamic scene. 
At the same time, the best results in body joint alignment are achieved by three-dimensional procedures on images captured by the infrared sensors, resulting in an average error of 35.410 mm.}, } @article {pmid35808459, year = {2022}, author = {Khan, A and Umar, AI and Shirazi, SH and Ishaq, W and Shah, M and Assam, M and Mohamed, A}, title = {QoS-Aware Cost Minimization Strategy for AMI Applications in Smart Grid Using Cloud Computing.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {13}, pages = {}, pmid = {35808459}, issn = {1424-8220}, mesh = {*Cloud Computing ; Computer Simulation ; *Computer Systems ; Models, Theoretical ; Software ; }, abstract = {Cloud computing coupled with Internet of Things technology provides a wide range of cloud services such as memory, storage, computational processing, network bandwidth, and database application to the end users on demand over the Internet. More specifically, cloud computing provides efficient services such as "pay as per usage". However, Utility providers in Smart Grid are facing challenges in the design and implementation of such architecture in order to minimize the cost of underlying hardware, software, and network services. In Smart Grid, smart meters generate a large volume of different traffics, due to which efficient utilization of available resources such as buffer, storage, limited processing, and bandwidth is required in a cost-effective manner in the underlying network infrastructure. In such context, this article introduces a QoS-aware Hybrid Queue Scheduling (HQS) model that can be seen over the IoT-based network integrated with cloud environment for different advanced metering infrastructure (AMI) application traffic, which have different QoS levels in the Smart Grid network. The proposed optimization model supports, classifies, and prioritizes the AMI application traffic. 
The main objective is to reduce the cost of buffer, processing power, and network bandwidth utilized by AMI applications in the cloud environment. For this, we developed a simulation model in the CloudSim simulator that uses a simple mathematical model in order to achieve the objective function. During the simulations, the effects of various numbers of cloudlets on the cost of virtual machine resources such as RAM, CPU processing, and available bandwidth have been investigated in cloud computing. The obtained simulation results exhibited that our proposed model successfully competes with the previous schemes in terms of minimizing the processing, memory, and bandwidth cost by a significant margin. Moreover, the simulation results confirmed that the proposed optimization model behaves as expected and is realistic for AMI application traffic in the Smart Grid network using cloud computing.}, } @article {pmid35808452, year = {2022}, author = {Shen, X and Chang, Z and Niu, S}, title = {Mobile Edge Computing Task Offloading Strategy Based on Parking Cooperation in the Internet of Vehicles.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {13}, pages = {}, pmid = {35808452}, issn = {1424-8220}, support = {61961010//National Natural Science Foundation of China/ ; No.AA19046004//Guangxi Science and technology major special projects/ ; YCSW2022314//Innovation Project of Guangxi Graduate Education/ ; }, mesh = {*Cloud Computing ; Computer Simulation ; *Internet ; }, abstract = {Due to the limited computing capacity of onboard devices, they can no longer meet a large number of computing requirements. Therefore, mobile edge computing (MEC) provides more computing and storage capabilities for vehicles. 
Inspired by a large number of roadside parking vehicles, this paper takes the roadside parking vehicles with idle computing resources as the task offloading platform and proposes a mobile edge computing task offloading strategy based on roadside parking cooperation. The resource sharing and mutual utilization among roadside vehicles, roadside units (RSU), and cloud servers (cloud servers) were established, and the collaborative offloading problem of computing tasks was transformed into a constraint problem. The hybrid genetic algorithm (HHGA) with a mountain-climbing operator was used to solve the multi-constraint problem, to reduce the delay and energy consumption of computing tasks. The simulation results show that when the number of tasks is 25, the delay and energy consumption of the HHGA algorithm is improved by 24.1% and 11.9%, respectively, compared with Tradition. When the task size is 1.0 MB, the HHGA algorithm reduces the system overhead by 7.9% compared with Tradition. Therefore, the proposed scheme can effectively reduce the total system cost during task offloading.}, } @article {pmid35808373, year = {2022}, author = {Loukatos, D and Lygkoura, KA and Maraveas, C and Arvanitis, KG}, title = {Enriching IoT Modules with Edge AI Functionality to Detect Water Misuse Events in a Decentralized Manner.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {13}, pages = {}, pmid = {35808373}, issn = {1424-8220}, mesh = {Agriculture ; *Artificial Intelligence ; Humans ; *Internet of Things ; Machine Learning ; Water ; }, abstract = {The digital transformation of agriculture is a promising necessity for tackling the increasing nutritional needs of the population on Earth and the degradation of natural resources. 
Focusing on the "hot" area of natural resource preservation, the recent appearance of more efficient and cheaper microcontrollers, the advances in low-power and long-range radios, and the availability of accompanying software tools are exploited in order to monitor water consumption and to detect and report misuse events, with reduced power and network bandwidth requirements. Quite often, large quantities of water are wasted for a variety of reasons; from broken irrigation pipes to people's negligence. To tackle this problem, the necessary design and implementation details are highlighted for an experimental water usage reporting system that exhibits Edge Artificial Intelligence (Edge AI) functionality. By combining modern technologies, such as Internet of Things (IoT), Edge Computing (EC) and Machine Learning (ML), the deployment of a compact automated detection mechanism can be easier than before, while the information that has to travel from the edges of the network to the cloud and thus the corresponding energy footprint are drastically reduced. In parallel, characteristic implementation challenges are discussed, and a first set of corresponding evaluation results is presented.}, } @article {pmid35808368, year = {2022}, author = {Sefati, SS and Halunga, S}, title = {A Hybrid Service Selection and Composition for Cloud Computing Using the Adaptive Penalty Function in Genetic and Artificial Bee Colony Algorithm.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {13}, pages = {}, pmid = {35808368}, issn = {1424-8220}, support = {861219//Mobility and Training foR beyond 5G ecosystems (MOTOR5G)'. The project has received funding from the European Union's Horizon 2020 programme under the Marie Skłodowska Curie Actions (MSCA)/ ; }, mesh = {*Algorithms ; *Cloud Computing ; Reproducibility of Results ; }, abstract = {The rapid development of Cloud Computing (CC) has led to the release of many services in the cloud environment. 
Service composition awareness of Quality of Service (QoS) is a significant challenge in CC. A single service in the cloud environment cannot respond to the complex requests and diverse requirements of the real world. In some cases, one service cannot fulfill the user's needs, so it is necessary to combine different services to meet these requirements. Many available services provide an enormous QoS and selecting or composing those combined services is called an Np-hard optimization problem. One of the significant challenges in CC is integrating existing services to meet the intricate necessities of different types of users. Due to NP-hard complexity of service composition, many metaheuristic algorithms have been used so far. This article presents the Artificial Bee Colony and Genetic Algorithm (ABCGA) as a metaheuristic algorithm to achieve the desired goals. If the fitness function of the services selected by the Genetic Algorithm (GA) is suitable, a set of services is further introduced for the Artificial Bee Colony (ABC) algorithm to choose the appropriate service from, according to each user's needs. The proposed solution is evaluated through experiments using Cloud SIM simulation, and the numerical results prove the efficiency of the proposed method with respect to reliability, availability, and cost.}, } @article {pmid35808345, year = {2022}, author = {Shahzad, A and Gherbi, A and Zhang, K}, title = {Enabling Fog-Blockchain Computing for Autonomous-Vehicle-Parking System: A Solution to Reinforce IoT-Cloud Platform for Future Smart Parking.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {13}, pages = {}, pmid = {35808345}, issn = {1424-8220}, mesh = {*Blockchain ; Cloud Computing ; Computer Security ; Humans ; Privacy ; }, abstract = {With the advent of modern technologies, including the IoT and blockchain, smart-parking (SP) systems are becoming smarter and smarter. 
Similar to other automated systems, and particularly those that require automation or minimal interaction with humans, the SP system is heuristic in delivering performances, such as throughput in terms of latency, efficiency, privacy, and security, and it is considered a long-term cost-effective solution. This study looks ahead to future trends and developments in SP systems and presents an inclusive, long-term, effective, and well-performing smart autonomous vehicle parking (SAVP) system that explores and employs the emerging fog-computing and blockchain technologies as robust solutions to strengthen the existing collaborative IoT-cloud platform to build and manage SP systems for autonomous vehicles (AVs). In other words, the proposed SAVP system offers a smart-parking solution, both indoors and outdoors, and mainly for AVs looking for vacant parking, wherein the fog nodes act as a middleware layer that provides various parking operations closer to IoT-enabled edge devices. To address the challenges of privacy and security, a lightweight integrated blockchain and cryptography (LIBC) module is deployed, which is functional at each fog node, to authorize and grant access to the AVs in every phase of parking (e.g., from the parking entrance to the parking slot to the parking exit). A proof-of-concept implementation was conducted, wherein the overall computed results, such as the average response time, efficiency, privacy, and security, were examined as highly efficient to enable a proven SAVP system. 
This study also examined an innovative pace, with careful considerations to combatting the existing SP-system challenges and, therefore, to building and managing future scalable SP systems.}, } @article {pmid35808322, year = {2022}, author = {Katayama, Y and Tachibana, T}, title = {Optimal Task Allocation Algorithm Based on Queueing Theory for Future Internet Application in Mobile Edge Computing Platform.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {13}, pages = {}, pmid = {35808322}, issn = {1424-8220}, support = {Platform Technology of Wired/Wireless Access Network Corresponding to Various Services of 5G/Beyond 5G//National Institute of Information and Communications Technology/ ; }, mesh = {*Algorithms ; *Cloud Computing ; *Computer Heuristics ; Forecasting ; Internet/trends ; }, abstract = {For 5G and future Internet, in this paper, we propose a task allocation method for future Internet application to reduce the total latency in a mobile edge computing (MEC) platform with three types of servers: a dedicated MEC server, a shared MEC server, and a cloud server. For this platform, we first calculate the delay between sending a task and receiving a response for the dedicated MEC server, shared MEC server, and cloud server by considering the processing time and transmission delay. Here, the transmission delay for the shared MEC server is derived using queueing theory. Then, we formulate an optimization problem for task allocation to minimize the total latency for all tasks. By solving this optimization problem, tasks can be allocated to the MEC servers and cloud server appropriately. In addition, we propose a heuristic algorithm to obtain the approximate optimal solution in a shorter time. This heuristic algorithm consists of four algorithms: a main algorithm and three additional algorithms. In this algorithm, tasks are divided into two groups, and task allocation is executed for each group. 
We compare the performance of our proposed heuristic algorithm with the solution obtained by three other methods and investigate the effectiveness of our algorithm. Numerical examples are used to demonstrate the effectiveness of our proposed heuristic algorithm. From some results, we observe that our proposed heuristic algorithm can perform task allocation in a short time and can effectively reduce the total latency in a short time. We conclude that our proposed heuristic algorithm is effective for task allocation in a MEC platform with multiple types of MEC servers.}, } @article {pmid35808234, year = {2022}, author = {Chen, X and Liu, G}, title = {Federated Deep Reinforcement Learning-Based Task Offloading and Resource Allocation for Smart Cities in a Mobile Edge Network.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {13}, pages = {}, pmid = {35808234}, issn = {1424-8220}, support = {NO.2018ZDCXL-GY-04-03-02//Shaanxi Key R\&D Program/ ; }, abstract = {Mobile edge computing (MEC) has become an indispensable part of the era of the intelligent manufacturing industry 4.0. In the smart city, computation-intensive tasks can be offloaded to the MEC server or the central cloud server for execution. However, the privacy disclosure issue may arise when the raw data is migrated to other MEC servers or the central cloud server. Since federated learning has the characteristics of protecting the privacy and improving training performance, it is introduced to solve the issue. In this article, we formulate the joint optimization problem of task offloading and resource allocation to minimize the energy consumption of all Internet of Things (IoT) devices subject to delay threshold and limited resources. A two-timescale federated deep reinforcement learning algorithm based on Deep Deterministic Policy Gradient (DDPG) framework (FL-DDPG) is proposed. 
Simulation results show that the proposed algorithm can greatly reduce the energy consumption of all IoT devices.}, } @article {pmid35808224, year = {2022}, author = {Khanh, TT and Hai, TH and Hossain, MD and Huh, EN}, title = {Fuzzy-Assisted Mobile Edge Orchestrator and SARSA Learning for Flexible Offloading in Heterogeneous IoT Environment.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {13}, pages = {}, pmid = {35808224}, issn = {1424-8220}, mesh = {Algorithms ; Fuzzy Logic ; *Internet of Things ; Learning ; Reward ; }, abstract = {In the era of heterogeneous 5G networks, Internet of Things (IoT) devices have significantly altered our daily life by providing innovative applications and services. However, these devices process large amounts of data traffic and their application requires an extremely fast response time and a massive amount of computational resources, leading to a high failure rate for task offloading and considerable latency due to congestion. To improve the quality of services (QoS) and performance due to the dynamic flow of requests from devices, numerous task offloading strategies in the area of multi-access edge computing (MEC) have been proposed in previous studies. Nevertheless, the neighboring edge servers, where computational resources are in excess, have not been considered, leading to unbalanced loads among edge servers in the same network tier. Therefore, in this paper, we propose a collaboration algorithm between a fuzzy-logic-based mobile edge orchestrator (MEO) and state-action-reward-state-action (SARSA) reinforcement learning, which we call the Fu-SARSA algorithm. We aim to minimize the failure rate and service time of tasks and decide on the optimal resource allocation for offloading, such as a local edge server, cloud server, or the best neighboring edge server in the MEC network. Four typical application types, healthcare, AR, infotainment, and compute-intensive applications, were used for the simulation. 
The performance results demonstrate that our proposed Fu-SARSA framework outperformed other algorithms in terms of service time and the task failure rate, especially when the system was overloaded.}, } @article {pmid35808184, year = {2022}, author = {Aldhyani, THH and Alkahtani, H}, title = {Artificial Intelligence Algorithm-Based Economic Denial of Sustainability Attack Detection Systems: Cloud Computing Environments.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {13}, pages = {}, pmid = {35808184}, issn = {1424-8220}, support = {NA000106//'This work was supported through the Annual Funding track by the Deanship of Scientific Re-search, Vice Presidency for Graduate Studies and Scientific Research, King Faisal University, Sau-di Arabia [NA000106]/ ; }, mesh = {Algorithms ; *Artificial Intelligence ; *Cloud Computing ; Neural Networks, Computer ; Support Vector Machine ; }, abstract = {Cloud computing is currently the most cost-effective means of providing commercial and consumer IT services online. However, it is prone to new flaws. An economic denial of sustainability attack (EDoS) specifically leverages the pay-per-use paradigm in building up resource demands over time, culminating in unanticipated usage charges to the cloud customer. We present an effective approach to mitigating EDoS attacks in cloud computing. To mitigate such distributed attacks, methods for detecting them on different cloud computing smart grids have been suggested. These include hard-threshold, machine, and deep learning, support vector machine (SVM), K-nearest neighbors (KNN), random forest (RF) tree algorithms, namely convolutional neural network (CNN), and long short-term memory (LSTM). These algorithms have greater accuracies and lower false alarm rates and are essential for improving the cloud computing service provider security system. 
The dataset of nine injection attacks for testing machine and deep learning algorithms was obtained from the Cyber Range Lab at the University of New South Wales (UNSW), Canberra. The experiments were conducted in two categories: binary classification, which included normal and attack datasets, and multi-classification, which included nine classes of attack data. The results of the proposed algorithms showed that the RF approach achieved accuracy of 98% with binary classification, whereas the SVM model achieved accuracy of 97.54% with multi-classification. Moreover, statistical analyses, such as mean square error (MSE), Pearson correlation coefficient (R), and the root mean square error (RMSE), were applied in evaluating the prediction errors between the input data and the prediction values from different machine and deep learning algorithms. The RF tree algorithm achieved a very low prediction level (MSE = 0.01465) and a correlation R[2] (R squared) level of 92.02% with the binary classification dataset, whereas the algorithm attained an R[2] level of 89.35% with a multi-classification dataset. The findings of the proposed system were compared with different existing EDoS attack detection systems. The proposed attack mitigation algorithms, which were developed based on artificial intelligence, outperformed the few existing systems. The goal of this research is to enable the detection and effective mitigation of EDoS attacks.}, } @article {pmid35801559, year = {2022}, author = {Mueen, A and Awedh, M and Zafar, B}, title = {Multi-obstacle aware smart navigation system for visually impaired people in fog connected IoT-cloud environment.}, journal = {Health informatics journal}, volume = {28}, number = {3}, pages = {14604582221112609}, doi = {10.1177/14604582221112609}, pmid = {35801559}, issn = {1741-2811}, mesh = {Algorithms ; Humans ; *Visually Impaired Persons ; }, abstract = {Design of smart navigation for visually impaired/blind people is a hindering task. 
Existing researchers analyzed it in either an indoor or an outdoor environment and also failed to focus on optimum route selection, latency minimization and multi-obstacle presence. In order to overcome these challenges and to provide precise assistance to visually impaired people, this paper proposes a smart navigation system for visually impaired people based on both image and sensor outputs of the smart wearable. The proposed approach involves the upcoming processes: (i) the input query of the visually impaired people (users) is improved by the query processor in order to achieve accurate assistance. (ii) The safest route from source to destination is provided by implementing Environment aware Bald Eagle Search Optimization algorithm in which multiple routes are identified and classified into three different classes from which the safest route is suggested to the users. (iii) The concept of fog computing is leveraged and the optimal fog node is selected in order to minimize the latency. The fog node selection is executed by using Nearest Grey Absolute Decision Making Algorithm based on multiple parameters. (iv) The retrieval of relevant information is performed by means of computing Euclidean distance between the reference and database information. (v) The multi-obstacle detection is carried out by YOLOv3 Tiny in which both the static and dynamic obstacles are classified into small, medium and large obstacles. (vi) The decision upon navigation is provided by implementing Adaptive Asynchronous Advantage Actor-Critic (A3C) algorithm based on fusion of both image and sensor outputs. (vii) Management of heterogeneous data is carried out by predicting and pruning the fault data in the sensor output by minimum distance based extended Kalman filter for better accuracy and clustering the similar information by implementing Spatial-Temporal Optics Clustering Algorithm to reduce complexity. 
The proposed model is implemented in NS 3.26 and the results proved that it outperforms other existing works in terms of obstacle detection and task completion time.}, } @article {pmid35800683, year = {2022}, author = {Chen, T}, title = {Deep Learning-Based Optimization Algorithm for Enterprise Personnel Identity Authentication.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {9662817}, pmid = {35800683}, issn = {1687-5273}, mesh = {Algorithms ; *Deep Learning ; Humans ; }, abstract = {Enterprise strategic management is not only an important part of enterprise work, but also an important factor to deepen the reform of management system and promote the centralized and unified management of enterprises. Enterprise strategic management is to study the major problems of survival and development of enterprises in the competitive environment from the overall and long-term point of view. It is the most important function of senior leaders of modern enterprises. Starting from the characteristics of the recognition object, this paper analyzes the individual differences of biometrics through intelligent face image recognition technology to identify biometrics, which can be used to identify different individuals. This paper studies the main problems of personnel identity authentication in the current enterprise strategic management system. Based on identity management and supported by face image recognition technology, deep learning, and cloud computing technology, the personnel management model of the management system is constructed, which solves the problems of personnel real identity authentication and personnel safety behavior control. Experiments show that the model can simplify the workflow, improve the operation efficiency, and reduce the management cost. 
From the perspective of enterprise system development, building a scientific enterprise strategic management system is of great significance to improve the scientific level of enterprise system management.}, } @article {pmid38603290, year = {2022}, author = {Dadash Pour, P and Nazzal, MA and Darras, BM}, title = {The role of industry 4.0 technologies in overcoming pandemic challenges for the manufacturing sector.}, journal = {Concurrent engineering, research, and applications}, volume = {30}, number = {2}, pages = {190-205}, pmid = {38603290}, issn = {1063-293X}, abstract = {Industry 4.0 aims to revolutionize the manufacturing sector to achieve sustainable and efficient production. The novel coronavirus pandemic has brought many challenges in different industries globally. Shortage in supply of raw material, changes in product demand, and factories closures due to general lockdown are all examples of such challenges. The adaption of Industry 4.0 technologies can address these challenges and prevent their recurrence in case of another pandemic outbreak in future. A prominent advantage of Industry 4.0 technologies is their capability of building resilient and flexible systems that are responsive to exceptional circumstances such as unpredictable market demand, supply chain interruptions, and manpower shortage which can be crucial at times of pandemics. This work focuses on discussing how different Industry 4.0 technologies such as Cyber Physical Systems, Additive Manufacturing, and Internet of Things can help the manufacturing sector overcome pandemics challenges. The role of Industry 4.0 technologies in raw material provenance identification and counterfeit prevention, collaboration and business continuity, agility and decentralization of manufacturing, crisis simulation, elimination of single point of failure risk, and other factors is discussed. 
Moreover, a self-assessment readiness model has been developed to help manufacturing firms determine their readiness level for implementing different Industry 4.0 technologies.}, } @article {pmid37378273, year = {2023}, author = {Guo, G and Sun, Y and Qian, G and Wang, Q}, title = {LIC criterion for optimal subset selection in distributed interval estimation.}, journal = {Journal of applied statistics}, volume = {50}, number = {9}, pages = {1900-1920}, pmid = {37378273}, issn = {0266-4763}, abstract = {Distributed interval estimation in linear regression may be computationally infeasible in the presence of big data that are normally stored in different computer servers or in cloud. The existing challenge represents the results from the distributed estimation may still contain redundant information about the population characteristics of the data. To tackle this computing challenge, we develop an optimization procedure to select the best subset from the collection of data subsets, based on which we perform interval estimation in the context of linear regression. The procedure is derived based on minimizing the length of the final interval estimator and maximizing the information remained in the selected data subset, thus is named as the LIC criterion. Theoretical performance of the LIC criterion is studied in this paper together with a simulation study and real data analysis.}, } @article {pmid38013884, year = {2022}, author = {Rogage, K and Mahamedi, E and Brilakis, I and Kassem, M}, title = {Beyond digital shadows: Digital Twin used for monitoring earthwork operation in large infrastructure projects.}, journal = {AI in civil engineering}, volume = {1}, number = {1}, pages = {7}, pmid = {38013884}, issn = {2730-5392}, abstract = {Current research on Digital Twin (DT) is largely focused on the performance of built assets in their operational phases as well as on urban environment. 
However, Digital Twin has not been given enough attention to construction phases, for which this paper proposes a Digital Twin framework for the construction phase, develops a DT prototype and tests it for the use case of measuring the productivity and monitoring of earthwork operation. The DT framework and its prototype are underpinned by the principles of versatility, scalability, usability and automation to enable the DT to fulfil the requirements of large-sized earthwork projects and the dynamic nature of their operation. Cloud computing and dashboard visualisation were deployed to enable automated and repeatable data pipelines and data analytics at scale and to provide insights in near-real time. The testing of the DT prototype in a motorway project in the Northeast of England successfully demonstrated its ability to produce key insights by using the following approaches: (i) To predict equipment utilisation ratios and productivities; (ii) To detect the percentage of time spent on different tasks (i.e., loading, hauling, dumping, returning or idling), the distance travelled by equipment over time and the speed distribution; and (iii) To visualise certain earthwork operations.}, } @article {pmid38620743, year = {2021}, author = {Poonia, A and Ghosh, S and Ghosh, A and Nath, SB and Ghosh, SK and Buyya, R}, title = {CONFRONT: Cloud-fog-dew based monitoring framework for COVID-19 management.}, journal = {Internet of things (Amsterdam, Netherlands)}, volume = {16}, number = {}, pages = {100459}, pmid = {38620743}, issn = {2542-6605}, abstract = {In the recent times, the IoT (Internet of Things) enabled devices and applications have seen a rapid growth in various sectors including healthcare. The ability of low-cost connected sensors to cover large areas makes it a potential tool in the fight against pandemics, like COVID-19. 
The COVID-19 has posed a formidable challenge for the developing countries, like India, which need to cater to large population base with limited health infrastructure. In this paper, we proposed a Cloud-fog-dew based mOnitoriNg Framework foR cOvid-19 maNagemenT, called CONFRONT. This cloud-fog-dew based healthcare model may help in preliminary diagnosis and also in monitoring patients while they are in quarantine facilities or home based treatments. The fog architecture ensures that the model is suited for real-time scenarios while keeping the bandwidth requirements low. To analyse large scale COVID-19 statistics data for extracting aggregate information of the disease spread, the cloud servers are leveraged due to its scalable computational and storage capabilities. The dew architecture ensures that the application is available at a limited scale even when cloud connectivity is lost, leading to a faster uptime for the application. A low cost wearable device consisting of heterogeneous sensors has also been designed and fabricated to realize the proposed framework.}, } @article {pmid36654109, year = {2021}, author = {Xu, X and Sun, J and Endo, S and Li, Y and Benjamin, SC and Yuan, X}, title = {Variational algorithms for linear algebra.}, journal = {Science bulletin}, volume = {66}, number = {21}, pages = {2181-2188}, doi = {10.1016/j.scib.2021.06.023}, pmid = {36654109}, issn = {2095-9281}, abstract = {Quantum algorithms have been developed for efficiently solving linear algebra tasks. However, they generally require deep circuits and hence universal fault-tolerant quantum computers. In this work, we propose variational algorithms for linear algebra tasks that are compatible with noisy intermediate-scale quantum devices. We show that the solutions of linear systems of equations and matrix-vector multiplications can be translated as the ground states of the constructed Hamiltonians. 
Based on the variational quantum algorithms, we introduce Hamiltonian morphing together with an adaptive ansätz for efficiently finding the ground state, and show the solution verification. Our algorithms are especially suitable for linear algebra problems with sparse matrices, and have wide applications in machine learning and optimisation problems. The algorithm for matrix multiplications can be also used for Hamiltonian simulation and open system simulation. We evaluate the cost and effectiveness of our algorithm through numerical simulations for solving linear systems of equations. We implement the algorithm on the IBM quantum cloud device with a high solution fidelity of 99.95%.}, } @article {pmid36618951, year = {2021}, author = {Moosavi, J and Bakhshi, J and Martek, I}, title = {The application of industry 4.0 technologies in pandemic management: Literature review and case study.}, journal = {Healthcare analytics (New York, N.Y.)}, volume = {1}, number = {}, pages = {100008}, pmid = {36618951}, issn = {2772-4425}, abstract = {The Covid-19 pandemic impact on people's lives has been devastating. Around the world, people have been forced to stay home, resorting to the use of digital technologies in an effort to continue their life and work as best they can. Covid-19 has thus accelerated society's digital transformation towards Industry 4.0 (the fourth industrial revolution). Using scientometric analysis, this study presents a systematic literature review of the themes within Industry 4.0. Thematic analysis reveals that the Internet of Things (IoT), Artificial Intelligence (AI), Cloud computing, Machine learning, Security, Big Data, Blockchain, Deep learning, Digitalization, and Cyber-physical system (CPS) to be the key technologies associated with Industry 4.0. Subsequently, a case study using Industry 4.0 technologies to manage the Covid-19 pandemic is discussed. 
In conclusion, Covid-19 is clearly shown to be an accelerant in the progression towards Industry 4.0. Moreover, the technologies of this digital transformation can be expected to be invoked in the management of future pandemics.}, } @article {pmid36700091, year = {2021}, author = {Blum, BC and Emili, A}, title = {Omics Notebook: robust, reproducible and flexible automated multiomics exploratory analysis and reporting.}, journal = {Bioinformatics advances}, volume = {1}, number = {1}, pages = {vbab024}, pmid = {36700091}, issn = {2635-0041}, abstract = {SUMMARY: Mass spectrometry is an increasingly important tool for the global interrogation of diverse biomolecules. Unfortunately, the complexity of downstream data analysis is a major challenge for the routine use of these data by investigators from broader training backgrounds. Omics Notebook is an open-source framework for exploratory analysis, reporting and integrating multiomic data that are automated, reproducible and customizable. Built-in functions allow the processing of proteomic data from MaxQuant and metabolomic data from XCMS, along with other omics data in standardized input formats as specified in the documentation. In addition, the use of containerization manages R package installation requirements and is tailored for shared high-performance computing or cloud environments.

Omics Notebook is implemented in Python and R and is available for download from https://github.com/cnsb-boston/Omics_Notebook with additional documentation under a GNU GPLv3 license.

SUPPLEMENTARY INFORMATION: Supplementary data are available at Bioinformatics Advances online.}, } @article {pmid37645133, year = {2021}, author = {Brandolini, F and Domingo-Ribas, G and Zerboni, A and Turner, S}, title = {A Google Earth Engine-enabled Python approach for the identification of anthropogenic palaeo-landscape features.}, journal = {Open research Europe}, volume = {1}, number = {}, pages = {22}, pmid = {37645133}, issn = {2732-5121}, abstract = {The necessity of sustainable development for landscapes has emerged as an important theme in recent decades. Current methods take a holistic approach to landscape heritage and promote an interdisciplinary dialogue to facilitate complementary landscape management strategies. With the socio-economic values of the "natural" and "cultural" landscape heritage increasingly recognised worldwide, remote sensing tools are being used more and more to facilitate the recording and management of landscape heritage. The advent of freeware cloud computing services has enabled significant improvements in landscape research allowing the rapid exploration and processing of satellite imagery such as the Landsat and Copernicus Sentinel datasets. This research represents one of the first applications of the Google Earth Engine (GEE) Python application programming interface (API) in studies of historic landscapes. The complete free and open-source software (FOSS) cloud protocol proposed here consists of a Python code script developed in Google Colab, which could be adapted and replicated in different areas of the world. A multi-temporal approach has been adopted to investigate the potential of Sentinel-2 satellite imagery to detect buried hydrological and anthropogenic features along with spectral index and spectral decomposition analysis. 
The protocol's effectiveness in identifying palaeo-riverscape features has been tested in the Po Plain (N Italy).}, } @article {pmid36713097, year = {2021}, author = {Roffi, M and Casadei, B and Gouillard, C and Nambatingué, N and Daval, G and Bardinet, I and Priori, SG}, title = {Digital transformation of major scientific meetings induced by the COVID-19 pandemic: insights from the ESC 2020 annual congress.}, journal = {European heart journal. Digital health}, volume = {2}, number = {4}, pages = {704-712}, doi = {10.1093/ehjdh/ztab076}, pmid = {36713097}, issn = {2634-3916}, support = {CH/12/3/29609/BHF_/British Heart Foundation/United Kingdom ; }, abstract = {As a consequence of the COVID-19 pandemic, the European Society of Cardiology (ESC) was forced to pivot the scientific programme of the ESC Congress 2021 into a totally new format for online consumption, The Digital Experience. A variety of new suppliers were involved, including experts in TV studio, cloud infrastructure, online platforms, video management, and online analytics. An information technology platform able to support hundreds of thousands simultaneous connections was built and cloud computing technologies were put in place to help scale up and down the resources needed for the high number of users at peak times. The video management system was characterized by multiple layers of security and redundancy and offered the same fluidity, albeit at a different resolution, to all user independently of the performance of their internet connection. The event, free for all users, was an undisputed success, both from a scientific/educational as well as from a digital technology perspective. The number of registrations increased by almost four-fold when compared with the 2019 record-breaking edition in Paris, with a greater proportion of younger and female participants as well as of participants from low- and middle-income countries. No major technical failures were encountered. 
For the first time in history, attendees from all around the globe had the same real-time access to the world's most popular cardiovascular conference.}, } @article {pmid36567694, year = {2021}, author = {Singh, M and Singh, BB and Singh, R and Upendra, B and Kaur, R and Gill, SS and Biswas, MS}, title = {Quantifying COVID-19 enforced global changes in atmospheric pollutants using cloud computing based remote sensing.}, journal = {Remote sensing applications : society and environment}, volume = {22}, number = {}, pages = {100489}, pmid = {36567694}, issn = {2352-9385}, abstract = {Global lockdowns in response to the COVID-19 pandemic have led to changes in the anthropogenic activities resulting in perceivable air quality improvements. Although several recent studies have analyzed these changes over different regions of the globe, these analyses have been constrained due to the usage of station based data which is mostly limited up to the metropolitan cities. Also the quantifiable changes have been reported only for the developed and developing regions leaving the poor economies (e.g. Africa) due to the shortage of in-situ data. Using a comprehensive set of high spatiotemporal resolution satellites and merged products of air pollutants, we analyze the air quality across the globe and quantify the improvement resulting from the suppressed anthropogenic activity during the lockdowns. In particular, we focus on megacities, capitals and cities with high standards of living to make the quantitative assessment. Our results offer valuable insights into the spatial distribution of changes in the air pollutants due to COVID-19 enforced lockdowns. Statistically significant reductions are observed over megacities with mean reduction by 19.74%, 7.38% and 49.9% in nitrogen dioxide (NO2), aerosol optical depth (AOD) and PM2.5 concentrations. 
Google Earth Engine empowered cloud computing based remote sensing is used and the results provide a testbed for climate sensitivity experiments and validation of chemistry-climate models. Additionally, Google Earth Engine based apps have been developed to visualize the changes in a real-time fashion.}, } @article {pmid38620326, year = {2021}, author = {Gupta, D and Bhatt, S and Gupta, M and Tosun, AS}, title = {Future Smart Connected Communities to Fight COVID-19 Outbreak.}, journal = {Internet of things (Amsterdam, Netherlands)}, volume = {13}, number = {}, pages = {100342}, pmid = {38620326}, issn = {2542-6605}, abstract = {Internet of Things (IoT) has grown rapidly in the last decade and continues to develop in terms of dimension and complexity, offering a wide range of devices to support a diverse set of applications. With ubiquitous Internet, connected sensors and actuators, networking and communication technology along with artificial intelligence (AI), smart cyber-physical systems (CPS) provide services rendering assistance and convenience to humans in their daily lives. However, the recent outbreak of COVID-19 (also known as coronavirus) pandemic has exposed and highlighted the limitations of contemporary technological deployments especially to contain the widespread of this disease. IoT and smart connected technologies together with data-driven applications can play a crucial role not only in the prevention, mitigation, or continuous remote monitoring of patients, but also enable prompt enforcement of guidelines, rules, and administrative orders to contain such future outbreaks. In this paper, we envision an IoT and data-supported connected ecosystem designed for intelligent monitoring, pro-active prevention and control, and mitigation of COVID-19 and similar epidemics. 
We propose a gamut of synergistic applications and technology systems for various smart infrastructures including E-Health, smart home, supply chain management, transportation, and city, which will work in convergence to develop 'pandemic-proof' future smart communities. We also present a generalized cloud-enabled IoT implementation framework along with scientific solutions, which can be adapted and extended to deploy smart connected ecosystem scenarios using widely used Amazon Web Services (AWS) cloud infrastructures. In addition, we also implement an E-Health RPM use case scenario to demonstrate the need and practicality for smart connected communities. Finally, we highlight challenges and research directions that need thoughtful consideration and across the board cooperation among stakeholders to build resilient communities against future pandemics.}, } @article {pmid36299497, year = {2021}, author = {Jensen, TL and Hooper, WF and Cherikh, SR and Goll, JB}, title = {RP-REP Ribosomal Profiling Reports: an open-source cloud-enabled framework for reproducible ribosomal profiling data processing, analysis, and result reporting.}, journal = {F1000Research}, volume = {10}, number = {}, pages = {143}, pmid = {36299497}, issn = {2046-1402}, abstract = {Ribosomal profiling is an emerging experimental technology to measure protein synthesis by sequencing short mRNA fragments undergoing translation in ribosomes. Applied on the genome wide scale, this is a powerful tool to profile global protein synthesis within cell populations of interest. Such information can be utilized for biomarker discovery and detection of treatment-responsive genes. However, analysis of ribosomal profiling data requires careful preprocessing to reduce the impact of artifacts and dedicated statistical methods for visualizing and modeling the high-dimensional discrete read count data. 
Here we present Ribosomal Profiling Reports (RP-REP), a new open-source cloud-enabled software that allows users to execute start-to-end gene-level ribosomal profiling and RNA-Seq analysis on a pre-configured Amazon Virtual Machine Image (AMI) hosted on AWS or on the user's own Ubuntu Linux server. The software works with FASTQ files stored locally, on AWS S3, or at the Sequence Read Archive (SRA). RP-REP automatically executes a series of customizable steps including filtering of contaminant RNA, enrichment of true ribosomal footprints, reference alignment and gene translation quantification, gene body coverage, CRAM compression, reference alignment QC, data normalization, multivariate data visualization, identification of differentially translated genes, and generation of heatmaps, co-translated gene clusters, enriched pathways, and other custom visualizations. RP-REP provides functionality to contrast RNA-SEQ and ribosomal profiling results, and calculates translational efficiency per gene. The software outputs a PDF report and publication-ready table and figure files. As a use case, we provide RP-REP results for a dengue virus study that tested cytosol and endoplasmic reticulum cellular fractions of human Huh7 cells pre-infection and at 6 h, 12 h, 24 h, and 40 h post-infection. Case study results, Ubuntu installation scripts, and the most recent RP-REP source code are accessible at GitHub. 
The cloud-ready AMI is available at AWS (AMI ID: RPREP RSEQREP (Ribosome Profiling and RNA-Seq Reports) v2.1 (ami-00b92f52d763145d3)).}, } @article {pmid36939746, year = {2021}, author = {Zhang, G and Zhang, Y and Jin, J}, title = {The Ultrafast and Accurate Mapping Algorithm FANSe3: Mapping a Human Whole-Genome Sequencing Dataset Within 30 Minutes.}, journal = {Phenomics (Cham, Switzerland)}, volume = {1}, number = {1}, pages = {22-30}, pmid = {36939746}, issn = {2730-5848}, abstract = {Aligning billions of reads generated by the next-generation sequencing (NGS) to reference sequences, termed "mapping", is the time-consuming and computationally-intensive process in most NGS applications. A Fast, accurate and robust mapping algorithm is highly needed. Therefore, we developed the FANSe3 mapping algorithm, which can map a 30 × human whole-genome sequencing (WGS) dataset within 30 min, a 50 × human whole exome sequencing (WES) dataset within 30 s, and a typical mRNA-seq dataset within seconds in a single-server node without the need for any hardware acceleration feature. Like its predecessor FANSe2, the error rate of FANSe3 can be kept as low as 10[-9] in most cases, this is more robust than the Burrows-Wheeler transform-based algorithms. Error allowance hardly affected the identification of a driver somatic mutation in clinically relevant WGS data and provided robust gene expression profiles regardless of the parameter settings and sequencer used. The novel algorithm, designed for high-performance cloud-computing after infrastructures, will break the bottleneck of speed and accuracy in NGS data analysis and promote NGS applications in various fields. 
The FANSe3 algorithm can be downloaded from the website: http://www.chi-biotech.com/fanse3/.}, } @article {pmid36082106, year = {2021}, author = {Pipia, L and Amin, E and Belda, S and Salinero-Delgado, M and Verrelst, J}, title = {Green LAI Mapping and Cloud Gap-Filling Using Gaussian Process Regression in Google Earth Engine.}, journal = {Remote sensing}, volume = {13}, number = {3}, pages = {403}, pmid = {36082106}, issn = {2072-4292}, support = {755617/ERC_/European Research Council/International ; }, abstract = {For the last decade, Gaussian process regression (GPR) proved to be a competitive machine learning regression algorithm for Earth observation applications, with attractive unique properties such as band relevance ranking and uncertainty estimates. More recently, GPR also proved to be a proficient time series processor to fill up gaps in optical imagery, typically due to cloud cover. This makes GPR perfectly suited for large-scale spatiotemporal processing of satellite imageries into cloud-free products of biophysical variables. With the advent of the Google Earth Engine (GEE) cloud platform, new opportunities emerged to process local-to-planetary scale satellite data using advanced machine learning techniques and convert them into gap-filled vegetation properties products. However, GPR is not yet part of the GEE ecosystem. To circumvent this limitation, this work proposes a general adaptation of GPR formulation to parallel processing framework and its integration into GEE. To demonstrate the functioning and utility of the developed workflow, a GPR model predicting green leaf area index (LAI G) from Sentinel-2 imagery was imported. Although by running this GPR model into GEE any corner of the world can be mapped into LAI G at a resolution of 20 m, here we show some demonstration cases over western Europe with zoom-ins over Spain. Thanks to the computational power of GEE, the mapping takes place on-the-fly. 
Additionally, a GPR-based gap filling strategy based on pre-optimized kernel hyperparameters is also put forward for the generation of multi-orbit cloud-free LAI G maps with an unprecedented level of detail, and the extraction of regularly-sampled LAI G time series at a pixel level. The ability to plugin a locally-trained GPR model into the GEE framework and its instant processing opens up a new paradigm of remote sensing image processing.}, } @article {pmid36654308, year = {2021}, author = {Chen, X and Cheng, B and Li, Z and Nie, X and Yu, N and Yung, MH and Peng, X}, title = {Experimental cryptographic verification for near-term quantum cloud computing.}, journal = {Science bulletin}, volume = {66}, number = {1}, pages = {23-28}, doi = {10.1016/j.scib.2020.08.013}, pmid = {36654308}, issn = {2095-9281}, abstract = {An important task for quantum cloud computing is to make sure that there is a real quantum computer running, instead of classical simulation. Here we explore the applicability of a cryptographic verification scheme for verifying quantum cloud computing. We provided a theoretical extension and implemented the scheme on a 5-qubit NMR quantum processor in the laboratory and a 5-qubit and 16-qubit processors of the IBM quantum cloud. We found that the experimental results of the NMR processor can be verified by the scheme with about 1.4% error, after noise compensation by standard techniques. However, the fidelity of the IBM quantum cloud is currently too low to pass the test (about 42% error). 
This verification scheme shall become practical when servers claim to offer quantum-computing resources that can achieve quantum supremacy.}, } @article {pmid36081683, year = {2021}, author = {Berger, K and Caicedo, JPR and Martino, L and Wocher, M and Hank, T and Verrelst, J}, title = {A Survey of Active Learning for Quantifying Vegetation Traits from Terrestrial Earth Observation Data.}, journal = {Remote sensing}, volume = {13}, number = {2}, pages = {287}, pmid = {36081683}, issn = {2072-4292}, support = {755617/ERC_/European Research Council/International ; }, abstract = {The current exponential increase of spatiotemporally explicit data streams from satellite-based Earth observation missions offers promising opportunities for global vegetation monitoring. Intelligent sampling through active learning (AL) heuristics provides a pathway for fast inference of essential vegetation variables by means of hybrid retrieval approaches, i.e., machine learning regression algorithms trained by radiative transfer model (RTM) simulations. In this study we summarize AL theory and perform a brief systematic literature survey about AL heuristics used in the context of Earth observation regression problems over terrestrial targets. Across all relevant studies it appeared that: (i) retrieval accuracy of AL-optimized training data sets outperformed models trained over large randomly sampled data sets, and (ii) Euclidean distance-based (EBD) diversity method tends to be the most efficient AL technique in terms of accuracy and computational demand. Additionally, a case study is presented based on experimental data employing both uncertainty and diversity AL criteria. Hereby, a simulated training data base by the PROSAIL-PRO canopy RTM is used to demonstrate the benefit of AL techniques for the estimation of total leaf carotenoid content (Cxc) and leaf water content (Cw). Gaussian process regression (GPR) was incorporated to minimize and optimize the training data set with AL. 
Training the GPR algorithm on optimally AL-based sampled data sets led to improved variable retrievals compared to training on full data pools, which is further demonstrated on a mapping example. From these findings we can recommend the use of AL-based sub-sampling procedures to select the most informative samples out of large training data pools. This will not only optimize regression accuracy due to exclusion of redundant information, but also speed up processing time and reduce final model size of kernel-based machine learning regression algorithms, such as GPR. With this study we want to encourage further testing and implementation of AL sampling methods for hybrid retrieval workflows. AL can contribute to the solution of regression problems within the framework of operational vegetation monitoring using satellite imaging spectroscopy data, and may strongly facilitate data processing for cloud-computing platforms.}, } @article {pmid38217155, year = {2021}, author = {Raucci, U and Valentini, A and Pieri, E and Weir, H and Seritan, S and Martínez, TJ}, title = {Voice-controlled quantum chemistry.}, journal = {Nature computational science}, volume = {1}, number = {1}, pages = {42-45}, pmid = {38217155}, issn = {2662-8457}, support = {N00014-18-1-2624//United States Department of Defense | United States Navy | Office of Naval Research (ONR)/ ; N00014-18-1-2659//United States Department of Defense | United States Navy | Office of Naval Research (ONR)/ ; N00014-18-1-2659//United States Department of Defense | United States Navy | Office of Naval Research (ONR)/ ; N00014-18-1-2659//United States Department of Defense | United States Navy | Office of Naval Research (ONR)/ ; N00014-16-1-2557//United States Department of Defense | United States Navy | Office of Naval Research (ONR)/ ; N00014-18-1-2659//United States Department of Defense | United States Navy | Office of Naval Research (ONR)/ ; N00014-18-1-2624//United States Department of Defense | United States Navy | 
Office of Naval Research (ONR)/ ; N00014-16-1-2557//United States Department of Defense | United States Navy | Office of Naval Research (ONR)/ ; }, abstract = {Over the past decade, artificial intelligence has been propelled forward by advances in machine learning algorithms and computational hardware, opening up myriads of new avenues for scientific research. Nevertheless, virtual assistants and voice control have yet to be widely used in the natural sciences. Here, we present ChemVox, an interactive Amazon Alexa skill that uses speech recognition to perform quantum chemistry calculations. This new application interfaces Alexa with cloud computing and returns the results through a capable device. ChemVox paves the way to making computational chemistry routinely accessible to the wider community.}, } @article {pmid36504549, year = {2021}, author = {Hanke, M and Pestilli, F and Wagner, AS and Markiewicz, CJ and Poline, JB and Halchenko, YO}, title = {In defense of decentralized research data management.}, journal = {Neuroforum}, volume = {27}, number = {1}, pages = {17-25}, pmid = {36504549}, issn = {2363-7013}, support = {P41 EB019936/EB/NIBIB NIH HHS/United States ; }, abstract = {Decentralized research data management (dRDM) systems handle digital research objects across participating nodes without critically relying on central services. We present four perspectives in defense of dRDM, illustrating that, in contrast to centralized or federated research data management solutions, a dRDM system based on heterogeneous but interoperable components can offer a sustainable, resilient, inclusive, and adaptive infrastructure for scientific stakeholders: An individual scientist or laboratory, a research institute, a domain data archive or cloud computing platform, and a collaborative multisite consortium. All perspectives share the use of a common, self-contained, portable data structure as an abstraction from current technology and service choices. 
In conjunction, the four perspectives review how varying requirements of independent scientific stakeholders can be addressed by a scalable, uniform dRDM solution and present a working system as an exemplary implementation.}, } @article {pmid35983015, year = {2020}, author = {Rizzo, JR and Feng, C and Riewpaiboon, W and Mongkolwat, P}, title = {A Low-Vision Navigation Platform for Economies in Transition Countries.}, journal = {Proceedings IEEE World Congress on Services (SERVICES). IEEE World Congress on Services}, volume = {2020}, number = {}, pages = {1-3}, pmid = {35983015}, issn = {2642-939X}, support = {R21 EY033689/EY/NEI NIH HHS/United States ; }, abstract = {An ability to move freely, when wanted, is an essential activity for healthy living. Visually impaired and completely blinded persons encounter many disadvantages in their day-to-day activities, including performing work-related tasks. They are at risk of mobility losses, illness, debility, social isolation, and premature mortality. A novel wearable device and computing platform called VIS[4]ION is reducing the disadvantage gaps and raising living standards for the visually challenged. It provides personal mobility navigational services that serves as a customizable, human-in-the-loop, sensing-to-feedback platform to deliver functional assistance. The platform is configured as a wearable that provides on-board microcomputers, human-machine interfaces, and sensory augmentation. Mobile edge computing enhances functionality as more services are unleashed with the computational gains. The meta-level goal is to support spatial cognition, personal freedom, and activities, and to promoting health and wellbeing. VIS[4]ION can be conceptualized as the dovetailing of two thrusts: an on-person navigational and computing device and a multimodal functional aid providing microservices through the cloud. The device has on-board wireless capabilities connected through Wi-Fi or 4/5G. 
The cloud-based microservices reduce hardware and power requirements while allowing existing and new services to be enhanced and added such as loading new map and real-time communication via haptic or audio signals. This technology can be made available and affordable in the economies of transition countries.}, } @article {pmid35939281, year = {2020}, author = {Kaplan, M and Kneifel, C and Orlikowski, V and Dorff, J and Newton, M and Howard, A and Shinn, D and Bishawi, M and Chidyagwai, S and Balogh, P and Randles, A}, title = {Cloud Computing for COVID-19: Lessons Learned From Massively Parallel Models of Ventilator Splitting.}, journal = {Computing in science & engineering}, volume = {22}, number = {6}, pages = {37-47}, pmid = {35939281}, issn = {1521-9615}, abstract = {A patient-specific airflow simulation was developed to help address the pressing need for an expansion of the ventilator capacity in response to the COVID-19 pandemic. The computational model provides guidance regarding how to split a ventilator between two or more patients with differing respiratory physiologies. To address the need for fast deployment and identification of optimal patient-specific tuning, there was a need to simulate hundreds of millions of different clinically relevant parameter combinations in a short time. This task, driven by the dire circumstances, presented unique computational and research challenges. We present here the guiding principles and lessons learned as to how a large-scale and robust cloud instance was designed and deployed within 24 hours and 800 000 compute hours were utilized in a 72-hour period. 
We discuss the design choices to enable a quick turnaround of the model, execute the simulation, and create an intuitive and interactive interface.}, } @article {pmid38620477, year = {2020}, author = {Tuli, S and Tuli, S and Tuli, R and Gill, SS}, title = {Predicting the growth and trend of COVID-19 pandemic using machine learning and cloud computing.}, journal = {Internet of things (Amsterdam, Netherlands)}, volume = {11}, number = {}, pages = {100222}, pmid = {38620477}, issn = {2542-6605}, abstract = {The outbreak of COVID-19 Coronavirus, namely SARS-CoV-2, has created a calamitous situation throughout the world. The cumulative incidence of COVID-19 is rapidly increasing day by day. Machine Learning (ML) and Cloud Computing can be deployed very effectively to track the disease, predict growth of the epidemic and design strategies and policies to manage its spread. This study applies an improved mathematical model to analyse and predict the growth of the epidemic. An ML-based improved model has been applied to predict the potential threat of COVID-19 in countries worldwide. We show that using iterative weighting for fitting Generalized Inverse Weibull distribution, a better fit can be obtained to develop a prediction framework. This has been deployed on a cloud computing platform for more accurate and real-time prediction of the growth behavior of the epidemic. A data driven approach with higher accuracy as here can be very useful for a proactive response from the government and citizens. 
Finally, we propose a set of research opportunities and setup grounds for further practical applications.}, } @article {pmid38116301, year = {2020}, author = {He, X and Lin, X}, title = {Challenges and Opportunities in Statistics and Data Science: Ten Research Areas.}, journal = {Harvard data science review}, volume = {2}, number = {3}, pages = {}, pmid = {38116301}, issn = {2644-2353}, support = {R35 CA197449/CA/NCI NIH HHS/United States ; U01 HG009088/HG/NHGRI NIH HHS/United States ; U19 CA203654/CA/NCI NIH HHS/United States ; }, abstract = {As a discipline that deals with many aspects of data, statistics is a critical pillar in the rapidly evolving landscape of data science. The increasingly vital role of data, especially big data, in many applications, presents the field of statistics with unparalleled challenges and exciting opportunities. Statistics plays a pivotal role in data science by assisting with the use of data and decision making in the face of uncertainty. In this article, we present ten research areas that could make statistics and data science more impactful on science and society. Focusing on these areas will help better transform data into knowledge, actionable insights and deliverables, and promote more collaboration with computer and other quantitative scientists and domain scientists.}, } @article {pmid37981900, year = {2020}, author = {Jayathilaka, H and Krintz, C and Wolski, R}, title = {Detecting Performance Anomalies in Cloud Platform Applications.}, journal = {IEEE transactions on cloud computing}, volume = {8}, number = {3}, pages = {764-777}, pmid = {37981900}, issn = {2168-7161}, support = {R01 EB014877/EB/NIBIB NIH HHS/United States ; }, abstract = {We present Roots, a full-stack monitoring and analysis system for performance anomaly detection and bottleneck identification in cloud platform-as-a-service (PaaS) systems. 
Roots facilitates application performance monitoring as a core capability of PaaS clouds, and relieves the developers from having to instrument application code. Roots tracks HTTP/S requests to hosted cloud applications and their use of PaaS services. To do so it employs lightweight monitoring of PaaS service interfaces. Roots processes this data in the background using multiple statistical techniques that in combination detect performance anomalies (i.e. violations of service-level objectives). For each anomaly, Roots determines whether the event was caused by a change in the request workload or by a performance bottleneck in a PaaS service. By correlating data collected across different layers of the PaaS, Roots is able to trace high-level performance anomalies to bottlenecks in specific components in the cloud platform. We implement Roots using the AppScale PaaS and evaluate its overhead and accuracy.}, } @article {pmid38486787, year = {2020}, author = {Liang, F and Yu, W and Liu, X and Griffith, D and Golmie, N}, title = {Towards Edge-Based Deep Learning in Industrial Internet of Things.}, journal = {IEEE internet of things journal}, volume = {7}, number = {5}, pages = {}, pmid = {38486787}, issn = {2327-4662}, support = {9999-NIST/ImNIST/Intramural NIST DOC/United States ; }, abstract = {As a typical application of the Internet of Things (IoT), the Industrial Internet of Things (IIoT) connects all the related IoT sensing and actuating devices ubiquitously so that the monitoring and control of numerous industrial systems can be realized. Deep learning, as one viable way to carry out big data-driven modeling and analysis, could be integrated in IIoT systems to aid the automation and intelligence of IIoT systems. As deep learning requires large computation power, it is commonly deployed in cloud servers. 
Thus, the data collected by IoT devices must be transmitted to the cloud for training process, contributing to network congestion and affecting the IoT network performance as well as the supported applications. To address this issue, in this paper we leverage fog/edge computing paradigm and propose an edge computing-based deep learning model, which utilizes edge computing to migrate the deep learning process from cloud servers to edge nodes, reducing data transmission demands in the IIoT network and mitigating network congestion. Since edge nodes have limited computation ability compared to servers, we design a mechanism to optimize the deep learning model so that its requirements for computational power can be reduced. To evaluate our proposed solution, we design a testbed implemented in the Google cloud and deploy the proposed Convolutional Neural Network (CNN) model, utilizing a real-world IIoT dataset to evaluate our approach. Our experimental results confirm the effectiveness of our approach, which can not only reduce the network traffic overhead for IIoT, but also maintain the classification accuracy in comparison with several baseline schemes.}, } @article {pmid37309413, year = {2019}, author = {Liu, DM and Salganik, MJ}, title = {Successes and Struggles with Computational Reproducibility: Lessons from the Fragile Families Challenge.}, journal = {Socius : sociological research for a dynamic world}, volume = {5}, number = {}, pages = {}, pmid = {37309413}, issn = {2378-0231}, support = {R01 HD039135/HD/NICHD NIH HHS/United States ; R24 HD047879/HD/NICHD NIH HHS/United States ; P2C HD047879/HD/NICHD NIH HHS/United States ; R01 HD036916/HD/NICHD NIH HHS/United States ; R01 HD040421/HD/NICHD NIH HHS/United States ; }, abstract = {Reproducibility is fundamental to science, and an important component of reproducibility is computational reproducibility: the ability of a researcher to recreate the results of a published study using the original author's raw data and 
code. Although most people agree that computational reproducibility is important, it is still difficult to achieve in practice. In this article, the authors describe their approach to enabling computational reproducibility for the 12 articles in this special issue of Socius about the Fragile Families Challenge. The approach draws on two tools commonly used by professional software engineers but not widely used by academic researchers: software containers (e.g., Docker) and cloud computing (e.g., Amazon Web Services). These tools made it possible to standardize the computing environment around each submission, which will ease computational reproducibility both today and in the future. Drawing on their successes and struggles, the authors conclude with recommendations to researchers and journals.}, } @article {pmid36658912, year = {2018}, author = {Xin, T and Huang, S and Lu, S and Li, K and Luo, Z and Yin, Z and Li, J and Lu, D and Long, G and Zeng, B}, title = {NMRCloudQ: a quantum cloud experience on a nuclear magnetic resonance quantum computer.}, journal = {Science bulletin}, volume = {63}, number = {1}, pages = {17-23}, doi = {10.1016/j.scib.2017.12.022}, pmid = {36658912}, issn = {2095-9281}, abstract = {Cloud-based quantum computing is anticipated to be the most useful and reachable form for public users to experience with the power of quantum. As initial attempts, IBM Q has launched influential cloud services on a superconducting quantum processor in 2016, but no other platforms has followed up yet. Here, we report our new cloud quantum computing service - NMRCloudQ (http://nmrcloudq.com/zh-hans/), where nuclear magnetic resonance, one of the pioneer platforms with mature techniques in experimental quantum computing, plays as the role of implementing computing tasks. 
Our service provides a comprehensive software environment preconfigured with a list of quantum information processing packages, and aims to be freely accessible to either amateurs that look forward to keeping pace with this quantum era or professionals that are interested in carrying out real quantum computing experiments in person. In our current version, four qubits are already usable with on average 99.10% single-qubit gate fidelity and 97.15% two-qubit fidelity via randomized benchmarking tests. Improved control precisions as well as a new seven-qubit processor are also in preparation and will be available later.}, } @article {pmid36937228, year = {2017}, author = {Navas-Molina, JA and Hyde, ER and Sanders, J and Knight, R}, title = {The Microbiome and Big Data.}, journal = {Current opinion in systems biology}, volume = {4}, number = {}, pages = {92-96}, pmid = {36937228}, issn = {2452-3100}, support = {P01 DK078669/DK/NIDDK NIH HHS/United States ; R01 HG004872/HG/NHGRI NIH HHS/United States ; U01 HG004866/HG/NHGRI NIH HHS/United States ; U01 HG006537/HG/NHGRI NIH HHS/United States ; }, abstract = {Microbiome datasets have expanded rapidly in recent years. Advances in DNA sequencing, as well as the rise of shotgun metagenomics and metabolomics, are producing datasets that exceed the ability of researchers to analyze them on their personal computers. Here we describe what Big Data is in the context of microbiome research, how this data can be transformed into knowledge about microbes and their functions in their environments, and how the knowledge can be applied to move microbiome research forward. 
In particular, the development of new high-resolution tools to assess strain-level variability (moving away from OTUs), the advent of cloud computing and centralized analysis resources such as Qiita (for sequences) and GNPS (for mass spectrometry), and better methods for curating and describing "metadata" (contextual information about the sequence or chemical information) are rapidly assisting the use of microbiome data in fields ranging from human health to environmental studies.}, } @article {pmid35799648, year = {2022}, author = {Ehsan, A and Haider, KZ and Faisal, S and Zahid, FM and Wangari, IM}, title = {Self-Adaptation Resource Allocation for Continuous Offloading Tasks in Pervasive Computing.}, journal = {Computational and mathematical methods in medicine}, volume = {2022}, number = {}, pages = {8040487}, pmid = {35799648}, issn = {1748-6718}, mesh = {Algorithms ; *Artificial Intelligence ; Cloud Computing ; Humans ; *Mobile Applications ; Resource Allocation ; }, abstract = {Advancement in technology has led to an increase in data. Consequently, techniques such as deep learning and artificial intelligence which are used in deciphering data are increasingly becoming popular. Further, advancement in technology does increase user expectations on devices, including consumer interfaces such as mobile apps, virtual environments, or popular software systems. As a result, power from the battery is consumed fast as it is used in providing high definition display as well as in charging the sensors of the devices. Low latency requires more power consumption in certain conditions. Cloud computing improves the computational difficulties of smart devices with offloading. By optimizing the device's parameters to make it easier to find optimal decisions for offloading tasks, using a metaheuristic algorithm to transfer the data or offload the task, cloud computing makes it easier. 
In cloud servers, we offload the tasks and limit their resources by simulating them in a virtual environment. Then we check resource parameters and compare them using metaheuristic algorithms. When comparing the default algorithm FCFS to ACO or PSO, we find that PSO has less battery or makespan time compared to FCFS or ACO. The energy consumption of devices is reduced if their resources are offloaded, so we compare the results of metaheuristic algorithms to find less battery usage or makespan time, resulting in the PSO increasing battery life or making the system more efficient.}, } @article {pmid35795755, year = {2022}, author = {Li, J and Guo, B and Liu, K and Zhou, J}, title = {Low Power Scheduling Approach for Heterogeneous System Based on Heuristic and Greedy Method.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {9598933}, pmid = {35795755}, issn = {1687-5273}, abstract = {Big data, cloud computing, and artificial intelligence technologies supported by heterogeneous systems are constantly changing our life and cognition of the world. At the same time, its energy consumption affects the operation cost and system reliability, and this attracts the attention of architecture designers and researchers. In order to solve the problem of energy in heterogeneous system environment, inspired by the results of 0-1 programming, a scheduling method of heuristic and greedy energy saving (HGES) approach is proposed to allocate tasks reasonably to achieve the purpose of energy saving. Firstly, all tasks are assigned to each GPU in the system, and then the tasks are divided into high-value tasks and low-value tasks by the calculated average time value and variance value of all tasks. By using the greedy method, the high-value tasks are assigned first, and then the low-value tasks are allocated. 
In order to verify the effectiveness and rationality of HGES, different tasks with different inputs and different comparison methods are designed and tested. The experimental results on different platforms show that the HGES has better energy saving than that of existing method and can get result faster than that of the 0-1 programming.}, } @article {pmid35795749, year = {2022}, author = {Zhang, H and Zuo, F}, title = {Construction of Digital Teaching Resources of British and American Literature Using Few-Shot Learning and Cloud Computing.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {4526128}, pmid = {35795749}, issn = {1687-5273}, mesh = {*Cloud Computing ; Humans ; *Learning ; Reproducibility of Results ; United States ; }, abstract = {British and American literature is a compulsory course for English majors in Chinese colleges and universities. It plays an important role in cultivating students' aesthetic consciousness and moral cultivation, improving students' humanistic quality and cultural taste, and shaping students' complete personalities. With the rapid development of cloud technology and mobile Internet technology, mobile learning based on mobile devices will become an important direction of mobile Internet technology applications. Based on cloud computing, this paper studies the construction of digital teaching resources of the British and American literature. Through the experiment on the learning simplicity of literature courses for English majors, it is found that during the learning period of 40 people, the average proportion of the most difficult is 16.3%, the average proportion of the second difficult is 35.2%, and the average proportion of the easier is 18.5%. Compared with the next difficulty, the proportion of difficulty is the highest, followed by the easy and finally the most difficult. 
As one of the core technologies of cloud computing, data split storage technology adopts measures such as isomorphism and interchangeability of computing nodes, redundant storage, and multicopy fault tolerance to ensure the high security and reliability of user data and users do not have to worry about data loss and virus invasion. As a new generation of technical means, cloud computing can realize the unified management and scheduling of distributed and heterogeneous resources and provide a new development direction for promoting the coconstruction and sharing of the British and American literature digital teaching platforms in higher vocational colleges and truly realizing national learning and lifelong learning.}, } @article {pmid35792609, year = {2022}, author = {Gause, G and Mokgaola, IO and Rakhudu, MA}, title = {Technology usage for teaching and learning in nursing education: An integrative review.}, journal = {Curationis}, volume = {45}, number = {1}, pages = {e1-e9}, pmid = {35792609}, issn = {2223-6279}, mesh = {*COVID-19 ; *Education, Nursing ; Humans ; Learning ; Technology ; }, abstract = {BACKGROUND: The increasing availability of technology devices or portable digital assistant devices continues to change the teaching-learning landscape, including technology-supported learning. Portable digital assistants and technology usage have become an integral part of teaching and learning nowadays. Cloud computing, which includes YouTube, Google Apps, Dropbox and Twitter, has become the reality of today's teaching and learning and has noticeably improved higher education, including nursing education.

OBJECTIVES:  The aim of this integrative literature review was to explore and describe technology usage for teaching and learning in nursing education.

METHOD:  A five-step integrative review framework by Whittemore and Knafl was used to attain the objective of this study. The authors searched for both empirical and non-empirical articles from EBSCOhost (health information source and health science), ScienceDirect and African Journals Online Library databases to establish what is already known about the keywords. Key terms included in the literature search were coronavirus disease 2019 (COVID-19), digital learning, online learning, nursing, teaching and learning, and technology use.

RESULTS:  Nineteen articles were selected for analysis. The themes that emerged from this review were (1) technology use in nursing education, (2) the manner in which technology is used in nursing education, (3) antecedents for technology use in nursing education, (4) advantages of technology use in nursing education, (5) disadvantages of technology use in nursing education and (6) technology use in nursing education amidst COVID-19.

CONCLUSION:  Technology in nursing education is used in both clinical and classroom teaching to complement learning. However, there is still a gap in its acceptance despite its upward trend.Contribution: The findings of this study contribute to the body of knowledge on the phenomenon of technology use for teaching and learning in nursing education.}, } @article {pmid35782725, year = {2022}, author = {Wang, X and Wang, C and Li, L and Ma, Q and Ma, A and Liu, B}, title = {DESSO-DB: A web database for sequence and shape motif analyses and identification.}, journal = {Computational and structural biotechnology journal}, volume = {20}, number = {}, pages = {3053-3058}, pmid = {35782725}, issn = {2001-0370}, abstract = {Cis-regulatory motif (motif for short) identification and analyses are essential steps in detecting gene regulatory mechanisms. Deep learning (DL) models have shown substantial advances in motif prediction. In parallel, intuitive and integrative web databases are needed to make effective use of DL models and ensure easy access to the identified motifs. Here, we present DESSO-DB, a web database developed to allow efficient access to the identified motifs and diverse motif analyses. DESSO-DB provides motif prediction results and visualizations of 690 ENCODE human Chromatin Immunoprecipitation sequencing (ChIP-seq) data (including 161 transcription factors (TFs) in 91 cell lines) and 1,677 human ChIP-seq data (including 547 TFs in 359 cell lines) from Cistrome DB using DESSO, which is an in-house developed DL tool for motif prediction. It also provides online motif finding and scanning functions for new ChIP-seq/ATAC-seq datasets and downloadable motif results of the above 690 DECODE datasets, 126 cancer ChIP-seq, 55 RNA Crosslinking-Immunoprecipitation and high-throughput sequencing (CLIP-seq) data. DESSO-DB is deployed on the Google Cloud Platform, providing stabilized and efficient resources freely to the public. 
DESSO-DB is free and available at http://cloud.osubmi.com/DESSO/.}, } @article {pmid35773889, year = {2022}, author = {Kiourtis, A and Karamolegkos, P and Karabetian, A and Voulgaris, K and Poulakis, Y and Mavrogiorgou, A and Kyriazis, D}, title = {An Autoscaling Platform Supporting Graph Data Modelling Big Data Analytics.}, journal = {Studies in health technology and informatics}, volume = {295}, number = {}, pages = {376-379}, doi = {10.3233/SHTI220743}, pmid = {35773889}, issn = {1879-8365}, mesh = {Big Data ; *COVID-19 ; Data Science ; Delivery of Health Care ; *Diastema ; Humans ; }, abstract = {Big Data has proved to be vast and complex, without being efficiently manageable through traditional architectures, whereas data analysis is considered crucial for both technical and non-technical stakeholders. Current analytics platforms are siloed for specific domains, whereas the requirements to enhance their use and lower their technicalities are continuously increasing. This paper describes a domain-agnostic single access autoscaling Big Data analytics platform, namely Diastema, as a collection of efficient and scalable components, offering user-friendly analytics through graph data modelling, supporting technical and non-technical stakeholders. 
Diastema's applicability is evaluated in healthcare through a predicting classifier for a COVID19 dataset, considering real-world constraints.}, } @article {pmid35759991, year = {2022}, author = {Wu, Z and Xuan, S and Xie, J and Lin, C and Lu, C}, title = {How to ensure the confidentiality of electronic medical records on the cloud: A technical perspective.}, journal = {Computers in biology and medicine}, volume = {147}, number = {}, pages = {105726}, doi = {10.1016/j.compbiomed.2022.105726}, pmid = {35759991}, issn = {1879-0534}, mesh = {Computer Security ; *Confidentiality ; *Electronic Health Records ; Humans ; }, abstract = {From a technical perspective, for electronic medical records (EMR), this paper proposes an effective confidential management solution on the cloud, whose basic idea is to deploy a trusted local server between the untrusted cloud and each trusted client of a medical information management system, responsible for running an EMR cloud hierarchical storage model and an EMR cloud segmentation query model. (1) The EMR cloud hierarchical storage model is responsible for storing light EMR data items (such as patient basic information) on the local server, while encrypting heavy EMR data items (such as patient medical images) and storing them on the cloud, to ensure the confidentiality of electronic medical records on the cloud. (2) The EMR cloud segmentation query model performs EMR related query operations through the collaborative interaction between the local server and the cloud server, to ensure the accuracy and efficiency of each EMR query statement. 
Finally, both theoretical analysis and experimental evaluation demonstrate the effectiveness of the proposed solution for confidentiality management of electronic medical records on the cloud, i.e., which can ensure the confidentiality of electronic medical records on the untrusted cloud, without compromising the availability of an existing medical information management system.}, } @article {pmid35756852, year = {2022}, author = {Puneet, and Kumar, R and Gupta, M}, title = {Optical coherence tomography image based eye disease detection using deep convolutional neural network.}, journal = {Health information science and systems}, volume = {10}, number = {1}, pages = {13}, pmid = {35756852}, issn = {2047-2501}, abstract = {Over the past few decades, health care industries and medical practitioners faced a lot of obstacles to diagnosing medical-related problems due to inadequate technology and availability of equipment. In the present era, computer science technologies such as IoT, Cloud Computing, Artificial Intelligence and its allied techniques, etc. play a crucial role in the identification of medical diseases, especially in the domain of Ophthalmology. Despite this, ophthalmologists have to perform the various disease diagnosis task manually which is time-consuming and the chances of error are also very high because some of the abnormalities of eye diseases possess the same symptoms. Furthermore, multiple autonomous systems also exist to categorize the diseases but their prediction rate does not accomplish state-of-art accuracy. In the proposed approach by implementing the concept of Attention, Transfer Learning with the Deep Convolution Neural Network, the model accomplished an accuracy of 97.79% and 95.6% on the training and testing data respectively. This autonomous model efficiently classifies the various oscular disorders namely Choroidal Neovascularization, Diabetic Macular Edema, Drusen from the Optical Coherence Tomography images. 
It may provide a realistic solution to the healthcare sector to bring down the ophthalmologist burden in the screening of Diabetic Retinopathy.}, } @article {pmid35756406, year = {2022}, author = {Zhang, H and Li, M}, title = {Integrated Design and Development of Intelligent Scenic Area Rural Tourism Information Service Based on Hybrid Cloud.}, journal = {Computational and mathematical methods in medicine}, volume = {2022}, number = {}, pages = {5316304}, pmid = {35756406}, issn = {1748-6718}, mesh = {Humans ; Information Services ; *Tourism ; *Travel ; }, abstract = {Although the "Internet+" technologies (big data and cloud computing) have been implemented in many industries, each industry involved in rural tourism economic information services has its own database, and there are still vast economic information resources that have not been exploited. Z travel agency through rural tourism enterprise third-party information services and mobile context-awareness-based Z travel has achieved good economic and social benefits by deep value mining and innovative application of the existing data of the enterprise through the third-party information service of rural tourism enterprises and mobile context-aware travel recommendation service. It clearly demonstrates that, in order to maximise the benefits of economic data, rural tourist businesses should focus not only on the application of new technologies and methodologies but also on the core of demand and data-driven and thoroughly investigate the potential value of current data. 
This paper mainly analyzes the problems related to how rural tourism can be upgraded under the smart tourism platform, with the aim of improving the development of China's rural tourism industry with the help of an integrated smart tourism platform, and proposes a hybrid cloud-based integrated system of smart scenic rural tourism information services, which can meet the actual use needs of rural tourism, with good shared service effect and platform application performance, and promote the development of rural tourism and resource utilization rate.}, } @article {pmid35755764, year = {2022}, author = {Hu, Q}, title = {Optimization of Online Course Platform for Piano Preschool Education Based on Internet Cloud Computing System.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {6525866}, pmid = {35755764}, issn = {1687-5273}, mesh = {*Cloud Computing ; Humans ; Internet ; *Learning ; Students ; }, abstract = {This article focuses on introducing online piano teaching methods and has developed and implemented a preschool piano education online course platform. The system consists of four parts: backend, WeChat, client, and web page. Backend development uses PHP language and Laravel system framework, WeChat and web development both use JavaScript language and React framework, client development uses Objective-C language, and the system provides internal support for RESTful API, mainly for client, WeChat, and web. The client relies on the existing voice sensors of the research group to recognize and evaluate the performance of the students. The role of the client is to show the students their homework and demonstrate the activities performed by the teacher. The function of the WeChat terminal is to manage student work, user information, and user social interaction functions. The function of the web page is the score management and data analysis functions. 
Based on the knowledge of network course design, this article studies the design of piano preschool education platform and adds relevant components of the Internet cloud computer system and voice sensor to this platform, which provides great convenience for students to learn piano.}, } @article {pmid35755732, year = {2022}, author = {Liu, B and Zhang, T and Hu, W}, title = {Intelligent Traffic Flow Prediction and Analysis Based on Internet of Things and Big Data.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {6420799}, pmid = {35755732}, issn = {1687-5273}, mesh = {Automobiles ; *Big Data ; Cities ; Humans ; *Internet of Things ; Transportation ; }, abstract = {Nowadays, the problem of road traffic safety cannot be ignored. Almost all major cities have problems such as poor traffic environment and low road efficiency. Large-scale and long-term traffic congestion occurs almost every day. Transportation has developed rapidly, and more and more advanced means of transportation have emerged. However, automobile is one of the main means of transportation for people to travel. In the world, there are serious traffic jams in almost all cities. The excessive traffic flow every day leads to the paralysis of the urban transportation system, which brings great inconvenience and impact to people's travel. Various countries have also actively taken corresponding measures, i.e., traffic diversion, number restriction, or expanding the scale of the road network, but these measures can bring little effect. Traditional intelligent traffic flow forecasting has some problems, such as low accuracy and delay. Aiming at this problem, this paper uses the model of the combination of Internet of Things and big data to apply and analyze its social benefits in intelligent traffic flow forecasting and analyzes its three-tier network architecture model, namely, perception layer, network layer, and application layer. 
Research and analyze the mode of combining cloud computing and edge computing. From the multiperspective linear discriminant analysis algorithm of the combination method of combining the same points and differences between data and data into multiple atomic services, intelligent traffic flow prediction based on the combination of Internet of Things and big data is performed. Through the monitoring and extraction of relevant traffic flow data, data analysis, processing and storage, and visual display, improve the accuracy and effectiveness and make it easier to improve the prediction accuracy of overall traffic flow. The traffic flow prediction of the system of Internet of Things and big data is given through the case experiment. The method proposed in this paper can be applied in intelligent transportation services and can predict the stability of transportation and traffic flow in real time so as to optimize traffic congestion, reduce manual intervention, and achieve the goal of intelligent traffic management.}, } @article {pmid35755635, year = {2022}, author = {Sladky, V and Nejedly, P and Mivalt, F and Brinkmann, BH and Kim, I and St Louis, EK and Gregg, NM and Lundstrom, BN and Crowe, CM and Attia, TP and Crepeau, D and Balzekas, I and Marks, VS and Wheeler, LP and Cimbalnik, J and Cook, M and Janca, R and Sturges, BK and Leyde, K and Miller, KJ and Van Gompel, JJ and Denison, T and Worrell, GA and Kremen, V}, title = {Distributed brain co-processor for tracking spikes, seizures and behaviour during electrical brain stimulation.}, journal = {Brain communications}, volume = {4}, number = {3}, pages = {fcac115}, pmid = {35755635}, issn = {2632-1297}, support = {R01 NS092882/NS/NINDS NIH HHS/United States ; }, abstract = {Early implantable epilepsy therapy devices provided open-loop electrical stimulation without brain sensing, computing, or an interface for synchronized behavioural inputs from patients. 
Recent epilepsy stimulation devices provide brain sensing but have not yet developed analytics for accurately tracking and quantifying behaviour and seizures. Here we describe a distributed brain co-processor providing an intuitive bi-directional interface between patient, implanted neural stimulation and sensing device, and local and distributed computing resources. Automated analysis of continuous streaming electrophysiology is synchronized with patient reports using a handheld device and integrated with distributed cloud computing resources for quantifying seizures, interictal epileptiform spikes and patient symptoms during therapeutic electrical brain stimulation. The classification algorithms for interictal epileptiform spikes and seizures were developed and parameterized using long-term ambulatory data from nine humans and eight canines with epilepsy, and then implemented prospectively in out-of-sample testing in two pet canines and four humans with drug-resistant epilepsy living in their natural environments. Accurate seizure diaries are needed as the primary clinical outcome measure of epilepsy therapy and to guide brain-stimulation optimization. The brain co-processor system described here enables tracking interictal epileptiform spikes, seizures and correlation with patient behavioural reports. 
In the future, correlation of spikes and seizures with behaviour will allow more detailed investigation of the clinical impact of spikes and seizures on patients.}, } @article {pmid35751030, year = {2022}, author = {Shaukat, Z and Farooq, QUA and Tu, S and Xiao, C and Ali, S}, title = {A state-of-the-art technique to perform cloud-based semantic segmentation using deep learning 3D U-Net architecture.}, journal = {BMC bioinformatics}, volume = {23}, number = {1}, pages = {251}, pmid = {35751030}, issn = {1471-2105}, mesh = {*Brain Neoplasms/diagnostic imaging/pathology ; Cloud Computing ; *Deep Learning ; *Glioma ; Humans ; Image Processing, Computer-Assisted/methods ; Magnetic Resonance Imaging/methods ; Semantics ; }, abstract = {Glioma is the most aggressive and dangerous primary brain tumor with a survival time of less than 14 months. Segmentation of tumors is a necessary task in the image processing of the gliomas and is important for its timely diagnosis and starting a treatment. Using 3D U-net architecture to perform semantic segmentation on brain tumor dataset is at the core of deep learning. In this paper, we present a unique cloud-based 3D U-Net method to perform brain tumor segmentation using BRATS dataset. The system was effectively trained by using Adam optimization solver by utilizing multiple hyper parameters. We got an average dice score of 95% which makes our method the first cloud-based method to achieve maximum accuracy. The dice score is calculated by using Sørensen-Dice similarity coefficient. We also performed an extensive literature review of the brain tumor segmentation methods implemented in the last five years to get a state-of-the-art picture of well-known methodologies with a higher dice score. 
In comparison to the already implemented architectures, our method ranks on top in terms of accuracy in using a cloud-based 3D U-Net framework for glioma segmentation.}, } @article {pmid35747132, year = {2022}, author = {Li, W and Guo, Y}, title = {A Secure Private Cloud Storage Platform for English Education Resources Based on IoT Technology.}, journal = {Computational and mathematical methods in medicine}, volume = {2022}, number = {}, pages = {8453470}, pmid = {35747132}, issn = {1748-6718}, mesh = {*Big Data ; *Cloud Computing ; Humans ; Technology ; }, abstract = {The contemporary ubiquitous "cloud" network knowledge and information resources, as well as ecological pedagogy theory, have enlarged teaching research's perspective, widened teaching research's innovation area, and created practical options for English classroom reform. Cloud education relies on the Internet of Things, cloud computing, and big data to have a huge impact on the English learning process. The key to the integration of English education resources is the storage of huge amount of English teaching data. Applying the technology and methods of cloud storage to the construction of English education resource integration can effectively save the educational resources of schools, improve the utilization rate of English education resources, and thus enhance the teaching level of English subjects. In this work, we examine the existing state of English education resource building and teaching administration and offer a way for creating a "private cloud" of English education materials. We not only examined the architecture and three-layer modules of cloud computing in depth, but we also analyzed the "private cloud" technology and built the cloud structure of English teaching materials on this foundation. 
We hope that this paper can help and inspire us to solve the problems of uneven distribution, irregular management, and difficult sharing in the construction of English education resources.}, } @article {pmid35746414, year = {2022}, author = {Ud Din, MM and Alshammari, N and Alanazi, SA and Ahmad, F and Naseem, S and Khan, MS and Haider, HSI}, title = {InteliRank: A Four-Pronged Agent for the Intelligent Ranking of Cloud Services Based on End-Users' Feedback.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {12}, pages = {}, pmid = {35746414}, issn = {1424-8220}, mesh = {*Cloud Computing ; Data Collection ; Feedback ; Reproducibility of Results ; *Software ; }, abstract = {Cloud Computing (CC) provides a combination of technologies that allows the user to use the most resources in the least amount of time and with the least amount of money. CC semantics play a critical role in ranking heterogeneous data by using the properties of different cloud services and then achieving the optimal cloud service. Regardless of the efforts made to enable simple access to this CC innovation, in the presence of various organizations delivering comparative services at varying cost and execution levels, it is far more difficult to identify the ideal cloud service based on the user's requirements. In this research, we propose a Cloud-Services-Ranking Agent (CSRA) for analyzing cloud services using end-users' feedback, including Platform as a Service (PaaS), Infrastructure as a Service (IaaS), and Software as a Service (SaaS), based on ontology mapping and selecting the optimal service. The proposed CSRA possesses Machine-Learning (ML) techniques for ranking cloud services using parameters such as availability, security, reliability, and cost. Here, the Quality of Web Service (QWS) dataset is used, which has seven major cloud services categories, ranked from 0-6, to extract the required persuasive features through Sequential Minimal Optimization Regression (SMOreg). 
The classification outcomes through SMOreg are capable and demonstrate a general accuracy of around 98.71% in identifying optimum cloud services through the identified parameters. The main advantage of SMOreg is that the amount of memory required for SMO is linear. The findings show that our improved model in terms of precision outperforms prevailing techniques such as Multilayer Perceptron (MLP) and Linear Regression (LR).}, } @article {pmid35746245, year = {2022}, author = {Liu, X and Jin, J and Dong, F}, title = {Edge-Computing-Based Intelligent IoT: Architectures, Algorithms and Applications.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {12}, pages = {}, pmid = {35746245}, issn = {1424-8220}, abstract = {With the rapid growth of the Internet of Things (IoT), 5G networks and beyond, the computing paradigm for intelligent IoT systems is shifting from conventional centralized-cloud computing to distributed edge computing [...].}, } @article {pmid35746169, year = {2022}, author = {Dezfouli, B and Liu, Y}, title = {Editorial: Special Issue "Edge and Fog Computing for Internet of Things Systems".}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {12}, pages = {}, pmid = {35746169}, issn = {1424-8220}, abstract = {Employing edge and fog computing for building IoT systems is essential, especially because of the massive number of data generated by sensing devices, the delay requirements of IoT applications, the high burden of data processing on cloud platforms, and the need to take immediate actions against security threats.}, } @article {pmid35746127, year = {2022}, author = {Lakhan, A and Morten Groenli, T and Majumdar, A and Khuwuthyakorn, P and Hussain Khoso, F and Thinnukool, O}, title = {Potent Blockchain-Enabled Socket RPC Internet of Healthcare Things (IoHT) Framework for Medical Enterprises.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {12}, pages = {}, pmid = {35746127}, issn = {1424-8220}, mesh = 
{*Blockchain ; Cloud Computing ; Computer Security ; Delivery of Health Care ; Humans ; Internet ; *Internet of Things ; }, abstract = {Present-day intelligent healthcare applications offer digital healthcare services to users in a distributed manner. The Internet of Healthcare Things (IoHT) is the mechanism of the Internet of Things (IoT) found in different healthcare applications, with devices that are attached to external fog cloud networks. Using different mobile applications connecting to cloud computing, the applications of the IoHT are remote healthcare monitoring systems, high blood pressure monitoring, online medical counseling, and others. These applications are designed based on a client-server architecture based on various standards such as the common object request broker (CORBA), a service-oriented architecture (SOA), remote method invocation (RMI), and others. However, these applications do not directly support the many healthcare nodes and blockchain technology in the current standard. Thus, this study devises a potent blockchain-enabled socket RPC IoHT framework for medical enterprises (e.g., healthcare applications). The goal is to minimize service costs, blockchain security costs, and data storage costs in distributed mobile cloud networks. 
Simulation results show that the proposed blockchain-enabled socket RPC minimized the service cost by 40%, the blockchain cost by 49%, and the storage cost by 23% for healthcare applications.}, } @article {pmid35745356, year = {2022}, author = {Liu, H and Zhang, R and Liu, Y and He, C}, title = {Unveiling Evolutionary Path of Nanogenerator Technology: A Novel Method Based on Sentence-BERT.}, journal = {Nanomaterials (Basel, Switzerland)}, volume = {12}, number = {12}, pages = {}, pmid = {35745356}, issn = {2079-4991}, support = {72104224, 71974107, L2124002, 91646102//National Natural Science Foundation of China/ ; CKCEST-2022-1-30//Construction Project of China Knowledge Center for Engineering Sciences and Technology/ ; }, abstract = {In recent years, nanogenerator technology has developed rapidly with the rise of cloud computing, artificial intelligence, and other fields. Therefore, the quick identification of the evolutionary path of nanogenerator technology from a large amount of data attracts much attention. It is of great significance in grasping technical trends and analyzing technical areas of interest. However, there are some limitations in previous studies. On the one hand, previous research on technological evolution has generally utilized bibliometrics, patent analysis, and citations between patents and papers, ignoring the rich semantic information contained therein; on the other hand, its evolution analysis perspective is single, and it is difficult to obtain accurate results. Therefore, this paper proposes a new framework based on the methods of Sentence-BERT and phrase mining, using multi-source data, such as papers and patents, to unveil the evolutionary path of nanogenerator technology. Firstly, using text vectorization, clustering algorithms, and the phrase mining method, current technical themes of significant interest to researchers can be obtained. 
Next, this paper correlates the multi-source fusion themes through semantic similarity calculation and demonstrates the multi-dimensional technology evolutionary path by using the "theme river map". Finally, this paper presents an evolution analysis from the perspective of frontier research and technology research, so as to discover the development focus of nanogenerators and predict the future application prospects of nanogenerator technology.}, } @article {pmid35742161, year = {2022}, author = {Ashraf, E and Areed, NFF and Salem, H and Abdelhay, EH and Farouk, A}, title = {FIDChain: Federated Intrusion Detection System for Blockchain-Enabled IoT Healthcare Applications.}, journal = {Healthcare (Basel, Switzerland)}, volume = {10}, number = {6}, pages = {}, pmid = {35742161}, issn = {2227-9032}, abstract = {Recently, there has been considerable growth in the internet of things (IoT)-based healthcare applications; however, they suffer from a lack of intrusion detection systems (IDS). Leveraging recent technologies, such as machine learning (ML), edge computing, and blockchain, can provide suitable and strong security solutions for preserving the privacy of medical data. In this paper, FIDChain IDS is proposed using lightweight artificial neural networks (ANN) in a federated learning (FL) way to ensure healthcare data privacy preservation with the advances of blockchain technology that provides a distributed ledger for aggregating the local weights and then broadcasting the updated global weights after averaging, which prevents poisoning attacks and provides full transparency and immutability over the distributed system with negligible overhead. Applying the detection model at the edge protects the cloud if an attack happens, as it blocks the data from its gateway with smaller detection time and lesser computing and processing capacity as FL deals with smaller sets of data. 
The ANN and eXtreme Gradient Boosting (XGBoost) models were evaluated using the BoT-IoT dataset. The results show that ANN models have higher accuracy and better performance with the heterogeneity of data in IoT devices, such as intensive care unit (ICU) in healthcare systems. Testing the FIDChain with different datasets (CSE-CIC-IDS2018, Bot Net IoT, and KDD Cup 99) reveals that the BoT-IoT dataset has the most stable and accurate results for testing IoT applications, such as those used in healthcare systems.}, } @article {pmid35734349, year = {2022}, author = {Aldahwan, NS and Ramzan, MS}, title = {The Descriptive Data Analysis for the Adoption of Community Cloud in Saudi HEI-Based Factor Adoption.}, journal = {BioMed research international}, volume = {2022}, number = {}, pages = {7765204}, pmid = {35734349}, issn = {2314-6141}, mesh = {*Cloud Computing ; *Data Analysis ; Humans ; Reproducibility of Results ; Saudi Arabia ; Surveys and Questionnaires ; }, abstract = {Due to its increased reliability, adaptability, scalability, availability, and processing capacity, cloud computing is rapidly becoming a popular trend around the world. One of the major issues with cloud computing is making informed decision about adoption of community cloud (CC) computing (ACCC). To date, there are various technology acceptance theories and models to validate perspective of ACCC at both organizational and individual levels. However, no experimental studies have been carried out to provide a comprehensive assessment of the factors of ACCC, specifically in the area of the Saudi Higher Education (HEI) Institution. Thus, this research was aimed at exploring the factors of ACCC and the relationship to the experiences of the employees. The analysis of the employee context was driven by the success factors of technological, organizational, environmental, human, security, and advantage contexts on community cloud computing adoption in HEI. 
The data collection was a questionnaire-based survey based on 106 responses. We present findings based on descriptive analysis in identifying the significant component that contributed to the effective implementation of ACCC. Security concerns are a significant influencing element in the adoption of community cloud technology.}, } @article {pmid35730340, year = {2022}, author = {Cotur, Y and Olenik, S and Asfour, T and Bruyns-Haylett, M and Kasimatis, M and Tanriverdi, U and Gonzalez-Macia, L and Lee, HS and Kozlov, AS and Güder, F}, title = {Bioinspired Stretchable Transducer for Wearable Continuous Monitoring of Respiratory Patterns in Humans and Animals.}, journal = {Advanced materials (Deerfield Beach, Fla.)}, volume = {34}, number = {33}, pages = {e2203310}, doi = {10.1002/adma.202203310}, pmid = {35730340}, issn = {1521-4095}, support = {//Imperial College Department of Bioengineering/ ; //Institute for Security Science and Technology/ ; //Turkish Ministry of Education/ ; //EPSRC IAA/ ; OPP1212574//Bill and Melinda Gates Foundation/ ; W911QY-20-R-0022//US Army/ ; W911NF1820120//US Army/ ; 1846144//EPSRC DTP/ ; //Imperial College Centre for Processable Electronics/ ; 10004425//Innovate UK/ ; //Centre for Blast Injury Studies/ ; 214234/Z/18/Z/WT_/Wellcome Trust/United Kingdom ; }, mesh = {Animals ; Artificial Intelligence ; Dogs ; Humans ; Monitoring, Physiologic ; Silicones ; Transducers ; *Wearable Electronic Devices ; }, abstract = {A bio-inspired continuous wearable respiration sensor modeled after the lateral line system of fish is reported which is used for detecting mechanical disturbances in the water. Despite the clinical importance of monitoring respiratory activity in humans and animals, continuous measurements of breathing patterns and rates are rarely performed in or outside of clinics. This is largely because conventional sensors are too inconvenient or expensive for wearable sensing for most individuals and animals. 
The bio-inspired air-silicone composite transducer (ASiT) is placed on the chest and measures respiratory activity by continuously measuring the force applied to an air channel embedded inside a silicone-based elastomeric material. The force applied on the surface of the transducer during breathing changes the air pressure inside the channel, which is measured using a commercial pressure sensor and mixed-signal wireless electronics. The transducers produced in this work are extensively characterized and tested with humans, dogs, and laboratory rats. The bio-inspired ASiT may enable the early detection of a range of disorders that result in altered patterns of respiration. The technology reported can also be combined with artificial intelligence and cloud computing to algorithmically detect illness in humans and animals remotely, reducing unnecessary visits to clinics.}, } @article {pmid35730064, year = {2023}, author = {Pillen, D and Eckard, M}, title = {The impact of the shift to cloud computing on digital recordkeeping practices at the University of Michigan Bentley historical library.}, journal = {Archival science}, volume = {23}, number = {1}, pages = {65-80}, pmid = {35730064}, issn = {1573-7500}, abstract = {Cloud-based productivity, collaboration, and storage tools offer increased opportunities for collaboration and potential cost-savings over locally hosted solutions and have seen widespread adoption throughout industry, government, and academia over the last decade. While these tools benefit organizations, IT departments, and day-to-day-users, they present unique challenges for records managers and archivists. 
As a review of the relevant literature demonstrates, issues surrounding cloud computing are not limited to the technology-although the implementation and technological issues are numerous-but also include organization management, human behavior, regulation, and records management, making the process of archiving digital information in this day and age all the more difficult. This paper explores some of the consequences of this shift and its effect on digital recordkeeping at the Bentley Historical Library, whose mission is to "collect the materials for the University of Michigan." After providing context for this problem by discussing relevant literature, two practicing archivists will explore the impact of the move toward cloud computing as well as various productivity software and collaboration tools in use at U-M throughout the various stages of a standard lifecycle model for managing records.}, } @article {pmid35730008, year = {2022}, author = {Mahanty, C and Kumar, R and Patro, SGK}, title = {Internet of Medical Things-Based COVID-19 Detection in CT Images Fused with Fuzzy Ensemble and Transfer Learning Models.}, journal = {New generation computing}, volume = {40}, number = {4}, pages = {1125-1141}, pmid = {35730008}, issn = {0288-3635}, abstract = {One of the most difficult research areas in today's healthcare industry to combat the coronavirus pandemic is accurate COVID-19 detection. Because of its low infection miss rate and high sensitivity, chest computed tomography (CT) imaging has been recommended as a viable technique for COVID-19 diagnosis in a number of recent clinical investigations. This article presents an Internet of Medical Things (IoMT)-based platform for improving and speeding up COVID-19 identification. Clinical devices are connected to network resources in the suggested IoMT platform using cloud computing. 
The method enables patients and healthcare experts to work together in real time to diagnose and treat COVID-19, potentially saving time and effort for both patients and physicians. In this paper, we introduce a technique for classifying chest CT scan images into COVID, pneumonia, and normal classes that use a Sugeno fuzzy integral ensemble across three transfer learning models, namely SqueezeNet, DenseNet-201, and MobileNetV2. The suggested fuzzy ensemble techniques outperform each individual transfer learning methodology as well as trainable ensemble strategies in terms of accuracy. The suggested MobileNetV2 fused with Sugeno fuzzy integral ensemble model has a 99.15% accuracy rate. In the present research, this framework was utilized to identify COVID-19, but it may also be implemented and used for medical imaging analyses of other disorders.}, } @article {pmid35730007, year = {2022}, author = {Gupta, A and Singh, A}, title = {An Intelligent Healthcare Cyber Physical Framework for Encephalitis Diagnosis Based on Information Fusion and Soft-Computing Techniques.}, journal = {New generation computing}, volume = {40}, number = {4}, pages = {1093-1123}, pmid = {35730007}, issn = {0288-3635}, abstract = {Viral encephalitis is a contagious disease that causes life insecurity and is considered one of the major health concerns worldwide. It causes inflammation of the brain and, if left untreated, can have persistent effects on the central nervous system. Conspicuously, this paper proposes an intelligent cyber-physical healthcare framework based on the IoT-fog-cloud collaborative network, employing soft-computing technology and information fusion. The proposed framework uses IoT-based sensors, electronic medical records, and user devices for data acquisition. 
The fog layer, composed of numerous nodes, processes the most specific encephalitis symptom-related data to classify possible encephalitis cases in real time to issue an alarm when a significant health emergency occurs. Furthermore, the cloud layer involves a multi-step data processing scheme for in-depth data analysis. First, data obtained across multiple data generation sources are fused to obtain a more consistent, accurate, and reliable feature set. Data preprocessing and feature selection techniques are applied to the fused data for dimensionality reduction over the cloud computing platform. An adaptive neuro-fuzzy inference system is applied in the cloud to determine the risk of a disease and classify the results into one of four categories: no risk, probable risk, low risk, and acute risk. Moreover, the alerts are generated and sent to the stakeholders based on the risk factor. Finally, the computed results are stored in the cloud database for future use. For validation purposes, various experiments are performed using real-time datasets. The analysis results performed on the fog and cloud layers show higher performance than the existing models. 
Future research will focus on the resource allocation in the cloud layer while considering various security aspects to improve the utility of the proposed work.}, } @article {pmid35729139, year = {2022}, author = {Yue, YF and Chen, GP and Wang, L and Yang, J and Yang, KT}, title = {[Dynamic monitoring and evaluation of ecological environment quality in Zhouqu County, Gansu, China based on Google Earth Engine cloud platform].}, journal = {Ying yong sheng tai xue bao = The journal of applied ecology}, volume = {33}, number = {6}, pages = {1608-1614}, doi = {10.13287/j.1001-9332.202206.036}, pmid = {35729139}, issn = {1001-9332}, mesh = {China ; Cloud Computing ; *Ecosystem ; Environmental Monitoring/methods ; *Remote Sensing Technology ; Rivers ; Search Engine ; }, abstract = {Zhouqu County is located in the transition region from the Qinghai-Tibet Plateau to the Qinba Mountains, and is an important part of the ecological barrier in the upper stream of the Yangtze River. In this study, we used the Google Earth Engine cloud processing platform to perform inter-image optimal reconstruction of Landsat surface reflectance images from 1998-2019. We calculated four indicators of regional wet, green, dry, and hot. The component indicators were coupled by principal component analysis to construct remote sensing ecological index (RSEI) and to analyze the spatial and temporal variations of ecological environment quality in Zhouqu County. The results showed that the contribution of the four component indicators to the eigenvalues of the coupled RSEI were above 70%, with even distribution of the loadings, indicating that the RSEI integrated most of the features of the component indicators. From 1998 to 2019, the RSEI of Zhouqu County ranged from 0.55 to 0.63, showing an increasing trend with a growth rate of 0.04·(10 a)[-1], and the area of better grade increased by 425.56 km[2]. 
The area with altitude ≤2200 m was dominated by medium and lower ecological environment quality grade, while the area of the better ecological environment quality grade increased by 16.5%. The ecological and environmental quality of the region from 2200 to 3300 m was dominated by good grades, increasing to 71.3% in 2019, with the area of medium and below ecological and environmental quality grades decreasing year by year. The area with altitude ≥3300 m was dominated by the medium ecological quality grade. The medium and below ecological quality grades showed a "U" shape trend during the study period. The trend of ecological environment quality in Zhouqu County was becoming better, but with fluctuations. It is necessary to continuously strengthen the protection and management of ecological environment in order to guarantee the continuous improvement of ecological environment quality.}, } @article {pmid35729113, year = {2022}, author = {Erdem, C and Mutsuddy, A and Bensman, EM and Dodd, WB and Saint-Antoine, MM and Bouhaddou, M and Blake, RC and Gross, SM and Heiser, LM and Feltus, FA and Birtwistle, MR}, title = {A scalable, open-source implementation of a large-scale mechanistic model for single cell proliferation and death signaling.}, journal = {Nature communications}, volume = {13}, number = {1}, pages = {3555}, pmid = {35729113}, issn = {2041-1723}, support = {R35 GM141891/GM/NIGMS NIH HHS/United States ; U54 CA209988/CA/NCI NIH HHS/United States ; U54 HG008098/HG/NHGRI NIH HHS/United States ; R01 GM104184/GM/NIGMS NIH HHS/United States ; }, mesh = {Cell Proliferation ; *Cloud Computing ; Computer Simulation ; Signal Transduction ; *Software ; }, abstract = {Mechanistic models of how single cells respond to different perturbations can help integrate disparate big data sets or predict response to varied drug combinations. However, the construction and simulation of such models have proved challenging. 
Here, we developed a python-based model creation and simulation pipeline that converts a few structured text files into an SBML standard and is high-performance- and cloud-computing ready. We applied this pipeline to our large-scale, mechanistic pan-cancer signaling model (named SPARCED) and demonstrate it by adding an IFNγ pathway submodel. We then investigated whether a putative crosstalk mechanism could be consistent with experimental observations from the LINCS MCF10A Data Cube that IFNγ acts as an anti-proliferative factor. The analyses suggested this observation can be explained by IFNγ-induced SOCS1 sequestering activated EGF receptors. This work forms a foundational recipe for increased mechanistic model-based data integration on a single-cell level, an important building block for clinically-predictive mechanistic models.}, } @article {pmid35725904, year = {2022}, author = {Pradhan, C and Padhee, SK and Bharti, R and Dutta, S}, title = {A process-based recovery indicator for anthropogenically disturbed river system.}, journal = {Scientific reports}, volume = {12}, number = {1}, pages = {10390}, pmid = {35725904}, issn = {2045-2322}, mesh = {Cross-Sectional Studies ; Environmental Monitoring ; *Floods ; India ; *Rivers ; Seasons ; }, abstract = {The present paper utilizes entropy theory and Google earth engine cloud computing technique to investigate system state and river recovery potential in two large sub-basins of the Mahanadi River, India. The cross-sectional intensity entropy (CIE) is computed for the post-monsoon season (October-March) along the selected reaches. Further, a normalized river recovery indicator (NRRI) is formulated to assess the temporal changes in river health. Finally, NRRI is related to a process-based variable-LFE (low flow exceedance) to comprehend the dominating system dynamics and evolutionary adjustments. 
The results highlight the existence of both threshold-modulated and filter-dominated systems based on CIE and NRRI variabilities. In addition, the gradual decline in CIE and subsequent stabilization of vegetated landforms can develop an 'event-driven' state, where floods exceeding the low-flow channel possess a direct impact on the river recovery trajectory. Finally, this study emphasizes the presence of instream vegetation as an additional degree of freedom, which further controls the hierarchy of energy dissipation and morphological continuum in the macrochannel settings.}, } @article {pmid35721670, year = {2022}, author = {Bamasag, O and Alsaeedi, A and Munshi, A and Alghazzawi, D and Alshehri, S and Jamjoom, A}, title = {Real-time DDoS flood attack monitoring and detection (RT-AMD) model for cloud computing.}, journal = {PeerJ. Computer science}, volume = {7}, number = {}, pages = {e814}, pmid = {35721670}, issn = {2376-5992}, abstract = {In recent years, the advent of cloud computing has transformed the field of computing and information technology. It has been enabling customers to rent virtual resources and take advantage of various on-demand services with the lowest costs. Despite the advantages of cloud computing, it faces several threats; an example is a distributed denial of service (DDoS) attack, which is considered among the most serious. This article presents real-time monitoring and detection of DDoS attacks on the cloud using a machine learning approach. Naïve Bayes, K-nearest neighbor, decision tree, and random forest machine learning classifiers have been selected to build a predictive model named "Real-Time DDoS flood Attack Monitoring and Detection RT-AMD." The DDoS-2020 dataset was constructed with 70,020 records to evaluate RT-AMD's accuracy. The DDoS-2020 contains three protocols for network/transport-level, which are TCP, DNS, and ICMP. This article evaluates the proposed model by comparing its accuracy with related works. 
Our model has shown improvement in the results and reached real-time attack detection using incremental learning. The model achieved 99.38% accuracy for the random forest in real-time on the cloud environment and 99.39% on local testing. The RT-AMD was evaluated on the NSL-KDD dataset as well, in which it achieved 99.30% accuracy in real-time in a cloud environment.}, } @article {pmid35721407, year = {2022}, author = {Osmanoglu, M and Demir, S and Tugrul, B}, title = {Privacy-preserving k-NN interpolation over two encrypted databases.}, journal = {PeerJ. Computer science}, volume = {8}, number = {}, pages = {e965}, pmid = {35721407}, issn = {2376-5992}, abstract = {Cloud computing enables users to outsource their databases and the computing functionalities to a cloud service provider to avoid the cost of maintaining a private storage and computational requirements. It also provides universal access to data, applications, and services without location dependency. While cloud computing provides many benefits, it possesses a number of security and privacy concerns. Outsourcing data to a cloud service provider in encrypted form may help to overcome these concerns. However, dealing with the encrypted data makes it difficult for the cloud service providers to perform some operations over the data that will especially be required in query processing tasks. Among the techniques employed in query processing task, the k-nearest neighbor method draws attention due to its simplicity and efficiency, particularly on massive data sets. A number of k-nearest neighbor algorithms for query processing task on a single encrypted database have been proposed. However, the performance of k-nearest neighbor algorithms on a single database may create accuracy and reliability problems. It is a fact that collaboration among different cloud service providers yields more accurate and more reliable results in query processing. 
By considering this fact, we focus on the k-nearest neighbor (k-NN) problem over two encrypted databases. We introduce a secure two-party k-NN interpolation protocol that enables a query owner to extract the interpolation of the k-nearest neighbors of a query point from two different databases outsourced to two different cloud service providers. We also show that our protocol protects the confidentiality of the data and the query point, and hides data access patterns. Furthermore, we conducted a number of experiments to demonstrate the efficiency of our protocol. The results show that the running time of our protocol is linearly dependent on both the number of nearest neighbours and data size.}, } @article {pmid35720928, year = {2022}, author = {Yuan, G and Xie, F and Tan, H}, title = {Construction of Economic Security Early Warning System Based on Cloud Computing and Data Mining.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {2080840}, pmid = {35720928}, issn = {1687-5273}, mesh = {Algorithms ; *Cloud Computing ; Data Mining ; Forecasting ; *Models, Theoretical ; }, abstract = {Economic security is a core theoretical issue in economics. In modern economic conditions, the ups and downs caused by economic instability in any economic system will affect the stability of the financial market, bring huge losses to the economy, and affect the development of the whole national economy. Therefore, research on the regularity of economic security and economic fluctuations is one of the important contents to ensure economic stability and scientific development. Accurate monitoring and forecasting of economic security are an indispensable link in economic system regulation, and it is also an important reference factor for any economic organization to make decisions. This article focuses on the construction of an economic security early warning system as the main research content. 
It integrates cloud computing and data mining technologies and is supported by CNN-SVM algorithm and designs an early warning model that can adaptively evaluate and warn the economic security state. Experiments show that when the CNN network in the model uses ReLU activation function and SVM uses RBF function, the prediction accuracy can reach 0.98, and the prediction effect is the best. The data set is verified, and the output Q province's 2018 economic security early warning comprehensive index is 0.893. The 2019 economic security early warning index is 0.829, which is consistent with the actual situation.}, } @article {pmid35720893, year = {2022}, author = {Yin, X and He, J}, title = {Construction of Tourism E-Commerce Platform Based on Artificial Intelligence Algorithm.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {5558011}, pmid = {35720893}, issn = {1687-5273}, mesh = {*Artificial Intelligence ; Commerce ; Data Analysis ; Humans ; *Tourism ; Travel ; }, abstract = {In the late twentieth century, with the rapid development of the Internet, e-commerce has emerged rapidly, which has changed the way people travel around the world. The greatest advantages of e-commerce are the flow of information and data and the importance of traveling freely to experience the mind and body in different fields. Tourism is an important part of the development of e-commerce, but the development of e-commerce tourism lags behind. To solve the current situation of the backward development of tourism e-commerce, this article studies the construction of a tourism e-commerce platform based on an artificial intelligence algorithm. By introducing modern information technology, based on a cloud computing platform, big data analysis, K-means, and other key technologies, this article solves the current situation of the development of an e-commerce platform. 
It also analyzes the construction methods of traditional cloud platforms and modern cloud platforms through comparative analysis and solves the construction methods suitable for artificial intelligence tourism. At the same time, combined with the actual situation of tourism, this article selects the appropriate networking method based on the analysis of the advantages and disadvantages of wired and wireless coverage methods and economics to complete the project design. Its purpose is to ensure that the work meets the specific construction needs and build an artificial intelligence-based smart tourism big data analysis model. It promotes the development of tourism e-commerce industry. It saves costs and improves efficiency for travel service providers. Then, according to the actual situation of tourism, it conducts demand analysis from the perspectives of tourists, scenic spots, service providers, tourism administrative agencies, etc. Experiments show that, through the practical application of the artificial intelligence tourism mobile e-commerce platform in this article, it can be seen that the artificial intelligence tourism mobile e-commerce platform designed in this article can meet the needs of customers for shopping-related tourism commodities. Tourists of attractions have increased by 3.54%, and the economy of tourist destinations has increased by 4.2%.}, } @article {pmid35720617, year = {2022}, author = {Cheng, W and Lian, W and Tian, J}, title = {Building the hospital intelligent twins for all-scenario intelligence health care.}, journal = {Digital health}, volume = {8}, number = {}, pages = {20552076221107894}, pmid = {35720617}, issn = {2055-2076}, abstract = {The COVID-19 pandemic has accelerated a long-term trend of smart hospital development. However, there is no consistent conceptualization of what a smart hospital entails. 
Few hospitals have genuinely reached being "smart," primarily failing to bring systems together and consider implications from all perspectives. Hospital Intelligent Twins, a new technology integration powered by IoT, AI, cloud computing, and 5G application to create all-scenario intelligence for health care and hospital management. This communication presented a smart hospital for all-scenario intelligence by creating the hospital Intelligent Twins. Intelligent Twins is widely involved in medical activities. However, solving the medical ethics, protecting patient privacy, and reducing security risks involved are significant challenges for all-scenario intelligence applications. This exploration of creating hospital Intelligent Twins that can be a worthwhile endeavor to assess how to inform evidence-based decision-making better and enhance patient satisfaction and outcomes.}, } @article {pmid35713563, year = {2023}, author = {Chen, X and Xue, Y and Sun, Y and Shen, J and Song, S and Zhu, M and Song, Z and Cheng, Z and Zhou, P}, title = {Neuromorphic Photonic Memory Devices Using Ultrafast, Non-Volatile Phase-Change Materials.}, journal = {Advanced materials (Deerfield Beach, Fla.)}, volume = {35}, number = {37}, pages = {e2203909}, doi = {10.1002/adma.202203909}, pmid = {35713563}, issn = {1521-4095}, support = {2020YFA0308800//National Key Research and Development Program of China/ ; 62074042//National Natural Science Foundation of China/ ; 20501130100//Science and Technology Commission of Shanghai Municipality/ ; //Young Scientist Project of MOE Innovation Platform/ ; //Fudan University/ ; }, abstract = {The search for ultrafast photonic memory devices is inspired by the ever-increasing number of cloud-computing, supercomputing, and artificial-intelligence applications, together with the unique advantages of signal processing in the optical domain such as high speed, large bandwidth, and low energy consumption. 
By embracing silicon photonics with chalcogenide phase-change materials (PCMs), non-volatile integrated photonic memory is developed with promising potential in photonic integrated circuits and nanophotonic applications. While conventional PCMs suffer from slow crystallization speed, scandium-doped antimony telluride (SST) has been recently developed for ultrafast phase-change random-access memory applications. An ultrafast non-volatile photonic memory based on an SST thin film with a 2 ns write/erase speed is demonstrated, which is the fastest write/erase speed ever reported in integrated phase-change photonic devices. SST-based photonic memories exhibit multilevel capabilities and good stability at room temperature. By mapping the memory level to the biological synapse weight, an artificial neural network based on photonic memory devices is successfully established for image classification. Additionally, a reflective nanodisplay application using SST with optoelectronic modulation capabilities is demonstrated. 
Both the optical and electrical changes in SST during the phase transition and the fast-switching speed demonstrate their potential for use in photonic computing, neuromorphic computing, nanophotonics, and optoelectronic applications.}, } @article {pmid35712069, year = {2022}, author = {Hassan, J and Shehzad, D and Habib, U and Aftab, MU and Ahmad, M and Kuleev, R and Mazzara, M}, title = {The Rise of Cloud Computing: Data Protection, Privacy, and Open Research Challenges-A Systematic Literature Review (SLR).}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {8303504}, pmid = {35712069}, issn = {1687-5273}, mesh = {*Cloud Computing ; Computer Security ; Confidentiality ; Delivery of Health Care ; *Privacy ; }, abstract = {Cloud computing is a long-standing dream of computing as a utility, where users can store their data remotely in the cloud to enjoy on-demand services and high-quality applications from a shared pool of configurable computing resources. Thus, the privacy and security of data are of utmost importance to all of its users regardless of the nature of the data being stored. In cloud computing environments, it is especially critical because data is stored in various locations, even around the world, and users do not have any physical access to their sensitive data. Therefore, we need certain data protection techniques to protect the sensitive data that is outsourced over the cloud. In this paper, we conduct a systematic literature review (SLR) to illustrate all the data protection techniques that protect sensitive data outsourced over cloud storage. Therefore, the main objective of this research is to synthesize, classify, and identify important studies in the field of study. Accordingly, an evidence-based approach is used in this study. Preliminary results are based on answers to four research questions. Out of 493 research articles, 52 studies were selected. 
52 papers use different data protection techniques, which can be divided into two main categories, namely noncryptographic techniques and cryptographic techniques. Noncryptographic techniques consist of data splitting, data anonymization, and steganographic techniques, whereas cryptographic techniques consist of encryption, searchable encryption, homomorphic encryption, and signcryption. In this work, we compare all of these techniques in terms of data protection accuracy, overhead, and operations on masked data. Finally, we discuss the future research challenges facing the implementation of these techniques.}, } @article {pmid35712065, year = {2022}, author = {Chen, M}, title = {Integration and Optimization of British and American Literature Information Resources in the Distributed Cloud Computing Environment.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {4318962}, pmid = {35712065}, issn = {1687-5273}, mesh = {*Algorithms ; *Cloud Computing ; Models, Theoretical ; Publications ; United States ; }, abstract = {One of the most effective approaches to improve resource usage efficiency and degree of resource collecting is to integrate resources. Many studies on the integration of information resources are also available. The search engines are the most well-known. At the same time, this article intends to optimize the integration of British and American literature information resources by employing distributed cloud computing, based on the needs of British and American literature. This research develops a model for the dispersed nature of cloud computing. It optimizes the method by fitting the mathematical model of transmission cost and latency. This article analyzes the weaknesses of the current British and American literature information resource integration and optimizes them for the integration of British and American literature resources. 
The Random algorithm has the longest delay, according to the results of this paper's experiments (maximum user weighted distance). The algorithms NPA-PDP and BWF have longer delays than the algorithm Opt. The percentage decline varies between 0.17 percent and 1.11 percent for different algorithms. It demonstrates that the algorithm presented in this work can be used to integrate and maximize information resources from English and American literature.}, } @article {pmid35707200, year = {2022}, author = {Chen, Y and Zhou, W}, title = {Application of Network Information Technology in Physical Education and Training System under the Background of Big Data.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {3081523}, pmid = {35707200}, issn = {1687-5273}, mesh = {Big Data ; Humans ; *Information Technology ; *Physical Education and Training ; Students ; Universities ; }, abstract = {During the last two decades, rapid development in the network technology has been observed, particularly hardware, and the development of software technology has accelerated, resulting in the launch of a variety of novel products with a wide range of applications. Traditional sports training systems, on the other hand, have a single function and a complex operation that cannot be fully implemented in colleges and universities, causing China's sports training to stagnate for a long time. The goal of physical education and training is to teach a specific action to attain its maximum potential in a variety of ways. As a result, we should use the system to collect scientifically sound and trustworthy data to aid relevant staff in completing their training tasks. Therefore, in the context of big data, network information technology has become the main way to improve the physical education system. 
By applying cloud computing technology, machine vision technology, and 64-bit machine technology to the physical education training system, this study extracts the video data of the physical education system, designs the system video teaching process, and completes the construction of a three-dimensional human model, so as to analyze the training situation of the trainers. In this paper, 30 basketball majors in a university are selected as the professional group and 30 computer majors as the control group. The average reaction time, scores, and expert scores of the two groups are analyzed. The results show that the test of the professional group is significantly higher than that of the amateur group. At the same time, the feedback results of students using physical education and training system and normal physical education teaching and training are compared and analyzed. One week later, the students trained by the physical education system have improved their thinking ability, movement accuracy, and judgment ability, indicating that the application of the physical education training system to the actual effect is ideal.}, } @article {pmid35700763, year = {2022}, author = {Cheah, CG and Chia, WY and Lai, SF and Chew, KW and Chia, SR and Show, PL}, title = {Innovation designs of industry 4.0 based solid waste management: Machinery and digital circular economy.}, journal = {Environmental research}, volume = {213}, number = {}, pages = {113619}, doi = {10.1016/j.envres.2022.113619}, pmid = {35700763}, issn = {1096-0953}, mesh = {Artificial Intelligence ; Humans ; Industry ; Machine Learning ; *Solid Waste/analysis ; *Waste Management ; }, abstract = {The Industrial Revolution 4.0 (IR 4.0) holds the opportunity to improve the efficiency of managing solid waste through digital and machinery applications, effectively eliminating, recovering, and repurposing waste. 
This research aims to discover and review the potential of current technologies encompassing innovative Industry 4.0 designs for solid waste management. Machinery and processes emphasizing on circular economy were summarized and evaluated. The application of IR 4.0 technologies shows promising opportunities in improving the management and efficiency in view of solid waste. Machine learning (ML), artificial intelligence (AI), and image recognition can be used to automate the segregation of waste, reducing the risk of exposing labour workers to harmful waste. Radio Frequency Identification (RFID) and wireless communications enable the traceability in materials to better understand the opportunities in circular economy. Additionally, the interconnectivity of systems and automatic transfer of data enable the creation of more complex system that houses a larger solution space that was previously not possible such as centralised cloud computing to reduce the cost by eliminating the need for individual computing systems. Through this comprehensive review-based work, innovative Industry 4.0 components of machinery and processes involving waste management which focuses on circular economy are identified with the critical ones evaluated briefly. It was found that the current research and work done is based on applying Industry 4.0 technologies on individual waste management systems, which lacks the coherency needed to capitalise on technologies such as cloud computing, interconnectivity, big data, etc on a larger scale. 
Therefore, a real world comprehensive end-to-end integration aimed to optimize every process within the solid waste management chain should be explored.}, } @article {pmid35693529, year = {2022}, author = {Zhao, Y and Du, D}, title = {Research Orientation and Development of Social Psychology's Concept of Justice in the Era of Cloud Computing.}, journal = {Frontiers in psychology}, volume = {13}, number = {}, pages = {902780}, pmid = {35693529}, issn = {1664-1078}, abstract = {With the maturity and rapid expansion of social psychology, great progress has been made in the integration of social psychology with other disciplines. From the very beginning, social psychology is destined to have a diversified and multidisciplinary research orientation and disciplinary nature, which also makes it difficult for social psychology to be defined in a single disciplinary field and a single research method. With the rapid development of the Internet, the emergence of cloud computing technology not only facilitates the orientation of psychological research, but also promotes the emergence and development of some new psychological disciplines. Therefore, the purpose of this paper is to study the orientation of social psychology and its current development in the context of cloud computing era. This paper collects, organizes, and integrates the research data of college students' view of justice from the perspective of social psychology through cloud computing technology, and uses empirical research methods to conduct in-depth research on people's view of justice in social psychology. This paper collects the data reports of college students on social justice issues through cloud computing technology to make the results more accurate. The experimental results show that nearly 70% of college students pay more attention to social justice issues. 
This data clearly reflects the optimistic trend of people's attention to justice issues in social psychology.}, } @article {pmid35687631, year = {2023}, author = {Chu, Z and Guo, J and Guo, J}, title = {Up-Conversion Luminescence System for Quantitative Detection of IL-6.}, journal = {IEEE transactions on nanobioscience}, volume = {22}, number = {2}, pages = {203-211}, doi = {10.1109/TNB.2022.3178754}, pmid = {35687631}, issn = {1558-2639}, mesh = {Humans ; Algorithms ; *COVID-19/diagnosis ; *Interleukin-6 ; Luminescence ; Software ; }, abstract = {Interleukin-6 (IL-6) is a very important cytokine and an early predictor of survival in febrile patients (eg, patients with COVID-19). With the outbreak of the COVID-19 in the world, the significance of medical detection of interleukin 6 has gradually become prominent. A method to point-of-care(POCT) diagnosis and monitoring of IL-6 levels in patients is urgently needed. In this work, an up-conversion luminescence system (ULS) based on upconverting nanoparticles (UCNs) for quantitative detection of IL-6 was designed. The ULS consists of Micro Controller Units (MCU), transmission device, laser, image acquisition module, Bluetooth module, etc. Through hardware system acquisition and image software algorithm processing, we obtain a limit of detection (LOD) of IL-6 at 1 ng/mL, and the quantitative range is from 1 to 200 ng/mL. The system is handheld and has great detection accuracy. The detection time is 10 minutes. In addition, the system can access mobile device terminals (smartphones, personal computers, etc.) or 5G cloud servers via Bluetooth and WIFI. Patients and family members can view medical data through mobile terminals, and the data stored in the 5G cloud server can be used for edge computing and big data analysis. 
It is suitable for the early diagnosis of infectious diseases such as COVID-19 and has good application prospects.}, } @article {pmid35687417, year = {2022}, author = {Ito, H and Nakamura, Y and Takanari, K and Oishi, M and Matsuo, K and Kanbe, M and Uchibori, T and Ebisawa, K and Kamei, Y}, title = {Development of a Novel Scar Screening System with Machine Learning.}, journal = {Plastic and reconstructive surgery}, volume = {150}, number = {2}, pages = {465e-472e}, doi = {10.1097/PRS.0000000000009312}, pmid = {35687417}, issn = {1529-4242}, mesh = {Algorithms ; *Cicatrix, Hypertrophic/diagnosis/etiology ; Humans ; *Keloid/drug therapy ; Machine Learning ; }, abstract = {BACKGROUND: Hypertrophic scars and keloids tend to cause serious functional and cosmetic impediments to patients. As these scars are not life threatening, many patients do not seek proper treatment. Thus, educating physicians and patients regarding these scars is important. The authors aimed to develop an algorithm for a scar screening system and compare the accuracy of the system with that of physicians. This algorithm was designed to involve health care providers and patients.

METHODS: Digital images were obtained from Google Images (Google LLC, Mountain View, Calif.), open access repositories, and patients in the authors' hospital. After preprocessing, 3768 images were uploaded to the Google Cloud AutoML Vision platform and labeled with one of the four diagnoses: immature scars, mature scars, hypertrophic scars, and keloid. A consensus label for each image was compared with the label provided by physicians.

RESULTS: For all diagnoses, the average precision (positive predictive value) of the algorithm was 80.7 percent, the average recall (sensitivity) was 71 percent, and the area under the curve was 0.846. The algorithm afforded 77 correct diagnoses with an accuracy of 77 percent. Conversely, the average physician accuracy was 68.7 percent. The Cohen kappa coefficient of the algorithm was 0.69, while that of the physicians was 0.59.

CONCLUSIONS: The authors developed a computer vision algorithm that can diagnose four scar types using automated machine learning. Future iterations of this algorithm, with more comprehensive accuracy, can be embedded in telehealth and digital imaging platforms used by patients and primary doctors. The scar screening system with machine learning may be a valuable support tool for physicians and patients.

Diagnostic, II.}, } @article {pmid35684889, year = {2022}, author = {Hanzelik, PP and Kummer, A and Abonyi, J}, title = {Edge-Computing and Machine-Learning-Based Framework for Software Sensor Development.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {11}, pages = {}, pmid = {35684889}, issn = {1424-8220}, mesh = {*Artificial Intelligence ; Cloud Computing ; *Internet of Things ; Machine Learning ; Software ; }, abstract = {The present research presents a framework that supports the development and operation of machine-learning (ML) algorithms to develop, maintain and manage the whole lifecycle of modeling software sensors related to complex chemical processes. Our motivation is to take advantage of ML and edge computing and offer innovative solutions to the chemical industry for difficult-to-measure laboratory variables. The purpose of software sensor models is to continuously forecast the quality of products to achieve effective quality control, maintain the stable production condition of plants, and support efficient, environmentally friendly, and harmless laboratory work. As a result of the literature review, quite a few ML models have been developed in recent years that support the quality assurance of different types of materials. However, the problems of continuous operation, maintenance and version control of these models have not yet been solved. The method uses ML algorithms and takes advantage of cloud services in an enterprise environment. Industrial 4.0 devices such as the Internet of Things (IoT), edge computing, cloud computing, ML, and artificial intelligence (AI) are core techniques. The article outlines an information system structure and the related methodology based on data from a quality-assurance laboratory. During the development, we encountered several challenges resulting from the continuous development of ML models and the tuning of their parameters. 
The article discusses the development, version control, validation, lifecycle, and maintenance of ML models and a case study. The developed framework can continuously monitor the performance of the models and increase the amount of data that make up the models. As a result, the most accurate, data-driven and up-to-date models are always available to quality-assurance engineers with this solution.}, } @article {pmid35684844, year = {2022}, author = {Lin, HY and Tsai, TT and Ting, PY and Chen, CC}, title = {An Improved ID-Based Data Storage Scheme for Fog-Enabled IoT Environments.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {11}, pages = {}, pmid = {35684844}, issn = {1424-8220}, abstract = {In a fog-enabled IoT environment, a fog node is regarded as the proxy between end users and cloud servers to reduce the latency of data transmission, so as to fulfill the requirement of more real-time applications. A data storage scheme utilizing fog computing architecture allows a user to share cloud data with other users via the assistance of fog nodes. In particular, a fog node obtaining a re-encryption key of the data owner is able to convert a cloud ciphertext into the one which is decryptable by another designated user. In such a scheme, a proxy should not learn any information about the plaintext during the transmission and re-encryption processes. In 2020, an ID-based data storage scheme utilizing anonymous key generation in fog computing was proposed by some researchers. Although their protocol is provably secure in a proof model of random oracles, we will point out that there are some security flaws inherited in their protocol. On the basis of their work, we further present an improved variant, which not only eliminates their security weaknesses, but also preserves the functionalities of anonymous key generation and user revocation mechanism. 
Additionally, under the Decisional Bilinear Diffie-Hellman (DBDH) assumption, we demonstrate that our enhanced construction is also provably secure in the security notion of IND-PrID-CPA.}, } @article {pmid35684754, year = {2022}, author = {Bhatia, S and Alsuwailam, RI and Roy, DG and Mashat, A}, title = {Improved Multimedia Object Processing for the Internet of Vehicles.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {11}, pages = {}, pmid = {35684754}, issn = {1424-8220}, support = {AN000533//King Faisal University/ ; }, mesh = {Algorithms ; Automation ; *Internet of Things ; *Multimedia ; }, abstract = {The combination of edge computing and deep learning helps make intelligent edge devices that can make several conditional decisions using comparatively secured and fast machine learning algorithms. An automated car that acts as the data-source node of an intelligent Internet of vehicles or IoV system is one of these examples. Our motivation is to obtain more accurate and rapid object detection using the intelligent cameras of a smart car. The competent supervision camera of the smart automobile model utilizes multimedia data for real-time automation in real-time threat detection. The corresponding comprehensive network combines cooperative multimedia data processing, Internet of Things (IoT) fact handling, validation, computation, precise detection, and decision making. These actions confront real-time delays during data offloading to the cloud and synchronizing with the other nodes. The proposed model follows a cooperative machine learning technique, distributes the computational load by slicing real-time object data among analogous intelligent Internet of Things nodes, and parallel vision processing between connective edge clusters. As a result, the system increases the computational rate and improves accuracy through responsible resource utilization and active-passive learning. 
We achieved low latency and higher accuracy for object identification through real-time multimedia data objectification.}, } @article {pmid35684631, year = {2022}, author = {Jiao, Z and Zhou, F and Wang, Q and Sun, J}, title = {RPVC: A Revocable Publicly Verifiable Computation Solution for Edge Computing.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {11}, pages = {}, pmid = {35684631}, issn = {1424-8220}, support = {62072090, 62173101, and 61902057//National Natural Science Foundation of China/ ; }, mesh = {Algorithms ; *Cloud Computing ; }, abstract = {With publicly verifiable computation (PVC) development, users with limited resources prefer to outsource computing tasks to cloud servers. However, existing PVC schemes are mainly proposed for cloud computing scenarios, which brings bandwidth consumption or network delay of IoT devices in edge computing. In addition, dishonest edge servers may reduce resource utilization by returning unreliable results. Therefore, we propose a revocable publicly verifiable computation(RPVC) scheme for edge computing. On the one hand, RPVC ensures that users can verify the correct results at a small cost. On the other hand, it can revoke the computing abilities of dishonest edge servers. First, polynomial commitments are employed to reduce proofs' length and generation speed. Then, we improve revocable group signature by knowledge signatures and subset covering theory. This makes it possible to revoke dishonest edge servers. 
Finally, theoretical analysis proves that RPVC has correctness and security, and experiments evaluate the efficiency of RPVC.}, } @article {pmid35677770, year = {2022}, author = {Loo, WK and Hasikin, K and Suhaimi, A and Yee, PL and Teo, K and Xia, K and Qian, P and Jiang, Y and Zhang, Y and Dhanalakshmi, S and Azizan, MM and Lai, KW}, title = {Systematic Review on COVID-19 Readmission and Risk Factors: Future of Machine Learning in COVID-19 Readmission Studies.}, journal = {Frontiers in public health}, volume = {10}, number = {}, pages = {898254}, pmid = {35677770}, issn = {2296-2565}, mesh = {*COVID-19/epidemiology ; Humans ; Logistic Models ; Machine Learning ; *Patient Readmission ; Risk Factors ; United States ; }, abstract = {In this review, current studies on hospital readmission due to infection of COVID-19 were discussed, compared, and further evaluated in order to understand the current trends and progress in mitigation of hospital readmissions due to COVID-19. Boolean expression of ("COVID-19" OR "covid19" OR "covid" OR "coronavirus" OR "Sars-CoV-2") AND ("readmission" OR "re-admission" OR "rehospitalization" OR "rehospitalization") were used in five databases, namely Web of Science, Medline, Science Direct, Google Scholar and Scopus. From the search, a total of 253 articles were screened down to 26 articles. In overall, most of the research focus on readmission rates than mortality rate. On the readmission rate, the lowest is 4.2% by Ramos-Martínez et al. from Spain, and the highest is 19.9% by Donnelly et al. from the United States. Most of the research (n = 13) uses an inferential statistical approach in their studies, while only one uses a machine learning approach. The data size ranges from 79 to 126,137. However, there is no specific guide to set the most suitable data size for one research, and all results cannot be compared in terms of accuracy, as all research is regional studies and do not involve data from the multi region. 
The logistic regression is prevalent in the research on risk factors of readmission post-COVID-19 admission, despite each of the research coming out with different outcomes. From the word cloud, age is the most dominant risk factor of readmission, followed by diabetes, high length of stay, COPD, CKD, liver disease, metastatic disease, and CAD. A few future research directions have been proposed, including the utilization of machine learning in statistical analysis, investigation on dominant risk factors, experimental design on interventions to curb dominant risk factors and increase the scale of data collection from single centered to multi centered.}, } @article {pmid35677629, year = {2022}, author = {Ghosh, S and Mukherjee, A}, title = {STROVE: spatial data infrastructure enabled cloud-fog-edge computing framework for combating COVID-19 pandemic.}, journal = {Innovations in systems and software engineering}, volume = {}, number = {}, pages = {1--17}, pmid = {35677629}, issn = {1614-5046}, abstract = {The outbreak of 2019 novel coronavirus (COVID-19) has triggered unprecedented challenges and put the whole world in a parlous condition. The impacts of COVID-19 are a matter of grave concern in terms of fatality rate, socio-economical condition, health infrastructure. It is obvious that only pharmaceutical solutions (vaccine) cannot eradicate this pandemic completely, and effective strategies regarding lockdown measures, restricted mobility, emergency services to users-in brief data-driven decision system is of utmost importance. This necessitates an efficient data analytics framework, data infrastructure to store, manage pandemic related information, and distributed computing platform to support such data-driven operations. In the past few decades, Internet of Things-based devices and applications have emerged significantly in various sectors including healthcare and time-critical applications. 
To be specific, health-sensors help to accumulate health-related parameters at different time-instances of a day, the movement sensors keep track of mobility traces of the user, and helps to assist them in varied conditions. The smartphones are equipped with several such sensors and the ability of low-cost connected sensors to cover large areas makes it the most useful component to combat pandemics such as COVID-19. However, analysing and managing the huge amount of data generated by these sensors is a big challenge. In this paper we have proposed a unified framework which has three major components: (i) Spatial Data Infrastructure to manage, store, analyse and share spatio-temporal information with stakeholders efficiently, (ii) Cloud-Fog-Edge-based hierarchical architecture to support preliminary diagnosis, monitoring patients' mobility, health parameters and activities while they are in quarantine or home-based treatment, and (iii) Assisting users in varied emergency situation leveraging efficient data-driven techniques at low-latency and energy consumption. The mobility data analytics along with SDI is required to interpret the movement dynamics of the region and correlate with COVID-19 hotspots. Further, Cloud-Fog-Edge-based system architecture is required to provision healthcare services efficiently and in timely manner. 
The proposed framework yields encouraging results in taking decisions based on the COVID-19 context and assisting users effectively by enhancing accuracy of detecting suspected infected people by ∼ 24% and reducing delay by ∼ 55% compared to cloud-only system.}, } @article {pmid35677197, year = {2022}, author = {Zhang, Y and Zhao, H and Peng, D}, title = {Exploration and Research on Smart Sports Classrooms in Colleges in the Information Age.}, journal = {Applied bionics and biomechanics}, volume = {2022}, number = {}, pages = {2970496}, pmid = {35677197}, issn = {1176-2322}, abstract = {Smart classrooms, made possible by the growing use of Internet information technology in the sphere of education, as one of the important foundations for the realization of smart education, have become the current hot direction of the development of educational information innovation and intend to propose some ideas and directions for smart sports teaching research in IA colleges and universities. The smart classroom is an intelligent and efficient classroom created by the "Internet +" way of thinking and the new generation of information technologies such as big data and cloud computing. This article puts forward the exploratory research methods of smart sports classrooms in colleges and universities in the IA, methods, such as document retrieval, expert interviews, questionnaire surveys, and practical research, and field investigation method, which are used in the exploration and research of college smart sports classrooms in the IA experiment. 
According to the findings of this study, 96.34 percent of students have a positive attitude toward the smart sports classroom teaching model, which is favorable to the growth of smart sports classroom teaching.}, } @article {pmid35676964, year = {2022}, author = {Nair, R and Zafrullah, SN and Vinayasree, P and Singh, P and Zahra, MMA and Sharma, T and Ahmadi, F}, title = {Blockchain-Based Decentralized Cloud Solutions for Data Transfer.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {8209854}, pmid = {35676964}, issn = {1687-5273}, mesh = {*Blockchain ; Cloud Computing ; Information Storage and Retrieval ; }, abstract = {Cloud computing has increased its service area and user experience above traditional platforms through virtualization and resource integration, resulting in substantial economic and societal advantages. Cloud computing is experiencing a significant security and trust dilemma, requiring a trust-enabled transaction environment. The typical cloud trust model is centralized, resulting in high maintenance costs, network congestion, and even single-point failure. Also, due to a lack of openness and traceability, trust rating findings are not universally acknowledged. "Blockchain is a novel, decentralised computing system. Its unique operational principles and record traceability assure the transaction data's integrity, undeniability, and security. So, blockchain is ideal for building a distributed and decentralised trust infrastructure. This study addresses the difficulty of transferring data and related permission policies from the cloud to the distributed file systems (DFS). Our aims include moving the data files from the cloud to the distributed file system and developing a cloud policy. This study addresses the difficulty of transferring data and related permission policies from the cloud to the DFS. 
In DFS, no node is given the privilege, and storage of all the data is dependent on content-addressing. The data files are moved from Amazon S3 buckets to the interplanetary file system (IPFS).}, } @article {pmid35673063, year = {2022}, author = {Anderson, B and Cameron, J and Jefferson, U and Reeder, B}, title = {Designing a Cloud-Based System for Affordable Cyberinfrastructure to Support Software-Based Research.}, journal = {Studies in health technology and informatics}, volume = {290}, number = {}, pages = {489--493}, doi = {10.3233/SHTI220124}, pmid = {35673063}, issn = {1879-8365}, mesh = {*Cloud Computing ; Research ; *Software ; }, abstract = {Interest in cloud-based cyberinfrastructure among higher-education institutions is growing rapidly, driven by needs to realize cost savings and access enhanced computing resources. Through a nonprofit entity, we have created a platform that provides hosting and software support services enabling researchers to responsibly build on cloud technologies. However, there are technical, logistic, and administrative challenges if this platform is to support all types of research. Software-enhanced research is distinctly different from industry applications, typically characterized by needs for reduced availability, greater flexibility, and fewer resources for upkeep costs. We describe a swarm environment specifically designed for research in academic settings and our experience developing an operating model for sustainable cyberinfrastructure. We also present three case studies illustrating the types of applications supported by the cyberinfrastructure and explore techniques that address specific application needs. 
Our findings demonstrate safer, faster, cheaper cloud services by recognizing the intrinsic properties of academic research environments.}, } @article {pmid35673000, year = {2022}, author = {Ruokolainen, J and Haladijan, J and Juutinen, M and Puustinen, J and Holm, A and Vehkaoja, A and Nieminen, H}, title = {Mobilemicroservices Architecture for Remote Monitoring of Patients: A Feasibility Study.}, journal = {Studies in health technology and informatics}, volume = {290}, number = {}, pages = {200--204}, doi = {10.3233/SHTI220061}, pmid = {35673000}, issn = {1879-8365}, mesh = {Cloud Computing ; Delivery of Health Care ; Feasibility Studies ; Humans ; Monitoring, Physiologic ; *Telemedicine ; }, abstract = {Recent developments in smart mobile devices (SMDs), wearable sensors, the Internet, mobile networks, and computing power provide new healthcare opportunities that are not restricted geographically. This paper aims to introduce Mobilemicroservices Architecture (MMA) based on a study on architectures. In MMA, an HTTP-based Mobilemicroservice (MM) is allocated to each SMD's sensor. The key benefits are extendibility, scalability, ease of use for the patient, security, and the possibility to collect raw data without the necessity to involve cloud services. Feasibility was investigated in a two-year project, where MMA-based solutions were used to collect motor function data from patients with Parkinson's disease. First, we collected motor function data from 98 patients and healthy controls during their visit to a clinic. Second, we monitored the same subjects in real-time for three days in their everyday living environment. 
These MMA applications represent HTTP-based business-logic computing in which the SMDs' resources are accessible globally.}, } @article {pmid35669983, year = {2022}, author = {Khan, NJ and Ahamad, G and Naseem, M}, title = {An IoT/FOG based framework for sports talent identification in COVID-19 like situations.}, journal = {International journal of information technology : an official journal of Bharati Vidyapeeth's Institute of Computer Applications and Management}, volume = {14}, number = {5}, pages = {2513--2521}, pmid = {35669983}, issn = {2511-2112}, abstract = {COVID-19 crippled all the domains of our society. The inevitable lockdowns and social distancing procedures have hit the process of traditional sports talent identification (TiD) severely. This will interrupt the career-excellency of athletes and will also affect the future talent in the years to come. We explore the effect of COVID-19 on sports talent identification and propose an IoT/Fog-based framework for the TiD process during COVID-19 and COVID-like situations. Our proposed novel six-layer model facilitates the sports talent identification remotely using the various latest Information and Communication Technologies like IoT, fog and cloud computing. All the stakeholders like experts, coaches, players, institutes etc. are taken into consideration. The framework is mobile, widely accessible, scalable, cost-effective, secure, platform/location independent and fast. 
A brief case study of cricket talent identification using the proposed framework is also provided.}, } @article {pmid35669659, year = {2022}, author = {Li, K}, title = {Application of Artificial Intelligence System Based on Wireless Sensor Network in Enterprise Management.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {2169521}, pmid = {35669659}, issn = {1687-5273}, mesh = {Artificial Intelligence ; *Computer Communication Networks ; Remote Sensing Technology ; Technology ; *Wireless Technology ; }, abstract = {With the improvement of the ability to acquire natural information, wireless sensor networks also need to transmit corresponding information in terms of collecting information. Wireless sensor nodes have great application prospects as a key component of wireless sensors. Therefore, different wireless sensors play an important decisive role in the operation of wireless network applications. With the continuous development of wireless sensor networks, existing wireless sensor network nodes exhibit limitations and shortcomings such as inflexible structure, low variability, and low versatility. Specifically, the learning and neural networks obtained by different artificial intelligence expert systems in computing technology are different. On the one hand, it can meet the needs of users for information systems to a certain extent, and on the other hand, it can also help accelerate the development of computer science. At present, the new generation of information technology industry is listed in the seven emerging strategic industries of the country. The new cloud computing technology has gradually expanded to important corporate governance capabilities in terms of information technology. The intelligent application of cloud computing technology replaces traditional enterprise management technology. 
Efficiency management and risk management can improve the quality and business capabilities of the entire enterprise, improve system applications according to the actual situation of the enterprise, improve system applications, and implement health and the sustainable development of the enterprise, thereby promoting the sustainable development of the computer technology industry.}, } @article {pmid35669657, year = {2022}, author = {Yang, M and Gao, C and Han, J}, title = {Edge Computing Deployment Algorithm and Sports Training Data Mining Based on Software Defined Network.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {8056360}, pmid = {35669657}, issn = {1687-5273}, mesh = {Algorithms ; *Cloud Computing ; *Data Mining ; Software ; Technology ; }, abstract = {The wireless sensor network collects data from various areas through specific network nodes and uploads it to the decision-making layer for analysis and processing. Therefore, it has become a perception network of the Internet of Things and has made great achievements in monitoring and prevention at this stage. At this stage, the main problem is the motive power of sensor nodes, so the energy storage and transmission of wireless sensor network is imminent. Mobile edge computing technology provides a new type of technology for today's edge networks, enabling it to process resource-intensive data blocks and feedback to managers in time. It is a new starting point for cloud computing services, compared to traditional cloud computing services. The transmission speed is more efficient and will be widely used in various industries and serve them in the future. Among them, education and related industries urgently need in-depth information, which in turn promotes the rapid development of data mining by sensor networks. 
This article focuses on data mining technology, mainly expounds the meaning and main mining methods of data mining technology, and conducts data mining on sports training requirements from the aspects of demand collection and analysis, algorithm design and optimization, demand results and realization, etc. Monitor the training status and give the trainer reasonable suggestions. Through the processing of the training data mining results and proofreading the database standardized training data, we can formulate a personalized program suitable for sportsmen, reduce sports injuries caused by no trainer's guidance, and open new doors for training modes. Therefore, this paper studies the sensor network technology, edge computing deployment algorithm, and sports training data mining.}, } @article {pmid35668959, year = {2022}, author = {Zhong, M and Ali, M and Faqir, K and Begum, S and Haider, B and Shahzad, K and Nosheen, N}, title = {China Pakistan Economic Corridor Digital Transformation.}, journal = {Frontiers in psychology}, volume = {13}, number = {}, pages = {887848}, pmid = {35668959}, issn = {1664-1078}, abstract = {The China-Pakistan Economic Corridor (CPEC) vision and mission are to improve the people's living standards of Pakistan and China through bilateral investments, trade, cultural exchanges, and economic activities. To achieve this envisioned dream, Pakistan established the China-Pakistan Economic Corridor Authority (CPECA) to further its completion, but Covid-19 slowed it down. This situation compelled the digitalization of CPEC. This article reviews the best practices and success stories of various digitalization and e-governance programs and, in this light, advises the implementation of the Ajman Digital Governance (ADG) model as a theoretical framework for CPEC digitalization. 
This article concludes that the Pakistani government needs to transform CPEC digitalization by setting up the CPEC Digitalization and Transformation Center (DTC) at the CPECA office to attract more investors and businesses.}, } @article {pmid35668732, year = {2023}, author = {Butt, UA and Amin, R and Aldabbas, H and Mohan, S and Alouffi, B and Ahmadian, A}, title = {Cloud-based email phishing attack using machine and deep learning algorithm.}, journal = {Complex & intelligent systems}, volume = {9}, number = {3}, pages = {3043-3070}, pmid = {35668732}, issn = {2198-6053}, abstract = {Cloud computing refers to the on-demand availability of personal computer system assets, specifically data storage and processing power, without the client's input. Emails are commonly used to send and receive data for individuals or groups. Financial data, credit reports, and other sensitive data are often sent via the Internet. Phishing is a fraudster's technique used to get sensitive data from users by seeming to come from trusted sources. The sender can persuade you to give secret data by misdirecting in a phished email. The main problem is email phishing attacks while sending and receiving the email. The attacker sends spam data using email and receives your data when you open and read the email. In recent years, it has been a big problem for everyone. This paper uses different legitimate and phishing data sizes, detects new emails, and uses different features and algorithms for classification. A modified dataset is created after measuring the existing approaches. We created a feature extracted comma-separated values (CSV) file and label file, applied the support vector machine (SVM), Naive Bayes (NB), and long short-term memory (LSTM) algorithm. This experimentation considers the recognition of a phished email as a classification issue. According to the comparison and implementation, SVM, NB and LSTM performance is better and more accurate to detect email phishing attacks. 
The classification of email attacks using SVM, NB, and LSTM classifiers achieve the highest accuracy of 99.62%, 97% and 98%, respectively.}, } @article {pmid35665291, year = {2022}, author = {Kumar, RR and Tomar, A and Shameem, M and Alam, MN}, title = {OPTCLOUD: An Optimal Cloud Service Selection Framework Using QoS Correlation Lens.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {2019485}, pmid = {35665291}, issn = {1687-5273}, mesh = {*Algorithms ; *Cloud Computing ; }, abstract = {Cloud computing has grown as a computing paradigm in the last few years. Due to the explosive increase in the number of cloud services, QoS (quality of service) becomes an important factor in service filtering. Moreover, it becomes a nontrivial problem when comparing the functionality of cloud services with different performance metrics. Therefore, optimal cloud service selection is quite challenging and extremely important for users. In the existing approaches of cloud service selection, the user's preferences are offered by the user in a quantitative form. With fuzziness and subjectivity, it is a hurdle task for users to express clear preferences. Moreover, many QoS attributes are not independent but interrelated; therefore, the existing weighted summation method cannot accommodate correlations among QoS attributes and produces inaccurate results. To resolve this problem, we propose a cloud service framework that takes the user's preferences and chooses the optimal cloud service based on the user's QoS constraints. We propose a cloud service selection algorithm, based on principal component analysis (PCA) and the best-worst method (BWM), which eliminates the correlations between QoS and provides the best cloud services with the best QoS values for users. 
In the end, a numerical example is shown to validate the effectiveness and feasibility of the proposed methodology.}, } @article {pmid35655579, year = {2022}, author = {Ma, S and Liu, ZP}, title = {Machine learning potential era of zeolite simulation.}, journal = {Chemical science}, volume = {13}, number = {18}, pages = {5055-5068}, pmid = {35655579}, issn = {2041-6520}, abstract = {Zeolites, owing to their great variety and complexity in structure and wide applications in chemistry, have long been the hot topic in chemical research. This perspective first presents a short retrospect of theoretical investigations on zeolites using the tools from classical force fields to quantum mechanics calculations and to the latest machine learning (ML) potential simulations. ML potentials as the next-generation technique for atomic simulation open new avenues to simulate and interpret zeolite systems and thus hold great promise for finally predicting the structure-functionality relation of zeolites. Recent advances using ML potentials are then summarized from two main aspects: the origin of zeolite stability and the mechanism of zeolite-related catalytic reactions. We also discussed the possible scenarios of ML potential application aiming to provide instantaneous and easy access of zeolite properties. These advanced applications could now be accomplished by combining cloud-computing-based techniques with ML potential-based atomic simulations. 
The future development of ML potentials for zeolites in the respects of improving the calculation accuracy, expanding the application scope and constructing the zeolite-related datasets is finally outlooked.}, } @article {pmid35651671, year = {2022}, author = {Francini, S and Chirici, G}, title = {A Sentinel-2 derived dataset of forest disturbances occurred in Italy between 2017 and 2020.}, journal = {Data in brief}, volume = {42}, number = {}, pages = {108297}, pmid = {35651671}, issn = {2352-3409}, abstract = {Forests absorb 30% of human emissions associated with fossil fuel burning. For this reason, forest disturbances monitoring is needed for assessing greenhouse gas balance. However, in several countries, the information regarding the spatio-temporal distribution of forest disturbances is missing. Remote sensing data and the new Sentinel-2 satellite missions, in particular, represent a game-changer in this topic. Here we provide a spatially explicit dataset (10-meters resolution) of Italian forest disturbances and magnitude from 2017 to 2020 constructed using Sentinel-2 level-1C imagery and exploiting the Google Earth Engine GEE implementation of the 3I3D algorithm. For each year between 2017 and 2020, we provide three datasets: (i) a magnitude of the change map (between 0 and 255), (ii) a categorical map of forest disturbances, and (iii) a categorical map obtained by stratification of the previous maps that can be used to estimate the areas of several different forest disturbances. 
The data we provide represent the state-of-the-art for Mediterranean ecosystems in terms of omission and commission errors, they support greenhouse gas balance, forest sustainability assessment, and decision-makers forest managing, they help forest companies to monitor forest harvestings activity over space and time, and, supported by reference data, can be used to obtain the national estimates of forest harvestings and disturbances that Italy is called upon to provide.}, } @article {pmid35649841, year = {2022}, author = {Sakshuwong, S and Weir, H and Raucci, U and Martínez, TJ}, title = {Bringing chemical structures to life with augmented reality, machine learning, and quantum chemistry.}, journal = {The Journal of chemical physics}, volume = {156}, number = {20}, pages = {204801}, doi = {10.1063/5.0090482}, pmid = {35649841}, issn = {1089-7690}, mesh = {*Augmented Reality ; Machine Learning ; Molecular Conformation ; }, abstract = {Visualizing 3D molecular structures is crucial to understanding and predicting their chemical behavior. However, static 2D hand-drawn skeletal structures remain the preferred method of chemical communication. Here, we combine cutting-edge technologies in augmented reality (AR), machine learning, and computational chemistry to develop MolAR, an open-source mobile application for visualizing molecules in AR directly from their hand-drawn chemical structures. Users can also visualize any molecule or protein directly from its name or protein data bank ID and compute chemical properties in real time via quantum chemistry cloud computing. 
MolAR provides an easily accessible platform for the scientific community to visualize and interact with 3D molecular structures in an immersive and engaging way.}, } @article {pmid35646109, year = {2021}, author = {Sauber, AM and El-Kafrawy, PM and Shawish, AF and Amin, MA and Hagag, IM}, title = {A New Secure Model for Data Protection over Cloud Computing.}, journal = {Computational intelligence and neuroscience}, volume = {2021}, number = {}, pages = {8113253}, pmid = {35646109}, issn = {1687-5273}, mesh = {*Cloud Computing ; Computer Security ; *Confidentiality ; Information Storage and Retrieval ; }, abstract = {The main goal of any data storage model on the cloud is accessing data in an easy way without risking its security. A security consideration is a major aspect in any cloud data storage model to provide safety and efficiency. In this paper, we propose a secure data protection model over the cloud. The proposed model presents a solution to some security issues of cloud such as data protection from any violations and protection from a fake authorized identity user, which adversely affects the security of the cloud. This paper includes multiple issues and challenges with cloud computing that impairs security and privacy of data. It presents the threats and attacks that affect data residing in the cloud. Our proposed model provides the benefits and effectiveness of security in cloud computing such as enhancement of the encryption of data in the cloud. It provides security and scalability of data sharing for users on the cloud computing. Our model achieves the security functions over cloud computing such as identification and authentication, authorization, and encryption. Also, this model protects the system from any fake data owner who enters malicious information that may destroy the main goal of cloud services. 
We develop the one-time password (OTP) as a logging technique and uploading technique to protect users and data owners from any fake unauthorized access to the cloud. We implement our model using a simulation of the model called Next Generation Secure Cloud Server (NG-Cloud). These results increase the security protection techniques for end user and data owner from fake user and fake data owner in the cloud.}, } @article {pmid35645427, year = {2022}, author = {Algani, YMA and Boopalan, K and Elangovan, G and Santosh, DT and Chanthirasekaran, K and Patra, I and Pughazendi, N and Kiranbala, B and Nikitha, R and Saranya, M}, title = {Autonomous service for managing real time notification in detection of COVID-19 virus.}, journal = {Computers & electrical engineering : an international journal}, volume = {101}, number = {}, pages = {108117}, pmid = {35645427}, issn = {0045-7906}, abstract = {In today's world, the most prominent public issue in the field of medicine is the rapid spread of viral sickness. The seriousness of the disease lies in its fast spreading nature. The main aim of the study is the proposal of a framework for the earlier detection and forecasting of the COVID-19 virus infection amongst the people to avoid the spread of the disease across the world by undertaking the precautionary measures. According to this framework, there are four stages for the proposed work. This includes the collection of necessary data followed by the classification of the collected information which is then taken in the process of mining and extraction and eventually ending with the process of decision modelling. Since the frequency of the infection is very often a prescient one, the probabilistic examination is measured as a degree of membership characterised by the fever measure related to the same. The predictions are thereby realised using the temporal RNN. 
The model finally provides effective outcomes in the efficiency of classification, reliability, the prediction viability etc.}, } @article {pmid35639724, year = {2022}, author = {Wang, C and Zhang, M}, title = {The road to change: Broadband China strategy and enterprise digitization.}, journal = {PloS one}, volume = {17}, number = {5}, pages = {e0269133}, pmid = {35639724}, issn = {1932-6203}, mesh = {*Artificial Intelligence ; China ; Cloud Computing ; Commerce ; *Organizations ; }, abstract = {The digitization of a company necessitates not only the effort of the company but also state backing of network infrastructure. In this study, we applied the difference-in-differences method to examine the impact of the Broadband China Strategy on corporate digitalization and its heterogeneity using the data from Chinese listed firms from 2010 to 2020. The results show that the improvement in network infrastructure plays a vital role in promoting company digitization; this improvement is extremely varied due to variances in market demand and endowments. Non-state-owned firms, businesses in the eastern area, and technology-intensive businesses have profited the most. Among the five types of digitization, artificial intelligence and cloud computing are top priorities for enterprises. Our findings add to the literature on the spillover effects of broadband construction and the factors affecting enterprise digitalization.}, } @article {pmid35637932, year = {2022}, author = {Martín, A and Camacho, D}, title = {Recent advances on effective and efficient deep learning-based solutions.}, journal = {Neural computing & applications}, volume = {34}, number = {13}, pages = {10205-10210}, pmid = {35637932}, issn = {0941-0643}, abstract = {This editorial briefly analyses, describes, and provides a short summary of a set of selected papers published in a special issue focused on deep learning methods and architectures and their application to several domains and research areas. 
The set of selected and published articles covers several aspects related to two basic aspects in deep learning (DL) methods, efficiency of the models and effectiveness of the architectures. These papers revolve around different interesting application domains such as health (e.g. cancer, polyps, melanoma, mental health), wearable technologies, solar irradiance, social networks, cloud computing, wind turbines, object detection, music, and electricity, among others. This editorial provides a short description of each published article and a brief analysis of their main contributions.}, } @article {pmid35635621, year = {2022}, author = {Yan, EG and Arzt, NH}, title = {A Commentary on Process Improvements to Reduce Manual Tasks and Paper at Covid-19 Mass Vaccination Points of Dispensing in California.}, journal = {Journal of medical systems}, volume = {46}, number = {7}, pages = {47}, pmid = {35635621}, issn = {1573-689X}, mesh = {*COVID-19/prevention & control ; California ; Child ; Humans ; *Mass Vaccination ; Vaccination ; }, abstract = {My Turn is software used to manage several Covid-19 mass vaccination campaigns in California. The objective of this article is to describe the use of My Turn at two points of dispensing in California and comment on process improvements to reduce manual tasks of six identified processes of vaccination-registration, scheduling, administration, documentation, follow-up, and digital vaccine record-and paper. We reviewed publicly available documents of My Turn and patients vaccinated at George R. Moscone Convention Center in San Francisco and Oakland Coliseum Community Vaccination Clinic. For publicly available documents of My Turn, we examined videos of My Turn on YouTube, and documentation from EZIZ, the website for the California Vaccines for Children Program. For patients, we examined publicly available vaccination record cards on Instagram and Google. At the George R. Moscone Convention Center, 329,608 vaccine doses were given. 
At the Oakland Coliseum Community Vaccination Clinic, more than 500,000 vaccine doses were administered. My Turn can be used to reduce manual tasks and paper for mass vaccinating patients against Covid-19.}, } @article {pmid35634070, year = {2022}, author = {Rahmani, MKI and Shuaib, M and Alam, S and Siddiqui, ST and Ahmad, S and Bhatia, S and Mashat, A}, title = {Blockchain-Based Trust Management Framework for Cloud Computing-Based Internet of Medical Things (IoMT): A Systematic Review.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {9766844}, pmid = {35634070}, issn = {1687-5273}, mesh = {*Blockchain ; Cloud Computing ; Internet ; Reproducibility of Results ; Trust ; }, abstract = {The internet of medical things (IoMT) is a smart medical device structure that includes apps, health services, and systems. These medical equipment and applications are linked to healthcare systems via the internet. Because IoT devices lack computational power, the collected data can be processed and analyzed in the cloud by more computationally intensive tools. Cloud computing in IoMT is also used to store IoT data as part of a collaborative effort. Cloud computing has provided new avenues for providing services to users with better user experience, scalability, and proper resource utilization compared to traditional platforms. However, these cloud platforms are susceptible to several security breaches evident from recent and past incidents. Trust management is a crucial feature required for providing secure and reliable service to users. The traditional trust management protocols in the cloud computing situation are centralized and result in single-point failure. Blockchain has emerged as the possible use case for the domain that requires trust and reliability in several aspects. Different researchers have presented various blockchain-based trust management approaches. 
This study reviews the trust challenges in cloud computing and analyzes how blockchain technology addresses these challenges using blockchain-based trust management frameworks. There are ten (10) solutions under two broad categories of decentralization and security. These challenges are centralization, huge overhead, trust evidence, less adaptive, and inaccuracy. This systematic review has been performed in six stages: identifying the research question, research methods, screening the related articles, abstract and keyword examination, data retrieval, and mapping processing. Atlas.ti software is used to analyze the relevant articles based on keywords. A total of 70 codes and 262 quotations are compiled, and furthermore, these quotations are categorized using manual coding. Finally, 20 solutions under two main categories of decentralization and security were retrieved. Out of these ten (10) solutions, three (03) fell in the security category, and the rest seven (07) came under the decentralization category.}, } @article {pmid35634057, year = {2022}, author = {Ni, Q}, title = {Deep Neural Network Model Construction for Digital Human Resource Management with Human-Job Matching.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {1418020}, pmid = {35634057}, issn = {1687-5273}, mesh = {*Algorithms ; Big Data ; Humans ; *Neural Networks, Computer ; Workforce ; }, abstract = {This article uses deep neural network technology and combines digital HRM knowledge to research human-job matching systematically. Through intelligent digital means such as 5G communication, cloud computing, big data, neural network, and user portrait, this article proposes the design of the corresponding digital transformation strategy of HRM. This article further puts forward the guaranteed measures in enhancing HRM thinking and establishing HRM culture to ensure the smooth implementation of the digital transformation strategy of the HRM. 
This system uses charts for data visualization and the Flask framework for background construction, and the data is stored through CSV files, MySQL, and configuration files. The system is based on a deep learning algorithm for job applicant matching, intelligent recommendation of jobs for job seekers, and more real help for job applicants to apply for jobs. The job intelligent recommendation algorithm partly adopts bidirectional long and short-term memory neural network (Bi-LSTM) and the word-level human post-matching neural network APJFNN built by the attention mechanism. By embedding the text representation of job demand information into the representation vector of public space, a joint embedded convolutional neural network (JE-CNN) for post matching analysis is designed and implemented. The quantitative analysis method analyzes the degree of matching with the job.}, } @article {pmid35632364, year = {2022}, author = {Umoren, O and Singh, R and Pervez, Z and Dahal, K}, title = {Securing Fog Computing with a Decentralised User Authentication Approach Based on Blockchain.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {10}, pages = {}, pmid = {35632364}, issn = {1424-8220}, mesh = {Biometry ; *Blockchain ; Computer Security ; Privacy ; Reproducibility of Results ; }, abstract = {The use of low-cost sensors in IoT over high-cost devices has been considered less expensive. However, these low-cost sensors have their own limitations such as the accuracy, quality, and reliability of the data collected. Fog computing offers solutions to those limitations; nevertheless, owing to its intrinsic distributed architecture, it faces challenges in the form of security of fog devices, secure authentication and privacy. Blockchain technology has been utilised to offer solutions for the authentication and security challenges in fog systems. 
This paper proposes an authentication system that utilises the characteristics and advantages of blockchain and smart contracts to authenticate users securely. The implemented system uses the email address, username, Ethereum address, password and data from a biometric reader to register and authenticate users. Experiments showed that the proposed method is secure and achieved performance improvement when compared to existing methods. The comparison of results with state-of-the-art showed that the proposed authentication system consumed up to 30% fewer resources in transaction and execution cost; however, there was an increase of up to 30% in miner fees.}, } @article {pmid35632264, year = {2022}, author = {Wu, TY and Meng, Q and Kumari, S and Zhang, P}, title = {Rotating behind Security: A Lightweight Authentication Protocol Based on IoT-Enabled Cloud Computing Environments.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {10}, pages = {}, pmid = {35632264}, issn = {1424-8220}, abstract = {With the rapid development of technology based on the Internet of Things (IoT), numerous IoT devices are being used on a daily basis. The rise in cloud computing plays a crucial role in solving the resource constraints of IoT devices and in promoting resource sharing, whereby users can access IoT services provided in various environments. However, this complex and open wireless network environment poses security and privacy challenges. Therefore, designing a secure authentication protocol is crucial to protecting user privacy in IoT services. In this paper, a lightweight authentication protocol was designed for IoT-enabled cloud computing environments. A real or random model, and the automatic verification tool ProVerif were used to conduct a formal security analysis. Its security was further proved through an informal analysis. 
Finally, through security and performance comparisons, our protocol was confirmed to be relatively secure and to display a good performance.}, } @article {pmid35632161, year = {2022}, author = {Alnaim, AK and Alwakeel, AM and Fernandez, EB}, title = {Towards a Security Reference Architecture for NFV.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {10}, pages = {}, pmid = {35632161}, issn = {1424-8220}, support = {1443-001//Sensor Network and Cellular Systems Research Center (SNCS)/ ; }, mesh = {*Computers ; Reproducibility of Results ; *Software ; }, abstract = {Network function virtualization (NFV) is an emerging technology that is becoming increasingly important due to its many advantages. NFV transforms legacy hardware-based network infrastructure into software-based virtualized networks. This transformation increases the flexibility and scalability of networks, at the same time reducing the time for the creation of new networks. However, the attack surface of the network increases, which requires the definition of a clear map of where attacks may happen. ETSI standards precisely define many security aspects of this architecture, but these publications are very long and provide many details which are not of interest to software architects. We start by conducting threat analysis of some of the NFV use cases. The use cases serve as scenarios where the threats to the architecture can be enumerated. Representing threats as misuse cases that describe the modus operandi of attackers, we can find countermeasures to them in the form of security patterns, and we can build a security reference architecture (SRA). Until now, only imprecise models of NFV architectures existed; by making them more detailed and precise it is possible to handle not only security but also safety and reliability, although we do not explore those aspects. 
Because security is a global property that requires a holistic approach, we strongly believe that architectural models are fundamental to produce secure networks and allow us to build networks which are secure by design. The resulting SRA defines a roadmap to implement secure concrete architectures.}, } @article {pmid35632158, year = {2022}, author = {Makarichev, V and Lukin, V and Illiashenko, O and Kharchenko, V}, title = {Digital Image Representation by Atomic Functions: The Compression and Protection of Data for Edge Computing in IoT Systems.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {10}, pages = {}, pmid = {35632158}, issn = {1424-8220}, support = {830943//European Commission/ ; }, abstract = {Digital images are used in various technological, financial, economic, and social processes. Huge datasets of high-resolution images require protected storage and low resource-intensive processing, especially when applying edge computing (EC) for designing Internet of Things (IoT) systems for industrial domains such as autonomous transport systems. For this reason, the problem of the development of image representation, which provides compression and protection features in combination with the ability to perform low complexity analysis, is relevant for EC-based systems. Security and privacy issues are important for image processing considering IoT and cloud architectures as well. To solve this problem, we propose to apply discrete atomic transform (DAT) that is based on a special class of atomic functions generalizing the well-known up-function of V.A. Rvachev. A lossless image compression algorithm based on DAT is developed, and its performance is studied for different structures of DAT. This algorithm, which combines low computational complexity, efficient lossless compression, and reliable protection features with convenient image representation, is the main contribution of the paper. 
It is shown that a sufficient reduction of memory expenses can be obtained. Additionally, a dependence of compression efficiency measured by compression ratio (CR) on the structure of DAT applied is investigated. It is established that the variation of DAT structure produces a minor variation of CR. A possibility to apply this feature to data protection and security assurance is grounded and discussed. In addition, a structure or file for storing the compressed and protected data is proposed, and its properties are considered. Multi-level structure for the application of atomic functions in image processing and protection for EC in IoT systems is suggested and analyzed.}, } @article {pmid35632088, year = {2022}, author = {Hossain, MD and Sultana, T and Hossain, MA and Layek, MA and Hossain, MI and Sone, PP and Lee, GW and Huh, EN}, title = {Dynamic Task Offloading for Cloud-Assisted Vehicular Edge Computing Networks: A Non-Cooperative Game Theoretic Approach.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {10}, pages = {}, pmid = {35632088}, issn = {1424-8220}, support = {IITP-2022-2015-0-00742//Ministry of Science and ICT, Korea/ ; }, abstract = {Vehicular edge computing (VEC) is one of the prominent ideas to enhance the computation and storage capabilities of vehicular networks (VNs) through task offloading. In VEC, the resource-constrained vehicles offload their computing tasks to the local road-side units (RSUs) for rapid computation. However, due to the high mobility of vehicles and the overloaded problem, VEC experiences a great deal of challenges when determining a location for processing the offloaded task in real time. As a result, this degrades the quality of vehicular performance. Therefore, to deal with these above-mentioned challenges, an efficient dynamic task offloading approach based on a non-cooperative game (NGTO) is proposed in this study. 
In the NGTO approach, each vehicle can make its own strategy on whether a task is offloaded to a multi-access edge computing (MEC) server or a cloud server to maximize its benefits. Our proposed strategy can dynamically adjust the task-offloading probability to acquire the maximum utility for each vehicle. However, we used a best response offloading strategy algorithm for the task-offloading game in order to achieve a unique and stable equilibrium. Numerous simulation experiments affirm that our proposed scheme fulfills the performance guarantees and can reduce the response time and task-failure rate by almost 47.6% and 54.6%, respectively, when compared with the local RSU computing (LRC) scheme. Moreover, the reduced rates are approximately 32.6% and 39.7%, respectively, when compared with a random offloading scheme, and approximately 26.5% and 28.4%, respectively, when compared with a collaborative offloading scheme.}, } @article {pmid35632024, year = {2022}, author = {Sepulveda, F and Thangraj, JS and Pulliam, J}, title = {The Edge of Exploration: An Edge Storage and Computing Framework for Ambient Noise Seismic Interferometry Using Internet of Things Based Sensor Networks.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {10}, pages = {}, pmid = {35632024}, issn = {1424-8220}, support = {FOA DE-FOA-0001445//United States Department of Energy/ ; }, abstract = {Recent technological advances have reduced the complexity and cost of developing sensor networks for remote environmental monitoring. However, the challenges of acquiring, transmitting, storing, and processing remote environmental data remain significant. The transmission of large volumes of sensor data to a centralized location (i.e., the cloud) burdens network resources, introduces latency and jitter, and can ultimately impact user experience. Edge computing has emerged as a paradigm in which substantial storage and computing resources are located at the "edge" of the network. 
In this paper, we present an edge storage and computing framework leveraging commercially available components organized in a tiered architecture and arranged in a hub-and-spoke topology. The framework includes a popular distributed database to support the acquisition, transmission, storage, and processing of Internet-of-Things-based sensor network data in a field setting. We present details regarding the architecture, distributed database, embedded systems, and topology used to implement an edge-based solution. Lastly, a real-world case study (i.e., seismic) is presented that leverages the edge storage and computing framework to acquire, transmit, store, and process millions of samples of data per hour.}, } @article {pmid35629136, year = {2022}, author = {Silva, P and Dahlke, DV and Smith, ML and Charles, W and Gomez, J and Ory, MG and Ramos, KS}, title = {An Idealized Clinicogenomic Registry to Engage Underrepresented Populations Using Innovative Technology.}, journal = {Journal of personalized medicine}, volume = {12}, number = {5}, pages = {}, pmid = {35629136}, issn = {2075-4426}, abstract = {Current best practices in tumor registries provide a glimpse into a limited time frame over the natural history of disease, usually a narrow window around diagnosis and biopsy. This creates challenges meeting public health and healthcare reimbursement policies that increasingly require robust documentation of long-term clinical trajectories, quality of life, and health economics outcomes. These challenges are amplified for underrepresented minority (URM) and other disadvantaged populations, who tend to view the institution of clinical research with skepticism. Participation gaps leave such populations underrepresented in clinical research and, importantly, in policy decisions about treatment choices and reimbursement, thus further augmenting health, social, and economic disparities. 
Cloud computing, mobile computing, digital ledgers, tokenization, and artificial intelligence technologies are powerful tools that promise to enhance longitudinal patient engagement across the natural history of disease. These tools also promise to enhance engagement by giving participants agency over their data and addressing a major impediment to research participation. This will only occur if these tools are available for use with all patients. Distributed ledger technologies (specifically blockchain) converge these tools and offer a significant element of trust that can be used to engage URM populations more substantively in clinical research. This is a crucial step toward linking composite cohorts for training and optimization of the artificial intelligence tools for enhancing public health in the future. The parameters of an idealized clinical genomic registry are presented.}, } @article {pmid35627629, year = {2022}, author = {Li, J and Gong, J and Guldmann, JM and Yang, J and Zhang, Z}, title = {Simulation of Land-Use Spatiotemporal Changes under Ecological Quality Constraints: The Case of the Wuhan Urban Agglomeration Area, China, over 2020-2030.}, journal = {International journal of environmental research and public health}, volume = {19}, number = {10}, pages = {}, pmid = {35627629}, issn = {1660-4601}, mesh = {China ; Cities ; *Conservation of Natural Resources/methods ; *Ecosystem ; Forests ; Humans ; }, abstract = {Human activities coupled with land-use change pose a threat to the regional ecological environment. Therefore, it is essential to determine the future land-use structure and spatial layout for ecological protection and sustainable development. Land use simulations based on traditional scenarios do not fully consider ecological protection, leading to urban sprawl. Timely and dynamic monitoring of ecological status and change is vital to managing and protecting urban ecology and sustainable development. 
Remote sensing indices, including greenness, humidity, dryness, and heat, are calculated annually. This method compensates for data loss and difficulty in stitching remote sensing ecological indices over large-scale areas and long time-series. Herein, a framework is developed by integrating the four above-mentioned indices for a rapid, large-scale prediction of land use/cover that incorporates the protection of high ecological quality zone (HEQZ) land. The Google Earth Engine (GEE) platform is used to build a comprehensive HEQZ map of the Wuhan Urban Agglomeration Area (WUAA). Two scenarios are considered: Ecological protection (EP) based on HEQZ and natural growth (NG) without spatial ecological constraints. Land use/cover in the WUAA is predicted over 2020-2030, using the patch-generating land use simulation (PLUS) model. The results show that: (1) the HEQZ area covers 21,456 km[2], accounting for 24% of the WUAA, and is mainly distributed in the Xianning, Huangshi, and Xiantao regions. Construction land has the highest growth rate (5.2%) under the NG scenario. The cropland area decreases by 3.2%, followed by woodlands (0.62%). (2) By delineating the HEQZ, woodlands, rivers, lakes, and wetlands are well protected; construction land displays a downward trend based on the EP scenario with the HEQZ, and the simulated construction land in 2030 is located outside the HEQZ. (3) Image processing based on GEE cloud computing can ameliorate the difficulties of remote sensing data (i.e., missing data, cloudiness, chromatic aberration, and time inconsistency). 
The results of this study can provide essential scientific guidance for territorial spatial planning under the premise of ecological security.}, } @article {pmid35623334, year = {2022}, author = {Gutz, SE and Stipancic, KL and Yunusova, Y and Berry, JD and Green, JR}, title = {Validity of Off-the-Shelf Automatic Speech Recognition for Assessing Speech Intelligibility and Speech Severity in Speakers With Amyotrophic Lateral Sclerosis.}, journal = {Journal of speech, language, and hearing research : JSLHR}, volume = {65}, number = {6}, pages = {2128-2143}, pmid = {35623334}, issn = {1558-9102}, support = {F31 DC019016/DC/NIDCD NIH HHS/United States ; K24 DC016312/DC/NIDCD NIH HHS/United States ; R01 DC017291/DC/NIDCD NIH HHS/United States ; T32 DC000038/DC/NIDCD NIH HHS/United States ; }, mesh = {*Amyotrophic Lateral Sclerosis/complications ; Dysarthria/diagnosis/etiology ; Humans ; Reproducibility of Results ; Speech Disorders ; Speech Intelligibility ; *Speech Perception ; Speech Production Measurement/methods ; }, abstract = {PURPOSE: There is increasing interest in using automatic speech recognition (ASR) systems to evaluate impairment severity or speech intelligibility in speakers with dysarthria. We assessed the clinical validity of one currently available off-the-shelf (OTS) ASR system (i.e., a Google Cloud ASR API) for indexing sentence-level speech intelligibility and impairment severity in individuals with amyotrophic lateral sclerosis (ALS), and we provided guidance for potential users of such systems in research and clinic.

METHOD: Using speech samples collected from 52 individuals with ALS and 20 healthy control speakers, we compared word recognition rate (WRR) from the commercially available Google Cloud ASR API (Machine WRR) to clinician-provided judgments of impairment severity, as well as sentence intelligibility (Human WRR). We assessed the internal reliability of Machine and Human WRR by comparing the standard deviation of WRR across sentences to the minimally detectable change (MDC), a clinical benchmark that indicates whether results are within measurement error. We also evaluated Machine and Human WRR diagnostic accuracy for classifying speakers into clinically established categories.

RESULTS: Human WRR achieved better accuracy than Machine WRR when indexing speech severity, and, although related, Human and Machine WRR were not strongly correlated. When the speech signal was mixed with noise (noise-augmented ASR) to reduce a ceiling effect, Machine WRR performance improved. Internal reliability metrics were worse for Machine than Human WRR, particularly for typical and mildly impaired severity groups, although sentence length significantly impacted both Machine and Human WRRs.

CONCLUSIONS: Results indicated that the OTS ASR system was inadequate for early detection of speech impairment and grading overall speech severity. While Machine and Human WRR were correlated, ASR should not be used as a one-to-one proxy for transcription speech intelligibility or clinician severity ratings. Overall, findings suggested that the tested OTS ASR system, Google Cloud ASR, has limited utility for grading clinical speech impairment in speakers with ALS.}, } @article {pmid35622338, year = {2022}, author = {Christley, S and Stervbo, U and Cowell, LG and , }, title = {Immune Repertoire Analysis on High-Performance Computing Using VDJServer V1: A Method by the AIRR Community.}, journal = {Methods in molecular biology (Clifton, N.J.)}, volume = {2453}, number = {}, pages = {439-446}, pmid = {35622338}, issn = {1940-6029}, mesh = {*Computing Methodologies ; High-Throughput Nucleotide Sequencing ; *Software ; Workflow ; }, abstract = {AIRR-seq data sets are usually large and require specialized analysis methods and software tools. A typical Illumina MiSeq sequencing run generates 20-30 million 2 × 300 bp paired-end sequence reads, which roughly corresponds to 15 GB of sequence data to be processed. Other platforms like NextSeq, which is useful in projects where the full V gene is not needed, create about 400 million 2 × 150 bp paired-end reads. Because of the size of the data sets, the analysis can be computationally expensive, particularly the early analysis steps like preprocessing and gene annotation that process the majority of the sequence data. A standard desktop PC may take 3-5 days of constant processing for a single MiSeq run, so dedicated high-performance computational resources may be required.VDJServer provides free access to high-performance computing (HPC) at the Texas Advanced Computing Center (TACC) through a graphical user interface (Christley et al. Front Immunol 9:976, 2018). 
VDJServer is a cloud-based analysis portal for immune repertoire sequence data that provides access to a suite of tools for a complete analysis workflow, including modules for preprocessing and quality control of sequence reads, V(D)J gene assignment, repertoire characterization, and repertoire comparison. Furthermore, VDJServer has parallelized execution for tools such as IgBLAST, so more compute resources are utilized as the size of the input data grows. Analysis that takes days on a desktop PC might take only a few hours on VDJServer. VDJServer is a free, publicly available, and open-source licensed resource. Here, we describe the workflow for performing immune repertoire analysis on VDJServer's high-performance computing.}, } @article {pmid35611115, year = {2022}, author = {Rudrapati, R}, title = {Using industrial 4.0 technologies to combat the COVID-19 pandemic.}, journal = {Annals of medicine and surgery (2012)}, volume = {78}, number = {}, pages = {103811}, pmid = {35611115}, issn = {2049-0801}, abstract = {The COVID 19 (Coronavirus) pandemic has led to a surge in the demand for healthcare devices, pre-cautions, or medicines along with advanced information technology. It has become a global mission to control the Coronavirus to prevent the death of innocent people. The fourth industrial revolution (I4.0) is a new approach to thinking that is proposed across a wide range of industries and services to achieve greater success and quality of life. Several initiatives associated with industry 4.0 are expected to make a difference in the fight against COVID-19. Implementing I4.0 components effectively could lead to a reduction in barriers between patients and healthcare workers and could result in improved communication between them. The present study aims to review the components of I4.0 and related tools used to combat the Coronavirus. This article highlights the benefits of each component of the I4.0, which is useful in controlling the spread of COVID-19. 
From the present study, it is stated that I4.0 technologies could provide an effective solution to deal with local as well as global medical crises in an innovative way.}, } @article {pmid35607467, year = {2022}, author = {Yang, Q}, title = {Analysis of English Cultural Teaching Model Based on Machine Learning.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {7126758}, pmid = {35607467}, issn = {1687-5273}, mesh = {*Artificial Intelligence ; Humans ; Machine Learning ; Pandemics ; *Students ; Surveys and Questionnaires ; }, abstract = {According to the world population, nearly five billion people use mobile phones in their daily lives, and this has increased by 20% in the last twelve months compared to the previous report. An average survey conducted by researchers to find the amount of data consumed in a month by every mobile phone in the world has finally resulted in 45 exabytes of data being collected from a single user within a month. In today's world, data consumption and data analytics are being considered as one of the most important necessities for e-commerce companies. With the help of such collected data from a person, it is possible to predict the future signature or activity of the person. If 45 terabytes of data can be stored for a single user, determining the average calculation and amount of data to be collected for five billion users appears to be much more difficult. More than the human working concept, it looks like it would be difficult for a traditional computer system to handle this amount of data. To study and understand a concept from machine learning and artificial intelligence requires quite a collection of data to predict according to a person's activity. This article explains the roles of faculty and students, as well as the requirements for academic evaluation. Even before the pandemic, most people did not have any idea about the online teaching model. 
It is only after the inability of conducting direct (offline) classes that people are forced to get into the online world of teaching. Nearly 60% of countries are trying to convert their education systems to such online models, which improve communication between students and teachers and also enable different schemes for students. Big data can be considered as one of the technological revolutions in information technology companies that became popular after the crisis of cloud computing. A support vector machine (SVM) is proposed for analyzing English culture teaching and is compared with the traditional fuzzy logic. The results show the proposed model achieves an accuracy of 98%, which is 5% higher than the existing algorithm.}, } @article {pmid35607465, year = {2022}, author = {Li, X}, title = {5G Converged Network Resource Allocation Strategy Based on Reinforcement Learning in Edge Cloud Computing Environment.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {6174708}, pmid = {35607465}, issn = {1687-5273}, mesh = {*Algorithms ; *Cloud Computing ; Computers ; Resource Allocation ; }, abstract = {Aiming at the problem that computing power and resources of Mobile Edge Computing (MEC) servers are difficult to process long-period intensive task data, this study proposes a 5G converged network resource allocation strategy based on reinforcement learning in edge cloud computing environment. In order to solve the problem of insufficient local computing power, the proposed strategy offloads some tasks to the edge of network. Firstly, we build a multi-MEC server and multi-user mobile edge system, and design optimization objectives to minimize the average response time of system tasks and total energy consumption. Then, task offloading and resource allocation process is modeled as Markov decision process. Furthermore, the deep Q-network is used to find the optimal resource allocation scheme. 
Finally, the proposed strategy is analyzed experimentally based on TensorFlow learning framework. Experimental results show that when the number of users is 110, final energy consumption is about 2500 J, which effectively reduces task delay and improves the utilization of resources.}, } @article {pmid35607458, year = {2022}, author = {Li, J}, title = {Study on Integration and Application of Artificial Intelligence and Wireless Network in Piano Music Teaching.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {8745833}, pmid = {35607458}, issn = {1687-5273}, mesh = {Algorithms ; *Artificial Intelligence ; Humans ; *Music ; Students ; Technology ; Universities ; }, abstract = {Until 2019, most people had never faced the situation that would be their life-changing moment. Most universities are conducting classes for their students with the help of virtual classrooms indicating massive technological growth. However, this development does not take enough time to reach the students and the teaching person. Within five to six months of successful projects, most application producers have launched their official sites to conduct online classes and test ways for students. The introduction of virtual classes is not the only example of technological advancement; cloud computing, artificial intelligence, and deep learning have collaborated to produce appropriate, fine, and less error-prone results in all such fields of teaching. These technological advancements have given way to design models created with the wireless networks that are being made, particularly for music-related courses. The Quality-Learning (Q-Learning) Algorithm (QLA) is a pillar study for improving the implementation of artificial intelligence in music teaching in this research. The proposed algorithm aids in improving the accuracy of music, its frequency, and its wavelength when it passes. 
The proposed QLA is compared with the existing K-Nearest Neighbour (KNN) algorithm, and the results show that QLA has achieved 99.23% accuracy in intelligent piano music teaching through wireless network mode.}, } @article {pmid35605202, year = {2022}, author = {Lewsey, MG and Yi, C and Berkowitz, O and Ayora, F and Bernado, M and Whelan, J}, title = {scCloudMine: A cloud-based app for visualization, comparison, and exploration of single-cell transcriptomic data.}, journal = {Plant communications}, volume = {3}, number = {4}, pages = {100302}, pmid = {35605202}, issn = {2590-3462}, mesh = {Cloud Computing ; Hormones ; *Mobile Applications ; Sequence Analysis, RNA ; Single-Cell Analysis ; *Transcriptome ; }, abstract = {scCloudMine is a cloud-based application for visualization, comparison, and exploration of single-cell transcriptome data. It does not require an on-site, high-power computing server, installation, or associated expertise and expense. Users upload their own or publicly available scRNA-seq datasets after pre-processing for visualization using a web browser. The data can be viewed in two color modes-Cluster, representing cell identity, and Values, showing levels of expression-and data can be queried using keywords or gene identification number(s). Using the app to compare studies, we determined that some genes frequently used as cell-type markers are in fact study specific. The apparent cell-specific expression of PHO1;H3 differed between GFP-tagging and scRNA-seq studies. Some phosphate transporter genes were induced by protoplasting, but they retained cell specificity, suggesting that cell-specific responses to stress (i.e., protoplasting) can occur. Examination of the cell specificity of hormone response genes revealed that 132 hormone-responsive genes display restricted expression and that the jasmonate response gene TIFY8 is expressed in endodermal cells, in contrast to previous reports. 
It also appears that JAZ repressors have cell-type-specific functions. These features identified using scCloudMine highlight the need for resources to enable biological researchers to compare their datasets of interest under a variety of parameters. scCloudMine enables researchers to form new hypotheses and perform comparative studies and allows for the easy re-use of data from this emerging technology by a wide variety of users who may not have access or funding for high-performance on-site computing and support.}, } @article {pmid35602625, year = {2022}, author = {Ye, Q and Wang, M and Meng, H and Xia, F and Yan, X}, title = {Efficient Linkable Ring Signature Scheme over NTRU Lattice with Unconditional Anonymity.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {8431874}, pmid = {35602625}, issn = {1687-5273}, mesh = {*Algorithms ; *Computer Security ; }, abstract = {In cloud and edge computing, senders of data often want to be anonymous, while recipients of data always expect that the data come from a reliable sender and they are not redundant. Linkable ring signature (LRS) can not only protect the anonymity of the signer, but also detect whether two different signatures are signed by the same signer. Today, most lattice-based LRS schemes only satisfy computational anonymity. To the best of our knowledge, only the lattice-based LRS scheme proposed by Torres et al. can achieve unconditional anonymity. But the efficiency of signature generation and verification of the scheme is very low, and the signature length is also relatively long. With the preimage sampling, trapdoor generation, and rejection sampling algorithms, this study proposed an efficient LRS scheme with unconditional anonymity based on the e-NTRU problem under the random oracle model. We implemented our scheme and Torres et al.'s scheme, as well as other four efficient lattice-based LRS schemes. 
It is shown that under the same security level, compared with Torres et al.'s scheme, the signature generation time, signature verification time, and signature size of our scheme are reduced by about 94.52%, 97.18%, and 58.03%, respectively.}, } @article {pmid35602318, year = {2023}, author = {Mansour, RF and Alhumyani, H and Khalek, SA and Saeed, RA and Gupta, D}, title = {Design of cultural emperor penguin optimizer for energy-efficient resource scheduling in green cloud computing environment.}, journal = {Cluster computing}, volume = {26}, number = {1}, pages = {575-586}, pmid = {35602318}, issn = {1386-7857}, abstract = {In recent times, energy related issues have become challenging with the increasing size of data centers. Energy-related problems are becoming more and more serious with the growing size of data centers. Green cloud computing (GCC) becomes a recent computing platform which aimed to handle energy utilization in cloud data centers. Load balancing is generally employed to optimize resource usage, throughput, and delay. Aiming at the reduction of energy utilization at the data centers of GCC, this paper designs an energy efficient resource scheduling using Cultural emperor penguin optimizer (CEPO) algorithm, called EERS-CEPO in GCC environment. The proposed model is aimed to distribute work load amongst several data centers or other resources and thereby avoiding overload of individual resources. The CEPO algorithm is designed based on the fusion of cultural algorithm (CA) and emperor penguin optimizer (EPO), which boosts the exploitation capabilities of EPO algorithm using the CA, shows the novelty of the work. The EERS-CEPO algorithm has derived a fitness function to optimally schedule the resources in data centers, minimize the operational and maintenance cost of the GCC, and thereby decrease the energy utilization and heat generation. 
To ensure the improvised performance of the EERS-CEPO algorithm, a wide range of experiments is performed and the experimental outcomes highlighted the better performance over the recent state of art techniques.}, } @article {pmid35592460, year = {2022}, author = {Doyen, S and Dadario, NB}, title = {12 Plagues of AI in Healthcare: A Practical Guide to Current Issues With Using Machine Learning in a Medical Context.}, journal = {Frontiers in digital health}, volume = {4}, number = {}, pages = {765406}, pmid = {35592460}, issn = {2673-253X}, abstract = {The healthcare field has long been promised a number of exciting and powerful applications of Artificial Intelligence (AI) to improve the quality and delivery of health care services. AI techniques, such as machine learning (ML), have proven the ability to model enormous amounts of complex data and biological phenomena in ways only imaginable with human abilities alone. As such, medical professionals, data scientists, and Big Tech companies alike have all invested substantial time, effort, and funding into these technologies with hopes that AI systems will provide rigorous and systematic interpretations of large amounts of data that can be leveraged to augment clinical judgments in real time. However, despite not being newly introduced, AI-based medical devices have more than often been limited in their true clinical impact that was originally promised or that which is likely capable, such as during the current COVID-19 pandemic. There are several common pitfalls for these technologies that if not prospectively managed or adjusted in real-time, will continue to hinder their performance in high stakes environments outside of the lab in which they were created. To address these concerns, we outline and discuss many of the problems that future developers will likely face that contribute to these failures. Specifically, we examine the field under four lenses: approach, data, method and operation. 
If we continue to prospectively address and manage these concerns with reliable solutions and appropriate system processes in place, then we as a field may further optimize the clinical applicability and adoption of medical based AI technology moving forward.}, } @article {pmid35591261, year = {2022}, author = {Jiang, Y and Wu, S and Mo, Q and Liu, W and Wei, X}, title = {A Cloud-Computing-Based Portable Networked Ground Station System for Microsatellites.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {9}, pages = {}, pmid = {35591261}, issn = {1424-8220}, mesh = {*Cloud Computing ; *Microsatellite Repeats/genetics ; }, abstract = {Microsatellites have attracted a large number of scholars and engineers because of their portability and distribution characteristics. The ground station suitable for microsatellite service has become an important research topic. In this paper, we propose a networked ground station and verify it on our own microsatellite. The specific networked ground station system consists of multiple ground nodes. They can work together to complete data transmission tasks with higher efficiency. After describing our microsatellite project, a reasonable distribution of ground nodes is given. A cloud computing model is used to realize the coordination of multiple ground nodes. An adaptive communication system between satellites and ground stations is used to increase link efficiency. Extensive on-orbit experiments were used to validate our design. The experimental results show that our networked ground station has excellent performance in data transmission capability. 
Finally, the specific cloud-computing-based ground station network successfully completes our satellite mission.}, } @article {pmid35591112, year = {2022}, author = {Zhang, J and Li, M and Zheng, X and Hsu, CH}, title = {A Time-Driven Cloudlet Placement Strategy for Workflow Applications in Wireless Metropolitan Area Networks.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {9}, pages = {}, pmid = {35591112}, issn = {1424-8220}, support = {2020B0101090005//the Key-Area Research and Development Program of 502 Guangdong Province under Grant/ ; }, mesh = {*Algorithms ; *Cloud Computing ; Computers ; Computers, Handheld ; Workflow ; }, abstract = {With the rapid development of mobile technology, mobile applications have increasing requirements for computational resources, and mobile devices can no longer meet these requirements. Mobile edge computing (MEC) has emerged in this context and has brought innovation into the working mode of traditional cloud computing. By provisioning edge server placement, the computing power of the cloud center is distributed to the edge of the network. The abundant computational resources of edge servers compensate for the lack of mobile devices and shorten the communication delay between servers and users. Constituting a specific form of edge servers, cloudlets have been widely studied within academia and industry in recent years. However, existing studies have mainly focused on computation offloading for general computing tasks under fixed cloudlet placement positions. They ignored the impact on computation offloading results from cloudlet placement positions and data dependencies among mobile application components. In this paper, we study the cloudlet placement problem based on workflow applications (WAs) in wireless metropolitan area networks (WMANs). 
We devise a cloudlet placement strategy based on a particle swarm optimization algorithm using genetic algorithm operators with the encoding library updating mode (PGEL), which enables the cloudlet to be placed in appropriate positions. The simulation results show that the proposed strategy can obtain a near-optimal cloudlet placement scheme. Compared with other classic algorithms, this algorithm can reduce the execution time of WAs by 15.04-44.99%.}, } @article {pmid35591011, year = {2022}, author = {Barbeau, M and Garcia-Alfaro, J and Kranakis, E}, title = {Research Trends in Collaborative Drones.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {9}, pages = {}, pmid = {35591011}, issn = {1424-8220}, mesh = {Forecasting ; *Technology ; *Unmanned Aerial Devices ; }, abstract = {The last decade has seen an explosion of interest in drones-introducing new networking technologies, such as 5G wireless connectivity and cloud computing. The resulting advancements in communication capabilities are already expanding the ubiquitous role of drones as primary solution enablers, from search and rescue missions to information gathering and parcel delivery. Their numerous applications encompass all aspects of everyday life. Our focus is on networked and collaborative drones. The available research literature on this topic is vast. No single survey article could do justice to all critical issues. Our goal in this article is not to cover everything and include everybody but rather to offer a personal perspective on a few selected research topics that might lead to fruitful future investigations that could play an essential role in developing drone technologies. The topics we address include distributed computing with drones for the management of anonymity, countering threats posed by drones, target recognition, navigation under uncertainty, risk avoidance, and cellular technologies. Our approach is selective. 
Every topic includes an explanation of the problem, a discussion of a potential research methodology, and ideas for future research.}, } @article {pmid35586098, year = {2022}, author = {Li, T and Zhao, H and Tao, Y and Huang, D and Yang, C and Xu, S}, title = {Power Intelligent Terminal Intrusion Detection Based on Deep Learning and Cloud Computing.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {1415713}, pmid = {35586098}, issn = {1687-5273}, mesh = {*Cloud Computing ; Computer Security ; Data Collection ; *Deep Learning ; Information Storage and Retrieval ; }, abstract = {Numerous internal and external intrusion attacks have appeared one after another, which has become a major problem affecting the normal operation of the power system. The power system is the infrastructure of the national economy, ensuring that the information security of its network not only is an aspect of computer information security but also must consider high-standard security requirements. This paper analyzes the intrusion threat brought by the power information network and conducts in-depth research and investigation combined with the intrusion detection technology of the power information network. It analyzes the structure of the power knowledge network and cloud computing through deep learning-based methods and provides a network interference detection model. The model combines the methods of abuse detection and anomaly detection, which solves the problem that the abuse analysis model does not detect new attack variants. At the same time, for big data network data retrieval, it retrieves and analyzes data flow quickly and accurately with the help of deep learning of data components. 
It uses a fuzzy integral method to optimize the accuracy of power information network intrusion prediction, and the accuracy reaches 98.11%, with an increase of 0.6%.}, } @article {pmid35585733, year = {2022}, author = {Aloraini, T and Aljouie, A and Alniwaider, R and Alharbi, W and Alsubaie, L and AlTuraif, W and Qureshi, W and Alswaid, A and Eyiad, W and Al Mutairi, F and Ababneh, F and Alfadhel, M and Alfares, A}, title = {The variant artificial intelligence easy scoring (VARIES) system.}, journal = {Computers in biology and medicine}, volume = {145}, number = {}, pages = {105492}, doi = {10.1016/j.compbiomed.2022.105492}, pmid = {35585733}, issn = {1879-0534}, mesh = {*Artificial Intelligence ; Humans ; *Machine Learning ; Software ; }, abstract = {PURPOSE: Medical artificial intelligence (MAI) is artificial intelligence (AI) applied to the healthcare field. AI can be applied to many different aspects of genetics, such as variant classification. With little or no prior experience in AI coding, we share our experience with variant classification using the Variant Artificial Intelligence Easy Scoring (VARIES), an open-access platform, and the Automatic Machine Learning (AutoML) of the Google Cloud Platform.

METHODS: We investigated exome sequencing data from a sample of 1410 individuals. The majority (80%) were used for training and 20% for testing. The user-friendly Google Cloud Platform was used to create the VARIES model, and the TRIPOD checklist to develop and validate the prediction model for the development of the VARIES system.

RESULTS: The learning rate of the training dataset reached optimal results at an early stage of iteration, with a loss value near zero in approximately 4 min. For the testing dataset, the results for F1 (micro average) was 0.64, F1 (macro average) 0.34, micro-average area under the curve AUC (one-over-rest) 0.81 and the macro-average AUC (one-over-rest) 0.73. The overall performance characteristics of the VARIES model suggest the classifier has a high predictive ability.

CONCLUSION: We present a systematic guideline to create a genomic AI prediction tool with high predictive power, using a graphical user interface provided by Google Cloud Platform, with no prior experience in creating the software programs required.}, } @article {pmid35580808, year = {2022}, author = {Wallace, G and Polcyn, S and Brooks, PP and Mennen, AC and Zhao, K and Scotti, PS and Michelmann, S and Li, K and Turk-Browne, NB and Cohen, JD and Norman, KA}, title = {RT-Cloud: A cloud-based software framework to simplify and standardize real-time fMRI.}, journal = {NeuroImage}, volume = {257}, number = {}, pages = {119295}, pmid = {35580808}, issn = {1095-9572}, support = {RF1 MH125318/MH/NIMH NIH HHS/United States ; UL1 TR001863/TR/NCATS NIH HHS/United States ; }, mesh = {*Cloud Computing ; Humans ; Magnetic Resonance Imaging ; *Neurofeedback ; Software ; }, abstract = {Real-time fMRI (RT-fMRI) neurofeedback has been shown to be effective in treating neuropsychiatric disorders and holds tremendous promise for future breakthroughs, both with regard to basic science and clinical applications. However, the prevalence of its use has been hampered by computing hardware requirements, the complexity of setting up and running an experiment, and a lack of standards that would foster collaboration. To address these issues, we have developed RT-Cloud (https://github.com/brainiak/rt-cloud), a flexible, cloud-based, open-source Python software package for the execution of RT-fMRI experiments. RT-Cloud uses standardized data formats and adaptable processing streams to support and expand open science in RT-fMRI research and applications. Cloud computing is a key enabling technology for advancing RT-fMRI because it eliminates the need for on-premise technical expertise and high-performance computing; this allows installation, configuration, and maintenance to be automated and done remotely. 
Furthermore, the scalability of cloud computing makes it easier to deploy computationally-demanding multivariate analyses in real time. In this paper, we describe how RT-Cloud has been integrated with open standards, including the Brain Imaging Data Structure (BIDS) standard and the OpenNeuro database, how it has been applied thus far, and our plans for further development and deployment of RT-Cloud in the coming years.}, } @article {pmid35578669, year = {2022}, author = {Ahmad, S and Mehfuz, S and Mebarek-Oudina, F and Beg, J}, title = {RSM analysis based cloud access security broker: a systematic literature review.}, journal = {Cluster computing}, volume = {25}, number = {5}, pages = {3733-3763}, pmid = {35578669}, issn = {1386-7857}, abstract = {A Cloud Access Security Broker (CASB) is a security enforcement point or cloud-based software that is placed between cloud service users and cloud applications of cloud computing (CC) which is used to run the dimensionality, heterogeneity, and ambiguity correlated with cloud services. They permit the organization to amplify the reach of their security approaches past their claim framework to third-party computer programs and storage. In contrast to other systematic literature reviews (SLR), this one is directed at the client setting. To identify and evaluate methods to understand CASB, the SLR discusses the literature, citing a comprehension of the state-of-the-art and innovative characterization to describe. An SLR was performed to compile CASB related experiments and analyze how CASBs are designed and formed. These studies are then analyzed from different contexts, like motivation, usefulness, building approach, and decision method. The SLR has discussed the contrasts present between the studies and implementations, with planning accomplishments conducted with combinations of market-based courses of action, simulation tools, middleware's, etc. 
Search words with the keywords, which were extracted from the Research Questions (RQs), were utilized to recognize the essential consideration from the journal papers, conference papers, workshops, and symposiums. This SLR has distinguished 20 particular studies distributed from 2011 to 2021. Chosen studies were evaluated concurring to the defined RQs for their eminence and scope to particular CASB in this way recognizing a few gaps within the literature. Unlike other studies, this one concentrates on the customer's viewpoint. The survey uses a systematic analysis of the literature to discover and classify techniques for realizing CASB, resulting in a comprehensive grasp of the state-of-the-art and a novel taxonomy to describe CASBs. To assemble studies relating to CASB and investigate how CASB are engineered, a systematic literature review was done. These investigations are then evaluated from a variety of angles, including motivation, functionality, engineering approach, and methodology. Engineering efforts were directed at a combination of "market-based solutions", "middlewares", "toolkits", "algorithms", "semantic frameworks", and "conceptual frameworks", according to the study, which noted disparities in the studies' implementations. For further understanding, the different independent parameters influencing the CASB are studied using PCA (Principal Component Analysis). The outcome of their analysis was the identification of five parameters influencing the PCA analysis. The experimental results were used as input for Response Surface Methodology (RSM) to obtain an empirical model. For this, five-level coding was employed for developing the model and considered three dependent parameters and four center values. For more understanding of these independent variables' influence, on the CASB study, RSM analysis was employed. It was observed from the CCD (Central Composite Design) model that the actual values show significant influence with R[2] = 0.90. 
This wide investigation reveals that CASB is still in a formative state. Even though vital advancement has been carried out in this zone, obvious challenges stay to be tended to, which have been highlighted in this paper.}, } @article {pmid35577816, year = {2022}, author = {Wimberly, MC and Nekorchuk, DM and Kankanala, RR}, title = {Cloud-based applications for accessing satellite Earth observations to support malaria early warning.}, journal = {Scientific data}, volume = {9}, number = {1}, pages = {208}, pmid = {35577816}, issn = {2052-4463}, support = {R01AI079411//U.S. Department of Health & Human Services | NIH | National Institute of Allergy and Infectious Diseases (NIAID)/ ; R01AI079411//U.S. Department of Health & Human Services | NIH | National Institute of Allergy and Infectious Diseases (NIAID)/ ; R01AI079411//U.S. Department of Health & Human Services | NIH | National Institute of Allergy and Infectious Diseases (NIAID)/ ; }, mesh = {Animals ; Climate ; Cloud Computing ; Earth, Planet ; Ethiopia/epidemiology ; *Malaria/prevention & control ; *Software ; }, abstract = {Malaria epidemics can be triggered by fluctuations in temperature and precipitation that influence vector mosquitoes and the malaria parasite. Identifying and monitoring environmental risk factors can thus provide early warning of future outbreaks. Satellite Earth observations provide relevant measurements, but obtaining these data requires substantial expertise, computational resources, and internet bandwidth. To support malaria forecasting in Ethiopia, we developed software for Retrieving Environmental Analytics for Climate and Health (REACH). REACH is a cloud-based application for accessing data on land surface temperature, spectral indices, and precipitation using the Google Earth Engine (GEE) platform. REACH can be implemented using the GEE code editor and JavaScript API, as a standalone web app, or as package with the Python API. 
Users provide a date range and data for 852 districts in Ethiopia are automatically summarized and downloaded as tables. REACH was successfully used in Ethiopia to support a pilot malaria early warning project in the Amhara region. The software can be extended to new locations and modified to access other environmental datasets through GEE.}, } @article {pmid35571870, year = {2022}, author = {Rahman, MM and Khatun, F and Sami, SI and Uzzaman, A}, title = {The evolving roles and impacts of 5G enabled technologies in healthcare: The world epidemic COVID-19 issues.}, journal = {Array (New York, N.Y.)}, volume = {14}, number = {}, pages = {100178}, pmid = {35571870}, issn = {2590-0056}, abstract = {The latest 5G technology is being introduced the Internet of Things (IoT) Era. The study aims to focus the 5G technology and the current healthcare challenges as well as to highlight 5G based solutions that can handle the COVID-19 issues in different arenas. This paper provides a comprehensive review of 5G technology with the integration of other digital technologies (like AI and machine learning, IoT objects, big data analytics, cloud computing, robotic technology, and other digital platforms) in emerging healthcare applications. From the literature, it is clear that the promising aspects of 5G (such as super-high speed, high throughput, low latency) have a prospect in healthcare advancement. Now healthcare is being adopted 5G-based technologies to aid improved health services, more effective medical research, enhanced quality of life, better experiences of medical professionals and patients in anywhere-anytime. This paper emphasizes the evolving roles of 5G technology for handling the epidemiological challenges. The study also discusses various technological challenges and prospective for developing 5G powered healthcare solutions. 
Further work will incorporate more studies on how to expand 5G-based digital society as well as to resolve the issues of safety-security-privacy and availability-accessibility-integrity in future health crises.}, } @article {pmid35566391, year = {2022}, author = {Tang, S and Chen, R and Lin, M and Lin, Q and Zhu, Y and Ding, J and Hu, H and Ling, M and Wu, J}, title = {Accelerating AutoDock Vina with GPUs.}, journal = {Molecules (Basel, Switzerland)}, volume = {27}, number = {9}, pages = {}, pmid = {35566391}, issn = {1420-3049}, support = {61872198//National Natural Science Foundation of China/ ; 81771478//National Natural Science Foundation of China/ ; 61971216//National Natural Science Foundation of China/ ; BK20201378//Basic Research Program of Science and Technology Department of Jiangsu Province/ ; }, mesh = {Algorithms ; *Drug Discovery ; Ligands ; Molecular Docking Simulation ; *Software ; }, abstract = {AutoDock Vina is one of the most popular molecular docking tools. In the latest benchmark CASF-2016 for comparative assessment of scoring functions, AutoDock Vina won the best docking power among all the docking tools. Modern drug discovery is facing a common scenario of large virtual screening of drug hits from huge compound databases. Due to the seriality characteristic of the AutoDock Vina algorithm, there is no successful report on its parallel acceleration with GPUs. Current acceleration of AutoDock Vina typically relies on the stack of computing power as well as the allocation of resource and tasks, such as the VirtualFlow platform. The vast resource expenditure and the high access threshold of users will greatly limit the popularity of AutoDock Vina and the flexibility of its usage in modern drug discovery. 
In this work, we proposed a new method, Vina-GPU, for accelerating AutoDock Vina with GPUs, which is greatly needed for reducing the investment for large virtual screens and also for wider application in large-scale virtual screening on personal computers, station servers or cloud computing, etc. Our proposed method is based on a modified Monte Carlo using simulating annealing AI algorithm. It greatly raises the number of initial random conformations and reduces the search depth of each thread. Moreover, a classic optimizer named BFGS is adopted to optimize the ligand conformations during the docking progress, before a heterogeneous OpenCL implementation was developed to realize its parallel acceleration leveraging thousands of GPU cores. Large benchmark tests show that Vina-GPU reaches an average of 21-fold and a maximum of 50-fold docking acceleration against the original AutoDock Vina while ensuring their comparable docking accuracy, indicating its potential for pushing the popularization of AutoDock Vina in large virtual screens.}, } @article {pmid35558165, year = {2022}, author = {Porter, SJ and Hook, DW}, title = {Connecting Scientometrics: Dimensions as a Route to Broadening Context for Analyses.}, journal = {Frontiers in research metrics and analytics}, volume = {7}, number = {}, pages = {835139}, pmid = {35558165}, issn = {2504-0537}, abstract = {Modern cloud-based data infrastructures open new vistas for the deployment of scientometric data into the hands of practitioners. These infrastructures lower barriers to entry by making data more available and compute capacity more affordable. In addition, if data are prepared appropriately, with unique identifiers, it is possible to connect many different types of data. Bringing broader world data into the hands of practitioners (policymakers, strategists, and others) who use scientometrics as a tool can extend their capabilities. 
These ideas are explored through connecting Dimensions and World Bank data on Google BigQuery to study international collaboration between countries of different economic classification.}, } @article {pmid35552142, year = {2023}, author = {Luo, C and Wang, S and Li, T and Chen, H and Lv, J and Yi, Z}, title = {Large-Scale Meta-Heuristic Feature Selection Based on BPSO Assisted Rough Hypercuboid Approach.}, journal = {IEEE transactions on neural networks and learning systems}, volume = {34}, number = {12}, pages = {10889-10903}, doi = {10.1109/TNNLS.2022.3171614}, pmid = {35552142}, issn = {2162-2388}, abstract = {The selection of prominent features for building more compact and efficient models is an important data preprocessing task in the field of data mining. The rough hypercuboid approach is an emerging technique that can be applied to eliminate irrelevant and redundant features, especially for the inexactness problem in approximate numerical classification. By integrating the meta-heuristic-based evolutionary search technique, a novel global search method for numerical feature selection is proposed in this article based on the hybridization of the rough hypercuboid approach and binary particle swarm optimization (BPSO) algorithm, namely RH-BPSO. To further alleviate the issue of high computational cost when processing large-scale datasets, parallelization approaches for calculating the hybrid feature evaluation criteria are presented by decomposing and recombining hypercuboid equivalence partition matrix via horizontal data partitioning. A distributed meta-heuristic optimized rough hypercuboid feature selection (DiRH-BPSO) algorithm is thus developed and embedded in the Apache Spark cloud computing model. 
Extensive experimental results indicate that RH-BPSO is promising and can significantly outperform the other representative feature selection algorithms in terms of classification accuracy, the cardinality of the selected feature subset, and execution efficiency. Moreover, experiments on distributed-memory multicore clusters show that DiRH-BPSO is significantly faster than its sequential counterpart and is perfectly capable of completing large-scale feature selection tasks that fail on a single node due to memory constraints. Parallel scalability and extensibility analysis also demonstrate that DiRH-BPSO could scale out and extend well with the growth of computational nodes and the volume of data.}, } @article {pmid35548309, year = {2022}, author = {Jiang, F and Deng, M and Long, Y and Sun, H}, title = {Spatial Pattern and Dynamic Change of Vegetation Greenness From 2001 to 2020 in Tibet, China.}, journal = {Frontiers in plant science}, volume = {13}, number = {}, pages = {892625}, pmid = {35548309}, issn = {1664-462X}, abstract = {Due to the cold climate and dramatically undulating altitude, the identification of dynamic vegetation trends and main drivers is essential to maintain the ecological balance in Tibet. The normalized difference vegetation index (NDVI), as the most commonly used greenness index, can effectively evaluate vegetation health and spatial patterns. MODIS-NDVI (Moderate-resolution Imaging Spectroradiometer-NDVI) data for Tibet from 2001 to 2020 were obtained and preprocessed on the Google Earth Engine (GEE) cloud platform. The Theil-Sen median method and Mann-Kendall test method were employed to investigate dynamic NDVI changes, and the Hurst exponent was used to predict future vegetation trends. In addition, the main drivers of NDVI changes were analyzed. 
The results indicated that (1) the vegetation NDVI in Tibet significantly increased from 2001 to 2020, and the annual average NDVI value fluctuated between 0.31 and 0.34 at an increase rate of 0.0007 year[-1]; (2) the vegetation improvement area accounted for the largest share of the study area at 56.6%, followed by stable unchanged and degraded areas, with proportions of 27.5 and 15.9%, respectively. The overall variation coefficient of the NDVI in Tibet was low, with a mean value of 0.13; (3) The mean value of the Hurst exponent was 0.53, and the area of continuously improving regions accounted for 41.2% of the study area, indicating that the vegetation change trend was continuous in most areas; (4) The NDVI in Tibet indicated a high degree of spatial agglomeration. However, there existed obvious differences in the spatial distribution of NDVI aggregation areas, and the aggregation types mainly included the high-high and low-low types; and (5) Precipitation and population growth significantly contributed to vegetation cover improvement in western Tibet. In addition, the use of the GEE to obtain remote sensing data combined with time-series data analysis provides the potential to quickly obtain large-scale vegetation change trends.}, } @article {pmid35535371, year = {2022}, author = {Lee, SH and Park, J and Yang, K and Min, J and Choi, J}, title = {Accuracy of Cloud-Based Speech Recognition Open Application Programming Interface for Medical Terms of Korean.}, journal = {Journal of Korean medical science}, volume = {37}, number = {18}, pages = {e144}, pmid = {35535371}, issn = {1598-6357}, mesh = {Cloud Computing ; Communication ; Humans ; Software ; *Speech ; *Speech Perception ; }, abstract = {BACKGROUND: There are limited data on the accuracy of cloud-based speech recognition (SR) open application programming interfaces (APIs) for medical terminology. 
This study aimed to evaluate the medical term recognition accuracy of currently available cloud-based SR open APIs in Korean.

METHODS: We analyzed the SR accuracy of currently available cloud-based SR open APIs using real doctor-patient conversation recordings collected from an outpatient clinic at a large tertiary medical center in Korea. For each original and SR transcription, we analyzed the accuracy rate of each cloud-based SR open API (i.e., the number of medical terms in the SR transcription per number of medical terms in the original transcription).

RESULTS: A total of 112 doctor-patient conversation recordings were converted with three cloud-based SR open APIs (Naver Clova SR from Naver Corporation; Google Speech-to-Text from Alphabet Inc.; and Amazon Transcribe from Amazon), and each transcription was compared. Naver Clova SR (75.1%) showed the highest accuracy with the recognition of medical terms compared to the other open APIs (Google Speech-to-Text, 50.9%, P < 0.001; Amazon Transcribe, 57.9%, P < 0.001), and Amazon Transcribe demonstrated higher recognition accuracy compared to Google Speech-to-Text (P < 0.001). In the sub-analysis, Naver Clova SR showed the highest accuracy in all areas according to word classes, but the accuracy of words longer than five characters showed no statistical differences (Naver Clova SR, 52.6%; Google Speech-to-Text, 56.3%; Amazon Transcribe, 36.6%).

CONCLUSION: Among three current cloud-based SR open APIs, Naver Clova SR, which was manufactured by a Korean company, showed the highest accuracy of medical terms in Korean, compared to Google Speech-to-Text and Amazon Transcribe. Although limitations exist in the recognition of medical terminology, there is a lot of room for improvement of this promising technology by combining the strengths of each SR engine.}, } @article {pmid35535196, year = {2022}, author = {Chai, M}, title = {Design of Rural Human Resource Management Platform Integrating IoT and Cloud Computing.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {4133048}, pmid = {35535196}, issn = {1687-5273}, mesh = {*Cloud Computing ; Humans ; *Technology ; Workforce ; }, abstract = {With the advent of the Internet of Things era, these hot technologies such as distributed, parallel computing, network storage, and load balancing can provide a good application foundation for the Internet of Things, enabling real-time dynamic management and intelligent analysis of hundreds of millions of items in the Internet of Things to be possible. The Internet of Things has changed from a concept to a reality, quickly reaching every corner of society. On the other hand, with the enhancement of the mobility of social talents, the file management of the talent service center is becoming more and more difficult. However, the traditional management methods of human resources files have problems such as poor resource sharing, asymmetric resources, and heterogeneous information sharing, which can no longer meet the needs of both the supply and demand sides of human resources with diversified and multiple organizational structures. Cloud computing technology has powerful data collection functions, self-service functions, and unified resource scheduling functions. Introducing it into the human resources file management system can greatly improve management efficiency. 
In order to carry out information management of rural human resources, this paper develops a rural human resources management system based on the Internet of Things. This paper introduces the design scheme of rural human resource management platform based on Internet of Things technology and cloud computing technology. The design of this system mainly includes organization setting, post planning, personnel management, salary management, insurance benefits, recruitment and selection, training management, performance appraisal management, labor contract management, comprehensive inquiry, rules and regulations, employee self-help, system setting, and system management function modules. The research results show that the rural human resource management system based on cloud computing can provide a complete human resource management solution for the vast rural areas. It can only purchase services, save a lot of development and maintenance costs, and also customize functions, so as to better meet the needs of use.}, } @article {pmid35531323, year = {2022}, author = {Munjal, K and Bhatia, R}, title = {A systematic review of homomorphic encryption and its contributions in healthcare industry.}, journal = {Complex & intelligent systems}, volume = {}, number = {}, pages = {1-28}, pmid = {35531323}, issn = {2198-6053}, abstract = {Cloud computing and cloud storage have contributed to a big shift in data processing and its use. Availability and accessibility of resources with the reduction of substantial work is one of the main reasons for the cloud revolution. With this cloud computing revolution, outsourcing applications are in great demand. The client uses the service by uploading their data to the cloud and finally gets the result by processing it. It benefits users greatly, but it also exposes sensitive data to third-party service providers. 
In the healthcare industry, patient health records are digital records of a patient's medical history kept by hospitals or health care providers. Patient health records are stored in data centers for storage and processing. Before doing computations on data, traditional encryption techniques decrypt the data in their original form. As a result, sensitive medical information is lost. Homomorphic encryption can protect sensitive information by allowing data to be processed in an encrypted form such that only encrypted data is accessible to service providers. In this paper, an attempt is made to present a systematic review of homomorphic cryptosystems with its categorization and evolution over time. In addition, this paper also includes a review of homomorphic cryptosystem contributions in healthcare.}, } @article {pmid35530181, year = {2022}, author = {Kumar, V and Mahmoud, MS and Alkhayyat, A and Srinivas, J and Ahmad, M and Kumari, A}, title = {RAPCHI: Robust authentication protocol for IoMT-based cloud-healthcare infrastructure.}, journal = {The Journal of supercomputing}, volume = {78}, number = {14}, pages = {16167-16196}, pmid = {35530181}, issn = {0920-8542}, abstract = {With the fast growth of technologies like cloud computing, big data, the Internet of Things, artificial intelligence, and cyber-physical systems, the demand for data security and privacy in communication networks is growing by the day. Patient and doctor connect securely through the Internet utilizing the Internet of medical devices in cloud-healthcare infrastructure (CHI). In addition, the doctor offers to patients online treatment. Unfortunately, hackers are gaining access to data at an alarming pace. In 2019, 41.4 million times, healthcare systems were compromised by attackers. In this context, we provide a secure and lightweight authentication scheme (RAPCHI) for CHI employing Internet of medical Things (IoMT) during pandemic based on cryptographic primitives. 
The suggested framework is more secure than existing frameworks and is resistant to a wide range of security threats. The paper also explains the random oracle model (ROM) and uses two alternative approaches to validate the formal security analysis of RAPCHI. Further, the paper shows that RAPCHI is safe against man-in-the-middle and reply attacks using the simulation programme AVISPA. In addition, the paper compares RAPCHI to related frameworks and discovers that it is relatively light in terms of computation and communication. These findings demonstrate that the proposed paradigm is suitable for use in real-world scenarios.}, } @article {pmid35528357, year = {2022}, author = {Gao, J}, title = {Network Intrusion Detection Method Combining CNN and BiLSTM in Cloud Computing Environment.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {7272479}, pmid = {35528357}, issn = {1687-5273}, mesh = {Algorithms ; *Cloud Computing ; *Neural Networks, Computer ; }, abstract = {A network intrusion detection method combining CNN and BiLSTM network is proposed. First, the KDD CUP 99 data set is preprocessed by using data extraction algorithm. The data set is transformed into image data set by data cleaning, data extraction, and data mapping; Second, CNN is used to extract the parallel local features of attribute information, and BiLSTM is used to extract the features of long-distance-dependent information, so as to fully consider the influence between the front and back attribute information, and attention mechanism is introduced to improve the classification accuracy. Finally, C5.0 decision tree and CNN BiLSTM deep learning model are combined to skip the design feature selection and directly use deep learning model to learn the representational features of high-dimensional data. 
Experimental results show that, compared with the methods based on AE-AlexNet and SGM-CNN, the network intrusion detection effect of this method is better, the average accuracy can be improved to 95.50%, the false-positive rate can be reduced to 4.24%, and the false-negative rate can be reduced to 6.66%. The proposed method can significantly improve the performance of the network intrusion detection system.}, } @article {pmid35528215, year = {2023}, author = {Ahmed, K and Saini, M}, title = {FCML-gait: fog computing and machine learning inspired human identity and gender recognition using gait sequences.}, journal = {Signal, image and video processing}, volume = {17}, number = {4}, pages = {925-936}, pmid = {35528215}, issn = {1863-1703}, abstract = {Security threats are always there if the human intruders are not identified and recognized well in time in highly security-sensitive environments like the military, airports, parliament houses, and banks. Fog computing and machine learning algorithms on Gait sequences can prove to be better for restricting intruders promptly. Gait recognition provides the ability to observe an individual unobtrusively, without any direct cooperation or interaction from the people, making it more attractive than other biometric recognition techniques. In this paper, a Fog Computing and Machine Learning Inspired Human Identity and Gender Recognition using Gait Sequences (FCML-Gait) is proposed. Internet of things (IoT) devices and video capturing sensors are used to acquire data. Frames are clustered using the affinity propagation (AP) clustering technique into several clusters, and a cluster-based averaged gait image (C-AGI) feature is determined for each cluster. 
For training and testing of datasets, sparse reconstruction-based metric learning (SRML) and Speeded Up Robust Features (SURF) with support vector machine (SVM) are applied on benchmark gait database ADSC-AWD having 80 subjects of 20 different individuals in the Fog Layer to improve the processing. The performance metrics, for instance, accuracy, precision, recall, F-measure, C-time, and R-time have been measured, and a comparative evaluation of the projected method with the existing SRML technique has been provided in which the proposed FCML-Gait outperforms and attains the highest accuracy of 95.49%.}, } @article {pmid35528159, year = {2022}, author = {Aldahwan, NS and Ramzan, MS}, title = {Quadruple Theories Based Determinants and their Causal Relationships Affecting the Adoption of Community Cloud in Saudi HEI.}, journal = {BioMed research international}, volume = {2022}, number = {}, pages = {2382535}, pmid = {35528159}, issn = {2314-6141}, mesh = {*Cloud Computing ; Reproducibility of Results ; Saudi Arabia ; *Technology ; }, abstract = {The higher education institutions (HEIs) are adopting the new modern cloud computing technique rapidly due to its cost effectiveness, efficient and productive feature. Though cloud computing technology is beneficial to educational sector, it is important to assess their economic benefits, technical, organizational, environmental appropriateness and potential obstacles before adopting the new technology. There are four evaluating theory for adopting the cloud computing technology which are the Technology Organization Environment (TOE), the Technology Acceptance Model (TAM), the Diffusion of Innovation (DOI), and the Institutional (INT). This study has developed a new adoption framework for accepting cloud computing technology for HEIs of Saudi by integrating the above mentioned theories. 
This framework is unique from others because no research has been conducted yet on the adoption of community cloud at the organizational level considering the four theory simultaneously. This research has developed 25 hypotheses on the adoption of community cloud computing in HEIs and analyzed those hypotheses using SPSS statistical analysis software. The reliability of the data was tested by utilizing composite reliability and Cronbach's alpha method. This study have introduced an innovative approach and framework to understand the adoption of the community cloud which will help the decision-makers to build strategies in their organizations for effective adoption of community cloud services.}, } @article {pmid35521547, year = {2022}, author = {Elisseev, V and Gardiner, LJ and Krishna, R}, title = {Scalable in-memory processing of omics workflows.}, journal = {Computational and structural biotechnology journal}, volume = {20}, number = {}, pages = {1914-1924}, pmid = {35521547}, issn = {2001-0370}, abstract = {We present a proof of concept implementation of the in-memory computing paradigm that we use to facilitate the analysis of metagenomic sequencing reads. In doing so we compare the performance of POSIX™file systems and key-value storage for omics data, and we show the potential for integrating high-performance computing (HPC) and cloud native technologies. We show that in-memory key-value storage offers possibilities for improved handling of omics data through more flexible and faster data processing. We envision fully containerized workflows and their deployment in portable micro-pipelines with multiple instances working concurrently with the same distributed in-memory storage. To highlight the potential usage of this technology for event driven and real-time data processing, we use a biological case study focused on the growing threat of antimicrobial resistance (AMR). 
We develop a workflow encompassing bioinformatics and explainable machine learning (ML) to predict life expectancy of a population based on the microbiome of its sewage while providing a description of AMR contribution to the prediction. We propose that in future, performing such analyses in 'real-time' would allow us to assess the potential risk to the population based on changes in the AMR profile of the community.}, } @article {pmid35464181, year = {2021}, author = {Dawood, HM and Liew, CY and Lau, TC}, title = {Mobile perceived trust mediation on the intention and adoption of FinTech innovations using mobile technology: A systematic literature review.}, journal = {F1000Research}, volume = {10}, number = {}, pages = {1252}, pmid = {35464181}, issn = {2046-1402}, mesh = {Artificial Intelligence ; *COVID-19 ; Humans ; *Intention ; Technology ; Trust ; }, abstract = {The banking and financial sectors have witnessed a significant development recently due to financial technology (FinTech), and it has become an essential part of the financial system. Many factors helped the development of this sector, including the pandemics such as Covid-19, the considerable increasing market value of the FinTech sector worldwide, and new technologies such as blockchain, artificial intelligence, big data, cloud computing and mobile technology. Moreover, changes in consumer's preferences, especially the Z-generation (digital generation). FinTech shifted the traditional business models to mobile platforms characterized by ease of access and swift transactions. Mobile technology became the main backbone for FinTech innovations and acts as a channel to deliver FinTech services that overcome all geographical and timing barriers, thus enhancing financial inclusion. 
Mobile perceived Trust (MPT), or the trust in using financial business models via mobile technology, is a crucial factor in the FinTech context that has mediation effects on the intention and adoption of different FinTech business models. Unfortunately, few studies have explored MPT mediations on consumers' intention to adopt FinTech innovations using mobile technology. Typically, many studies examined trust/MPT as an independent and unidirectional variable and investigated its effects on behaviour intention without predicting its mediation effects. This study aimed to develop a systematic literature review on MPT mediation in FinTech, focusing on the period from 2016 and 2021, in journals ranked Q1 and Q2, and known-based theories such as the technology acceptance model, the unified theory of acceptance and use of technology, and the mobile technology acceptance model. This study found that only four articles were published in Q1 and Q2 journals. In these articles, the MPT was used as a mediator, and its effects were measured on the intention and adoption of the behaviour.}, } @article {pmid35511912, year = {2022}, author = {Kim, YK and Kim, HJ and Lee, H and Chang, JW}, title = {Privacy-preserving parallel kNN classification algorithm using index-based filtering in cloud computing.}, journal = {PloS one}, volume = {17}, number = {5}, pages = {e0267908}, pmid = {35511912}, issn = {1932-6203}, mesh = {Algorithms ; Artificial Intelligence ; *Cloud Computing ; Computer Security ; *Privacy ; }, abstract = {With the development of cloud computing, interest in database outsourcing has recently increased. In cloud computing, it is necessary to protect the sensitive information of data owners and authorized users. For this, data mining techniques over encrypted data have been studied to protect the original database, user queries and data access patterns. 
The typical data mining technique is kNN classification which is widely used for data analysis and artificial intelligence. However, existing works do not provide a sufficient level of efficiency for a large amount of encrypted data. To solve this problem, in this paper, we propose a privacy-preserving parallel kNN classification algorithm. To reduce the computation cost for encryption, we propose an improved secure protocol by using an encrypted random value pool. To reduce the query processing time, we not only design a parallel algorithm, but also adopt a garbled circuit. In addition, the security analysis of the proposed algorithm is performed to prove its data protection, query protection, and access pattern protection. Through our performance evaluation, the proposed algorithm shows about 2∼25 times better performance compared with existing algorithms.}, } @article {pmid35511843, year = {2022}, author = {Ghosh, A and Saha, R and Misra, S}, title = {Persistent Service Provisioning Framework for IoMT Based Emergency Mobile Healthcare Units.}, journal = {IEEE journal of biomedical and health informatics}, volume = {26}, number = {12}, pages = {5851-5858}, doi = {10.1109/JBHI.2022.3172624}, pmid = {35511843}, issn = {2168-2208}, mesh = {Humans ; *Delivery of Health Care ; Monitoring, Physiologic ; *Internet ; Cloud Computing ; }, abstract = {The resource constrained nature of IoT devices set about task offloading over the Internet for robust processing. However, this increases the Turnaround Time (TAT) of the IoT services. High TATs may cause catastrophe in time-sensitive environments such as chemical and steel industries, vehicular networks, healthcare, and others. Moreover, the unreliable Internet in rural parts of underdeveloped and developing countries is unsuitable for time-critical IoT systems. In this work, we propose a framework for continuous delivery of IoT services to address the issue of high latency/TAT with poor/no-internet coverage. 
The proposed framework guarantees service delivery in such areas. To demonstrate the proposed framework, we implemented an IoT-based mobile patient monitoring system. It predicts the patient's criticality using actual sensor data. When the sensed parameters exceed the pre-set threshold in the rule-base, it initiates data transfer to the fog or cloud server. If fog or the cloud is unreachable, it performs onboard predictions. Thus, the framework ensures essential service delivery to the user at all times. Our test-bed-based evaluation demonstrates edge CPU and RAM load reduction of 16% and 26%, respectively, in the ML model's test phase. Also, the results confirm continuous service delivery, reduced latency, power and computing resource consumption.}, } @article {pmid35511842, year = {2022}, author = {Chen, J and Zheng, Y and Liang, Y and Zhan, Z and Jiang, M and Zhang, X and da Silva, DS and Wu, W and Albuquerque, VHC}, title = {Edge2Analysis: A Novel AIoT Platform for Atrial Fibrillation Recognition and Detection.}, journal = {IEEE journal of biomedical and health informatics}, volume = {26}, number = {12}, pages = {5772-5782}, doi = {10.1109/JBHI.2022.3171918}, pmid = {35511842}, issn = {2168-2208}, mesh = {Humans ; *Atrial Fibrillation/diagnosis ; Artificial Intelligence ; Neural Networks, Computer ; Electrocardiography ; Computer Simulation ; }, abstract = {Atrial fibrillation (AF) is a serious medical condition of the heart potentially leading to stroke, which can be diagnosed by analyzing electrocardiograms (ECG). Technologies of Artificial Intelligence of Things (AIoT) enable smart abnormality detection by analyzing streaming healthcare data from the sensor end of users. Analyzing streaming data in the cloud leads to challenges of response latency and privacy issues, and local inference by a model deployed on the user end brings difficulties in model update and customization. 
Therefore, we propose an AIoT Platform with AF recognition neural networks on the sensor edge with model retraining ability on a resource-constrained embedded system. To this aim, we proposed to combine simple but effective neural networks and an ECG feature selection strategy to reduce computing complexity while maintaining recognition performance. Based on the platform, we evaluated and discussed the performance, response time, and requirements for model retraining in the scenario of AF detection from ECG recordings. The proposed lightweight solution was validated with two public datasets and an ECG data stream simulation on an ATmega2560 processor, proving the feasibility of analysis and training on edge.}, } @article {pmid35510052, year = {2022}, author = {Wang, L}, title = {Internet of Things Device Identification Algorithm considering User Privacy.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {6173185}, pmid = {35510052}, issn = {1687-5273}, mesh = {Algorithms ; *Blockchain ; Computer Security ; Humans ; Internet ; *Internet of Things ; Privacy ; }, abstract = {The Internet of Things has become the third wave of the information industry and cloud computing, big data, and Internet technologies. Among the many identification technologies used in the Internet of Things, radiofrequency identification technology is undoubtedly one of the most popular methods today. It is replacing the traditional contact IC card and becoming a new trend of smart cards. At the same time, a large amount of data is generated in the IoT environment. A lot of data involve user privacy, and users do not have good control over these data. Collecting and utilizing these data on the basis of protecting user privacy have become an important problem to be solved urgently. 
With the implementation of the strategy of rejuvenating the country through science and education, major colleges and universities are developing rapidly through enrollment and expansion, which also brings inconvenience to campus security management. Although the traditional campus all-in-one card system can guarantee the security identity of people entering and leaving, it does not reasonably integrate and utilize this information, resulting in waste of information resources and, to a certain extent, the problem of user privacy leakage. To solve the above problems, a new system was developed to integrate resources to identify users. To protect the privacy data of Internet of Things users, a specific solution using blockchain technology is proposed; for the identity authentication problem of Internet of Things users, the identity authentication based on the public key address of the blockchain is used on the chain, and the group signature is used off the chain. The identity authentication method solves the contradiction between anonymity and traceability in blockchain application scenarios. The simulation results show that the system not only considers user privacy but also has extremely important practical significance for the promotion of Internet of Things and RF applications.}, } @article {pmid35510050, year = {2022}, author = {Mittal, S and Bansal, A and Gupta, D and Juneja, S and Turabieh, H and Elarabawy, MM and Sharma, A and Bitsue, ZK}, title = {Using Identity-Based Cryptography as a Foundation for an Effective and Secure Cloud Model for E-Health.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {7016554}, pmid = {35510050}, issn = {1687-5273}, mesh = {Algorithms ; Cloud Computing ; *Computer Security ; Humans ; Research Design ; *Telemedicine ; }, abstract = {Nowadays, one of the most popular applications is cloud computing for storing data and information through World Wide Web. 
Since cloud computing has become available, users are rapidly increasing. Cloud computing enables users to obtain a better and more effective application at a lower cost in a more satisfactory way. Health services data must therefore be kept as safe and secure as possible because the release of this data could have serious consequences for patients. A framework for security and privacy must be employed to store and manage extremely sensitive data. Patients' confidential health records have been encrypted and saved in the cloud using cypher text so far. To ensure privacy and security in a cloud computing environment is a big issue. The medical system has been designed as a standard, access of records, and effective use by medical practitioners as required. In this paper, we propose a novel algorithm along with implementation details as an effective and secure E-health cloud model using identity-based cryptography. The comparison of the proposed and existing techniques has been carried out in terms of time taken for encryption and decryption, energy, and power. Decryption time has been decreased up to 50% with the proposed method of cryptography. 
As it will take less time for decryption, less power is consumed for doing the cryptography operations.}, } @article {pmid35503992, year = {2022}, author = {Pinter, N and Glätzer, D and Fahrner, M and Fröhlich, K and Johnson, J and Grüning, BA and Warscheid, B and Drepper, F and Schilling, O and Föll, MC}, title = {MaxQuant and MSstats in Galaxy Enable Reproducible Cloud-Based Analysis of Quantitative Proteomics Experiments for Everyone.}, journal = {Journal of proteome research}, volume = {21}, number = {6}, pages = {1558-1565}, doi = {10.1021/acs.jproteome.2c00051}, pmid = {35503992}, issn = {1535-3907}, mesh = {Cloud Computing ; Mass Spectrometry/methods ; Proteins/analysis ; *Proteomics/methods ; Reproducibility of Results ; *Software ; }, abstract = {Quantitative mass spectrometry-based proteomics has become a high-throughput technology for the identification and quantification of thousands of proteins in complex biological samples. Two frequently used tools, MaxQuant and MSstats, allow for the analysis of raw data and finding proteins with differential abundance between conditions of interest. To enable accessible and reproducible quantitative proteomics analyses in a cloud environment, we have integrated MaxQuant (including TMTpro 16/18plex), Proteomics Quality Control (PTXQC), MSstats, and MSstatsTMT into the open-source Galaxy framework. This enables the web-based analysis of label-free and isobaric labeling proteomics experiments via Galaxy's graphical user interface on public clouds. MaxQuant and MSstats in Galaxy can be applied in conjunction with thousands of existing Galaxy tools and integrated into standardized, sharable workflows. Galaxy tracks all metadata and intermediate results in analysis histories, which can be shared privately for collaborations or publicly, allowing full reproducibility and transparency of published analysis. To further increase accessibility, we provide detailed hands-on training materials. 
The integration of MaxQuant and MSstats into the Galaxy framework enables their usage in a reproducible way on accessible large computational infrastructures, hence realizing the foundation for high-throughput proteomics data science for everyone.}, } @article {pmid35501696, year = {2022}, author = {Hadish, JA and Biggs, TD and Shealy, BT and Bender, MR and McKnight, CB and Wytko, C and Smith, MC and Feltus, FA and Honaas, L and Ficklin, SP}, title = {GEMmaker: process massive RNA-seq datasets on heterogeneous computational infrastructure.}, journal = {BMC bioinformatics}, volume = {23}, number = {1}, pages = {156}, pmid = {35501696}, issn = {1471-2105}, support = {1659300//National Science Foundation/ ; AP-19-103//Washington Tree Fruit Research Commission/ ; Emerging Research Initiatives//Washington State University/ ; Livestock Health//Washington State University/ ; Food Security program award//Washington State University/ ; 1014919//U.S. Department of Agriculture/ ; WNP00009//McIntyre Stennis/ ; }, mesh = {*High-Throughput Nucleotide Sequencing/methods ; RNA-Seq ; Reproducibility of Results ; Sequence Analysis, RNA/methods ; *Software ; }, abstract = {BACKGROUND: Quantification of gene expression from RNA-seq data is a prerequisite for transcriptome analysis such as differential gene expression analysis and gene co-expression network construction. Individual RNA-seq experiments are larger and combining multiple experiments from sequence repositories can result in datasets with thousands of samples. Processing hundreds to thousands of RNA-seq data can result in challenges related to data management, access to sufficient computational resources, navigation of high-performance computing (HPC) systems, installation of required software dependencies, and reproducibility. Processing of larger and deeper RNA-seq experiments will become more common as sequencing technology matures.

RESULTS: GEMmaker is an nf-core compliant Nextflow workflow that quantifies gene expression from small to massive RNA-seq datasets.

CONCLUSIONS: Workflows that quantify gene expression are not new, and many already address issues of portability, reusability, and scale in terms of access to CPUs. GEMmaker provides these benefits and adds the ability to scale despite low data storage infrastructure. This allows users to process hundreds to thousands of RNA-seq samples even when data storage resources are limited. GEMmaker is freely available and fully documented with step-by-step setup and execution instructions.}, } @article {pmid35498196, year = {2022}, author = {Almuzaini, KK and Sinhal, AK and Ranjan, R and Goel, V and Shrivastava, R and Halifa, A}, title = {Key Aggregation Cryptosystem and Double Encryption Method for Cloud-Based Intelligent Machine Learning Techniques-Based Health Monitoring Systems.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {3767912}, pmid = {35498196}, issn = {1687-5273}, mesh = {*Cloud Computing ; Commerce ; Humans ; Intelligence ; Machine Learning ; *Research Design ; }, abstract = {Cloud technology is a business strategy that aims to provide the necessary material to customers depending on their needs. Individuals and cloud businesses alike have embraced the cloud storage service, which has become the most widely used service. The industries outsource their data to cloud storage space to relieve themselves of the load of dealing with redundant data contents. This must be protected to prevent the theft of personal belongings, and privacy must be improved as well. Different research projects have been suggested to ensure the safe management of the information included within the data content. The security of current research projects, on the contrary, still needs improvement. As a result, this method has been suggested to address the security concerns associated with cloud computing. 
The primary goal of this study effort is to offer a safe environment for cloud users while also increasing the profit of cloud resource providers by managing and securely delivering data contents to the cloud users. The bulk of sectors, including business, finance, military, and healthcare industry, do not store data in cloud-based storage systems. This technique is used to attract these kinds of customers. Increasing public acceptance, medical researchers are drawn to cloud computing because it allows them to store their study material in a centralized location and distribute and access it in a more flexible manner. They were collected from numerous individuals who were being evaluated for medical care at the time. Scalable and enhanced key aggregate cryptosystem is a protected data protection method that provides highly effective security in the healthcare industry. When parties interested in a dispute disagree on the outflow of sensitive information, this technique manages the disputes and ensures the data security deployment of a cloud-based intelligent health monitoring system for the parties involved. The encrypted data structure of medical and healthcare prescriptions is recorded as they move through the hands of patients and healthcare facilities, according to the technique recommended. The double encryption approach is used in order to raise the overall degree of security. An encryption class is created by referring to the Ciphertext ID during the encryption procedure. The keyholder is a master secret key that facilitates in the recovery of the secret keys of various monsters and creatures by acting as a conduit between them. It is transferred and stored as a single aggregate for the benefit of the patient or customer in order to make decryption more convenient and efficient. 
A safe connection between cloud-based intelligent health monitoring systems and healthcare organizations and their patients may be established via the use of a key aggregation cryptosystem and a double encryption approach, according to the researchers. Because of this, when compared to earlier techniques, the findings reveal that the research methodology provides high levels of security in terms of confidentiality and integrity, in addition to excellent scalability.}, } @article {pmid35495546, year = {2021}, author = {Vekaria, K and Calyam, P and Sivarathri, SS and Wang, S and Zhang, Y and Pandey, A and Chen, C and Xu, D and Joshi, T and Nair, S}, title = {Recommender-as-a-Service with Chatbot Guided Domain-science Knowledge Discovery in a Science Gateway.}, journal = {Concurrency and computation : practice & experience}, volume = {33}, number = {19}, pages = {}, pmid = {35495546}, issn = {1532-0626}, support = {R01 MH122023/MH/NIMH NIH HHS/United States ; }, abstract = {Scientists in disciplines such as neuroscience and bioinformatics are increasingly relying on science gateways for experimentation on voluminous data, as well as analysis and visualization in multiple perspectives. Though current science gateways provide easy access to computing resources, datasets and tools specific to the disciplines, scientists often use slow and tedious manual efforts to perform knowledge discovery to accomplish their research/education tasks. Recommender systems can provide expert guidance and can help them to navigate and discover relevant publications, tools, data sets, or even automate cloud resource configurations suitable for a given scientific task. To realize the potential of integration of recommenders in science gateways in order to spur research productivity, we present a novel "OnTimeRecommend" recommender system. 
The OnTimeRecommend comprises several integrated recommender modules implemented as microservices that can be augmented to a science gateway in the form of a recommender-as-a-service.
Extensive experimental results showed that our method achieved up to 953% and 964% better performance than that of several classical and state-of-the-art task scheduling methods in SLA satisfaction and resource efficiency, respectively.
Our simulations offer a new insight into the "quantum" mindset and demonstrate how the algorithms under active development today can be optimized in special cases, such as band structure calculations.}, } @article {pmid35492053, year = {2021}, author = {Read, RL and Clarke, L and Mulligan, G}, title = {VentMon: An open source inline ventilator tester and monitor.}, journal = {HardwareX}, volume = {9}, number = {}, pages = {e00195}, pmid = {35492053}, issn = {2468-0672}, abstract = {Humanitarian engineers responded to the pandemic ventilator shortage of March, 2020 by beginning over 100 open source ventilator projects [Robert L. Read et al. COVID-19 Vent List. Oct. 2020. url: https://docs.google.com/spreadsheets/d/1inYw5H4RiL0AC_J9vPWzJxXCdlkMLPBRdPgEVKF8DZw/edit#gid=0, Joshua M. Pearce. A review of open source ventilators for COVID-19 and future pandemics. In: F1000Research 9 (2020).]. By ventilator, we mean both an invasive ventilator (requiring intubation of the patient) and non-invasive ventilator (generally supporting spontaneously breathing). Inexpensive ventilator test equipment can facilitate projects forced to be geographically distributed by lockdowns. The VentMon is a modular, open source, IoT-enabled tester that plugs into a standard 22 mm airway between a ventilator and a physical test lung to test any ventilator. The VentMon measures flow, pressure, fractional oxygen, humidity, and temperature. Data is stored and graphed at a data lake accessible to all devlopment team members, and, eventually, clinicians. The open source design of the VentMon, its firmware, and cloud-based software may allow it to be used as a component of modular ventilators to provide a clinical readout. The software system surrounding VentMon has been designed to be as modular and composable as possible. 
By combining new, openly published standards for data with composable and modifiable hardware, the VentMon forms the beginning of an open system or eco-system of ventilation devices and data. Thanks to grants, 20 VentMons have been given away free of charge to pandemic response teams building open source ventilators.}, } @article {pmid35491772, year = {2022}, author = {Crichton, DJ and Cinquini, L and Kincaid, H and Mahabal, A and Altinok, A and Anton, K and Colbert, M and Kelly, S and Liu, D and Patriotis, C and Lombeyda, S and Srivastava, S}, title = {From space to biomedicine: Enabling biomarker data science in the cloud.}, journal = {Cancer biomarkers : section A of Disease markers}, volume = {33}, number = {4}, pages = {479-488}, doi = {10.3233/CBM-210350}, pmid = {35491772}, issn = {1875-8592}, mesh = {*Artificial Intelligence ; Biomarkers, Tumor ; *Data Science ; Ecosystem ; Humans ; Software ; }, abstract = {NASA's Jet Propulsion Laboratory (JPL) is advancing research capabilities for data science with two of the National Cancer Institute's major research programs, the Early Detection Research Network (EDRN) and the Molecular and Cellular Characterization of Screen-Detected Lesions (MCL), by enabling data-driven discovery for cancer biomarker research. The research team pioneered a national data science ecosystem for cancer biomarker research to capture, process, manage, share, and analyze data across multiple research centers. By collaborating on software and data-driven methods developed for space and earth science research, the biomarker research community is heavily leveraging similar capabilities to support the data and computational demands to analyze research data. This includes linking diverse data from clinical phenotypes to imaging to genomics. The data science infrastructure captures and links data from over 1600 annotations of cancer biomarkers to terabytes of analysis results on the cloud in a biomarker data commons known as "LabCAS". 
As the data increases in size, it is critical that automated approaches be developed to "plug" laboratories and instruments into a data science infrastructure to systematically capture and analyze data directly. This includes the application of artificial intelligence and machine learning to automate annotation and scale science analysis.}, } @article {pmid35480156, year = {2022}, author = {Ren, H and Dan, W}, title = {Analysis of Reasonable Respiratory Efficiency in Tennis Competition and Training Environment Based on Cloud Computing.}, journal = {Journal of healthcare engineering}, volume = {2022}, number = {}, pages = {4289667}, pmid = {35480156}, issn = {2040-2309}, mesh = {Cloud Computing ; Humans ; Oxygen ; Physical Fitness ; Respiratory Rate ; *Tennis/physiology ; }, abstract = {Competitive tennis is developing in the direction of quantification. How to use and give full play to all positive factors, in order to attack actively and give full play to the limits of body and psychology, breathing, as the basic metabolic function of human body, also plays a vital role in tennis. This paper studies that it plays an important role in the rationality and explosiveness of sports and the psychological and physiological regulation in competition. The characteristics of tennis events determine the importance of scientific and rational breathing. Reasonable breathing during exercise is conducive to maintaining the basic stability of the internal environment, improving the training effect, and giving full play to the functional ability of the human body, so as to create excellent sports results. First, reduce respiratory resistance. Second, there are two methods to improve alveolar ventilation efficiency and pulmonary ventilation: increasing respiratory rate and increasing respiratory depth. 
When the inhalation volume is constant, the alveolar gas freshness rate depends on the functional residual volume in the alveolar cavity at the end of expiratory or before inhalation. The less functional the residual air, the more fresh air inhaled, and the higher the oxygen partial pressure in alveolar gas. An effective way to reduce the functional residual volume in the alveolar cavity is to exhale as deeply as possible, so as to ensure that more oxygen enters the body. Reasonable breathing methods can not only accelerate the excitation of the body, increase movement strength, reduce fatigue, and promote recovery but also play a vital role in the rational allocation of physical fitness and the improvement of sports performance. The purpose of this study is to provide a theoretical basis for scientific tennis training by analyzing the characteristics of tennis events, the form of breathing in tennis and the efficiency of reasonable breathing in tennis.}, } @article {pmid35475238, year = {2021}, author = {Mano, T and Murata, K and Kon, K and Shimizu, C and Ono, H and Shi, S and Yamada, RG and Miyamichi, K and Susaki, EA and Touhara, K and Ueda, HR}, title = {CUBIC-Cloud provides an integrative computational framework toward community-driven whole-mouse-brain mapping.}, journal = {Cell reports methods}, volume = {1}, number = {2}, pages = {100038}, pmid = {35475238}, issn = {2667-2375}, mesh = {Mice ; Animals ; *Brain/diagnostic imaging ; Brain Mapping ; *Alzheimer Disease/diagnostic imaging ; Neurons ; }, abstract = {Recent advancements in tissue clearing technologies have offered unparalleled opportunities for researchers to explore the whole mouse brain at cellular resolution. With the expansion of this experimental technique, however, a scalable and easy-to-use computational tool is in demand to effectively analyze and integrate whole-brain mapping datasets. 
To that end, here we present CUBIC-Cloud, a cloud-based framework to quantify, visualize, and integrate mouse brain data. CUBIC-Cloud is a fully automated system where users can upload their whole-brain data, run analyses, and publish the results. We demonstrate the generality of CUBIC-Cloud by a variety of applications. First, we investigated the brain-wide distribution of five cell types. Second, we quantified Aβ plaque deposition in Alzheimer's disease model mouse brains. Third, we reconstructed a neuronal activity profile under LPS-induced inflammation by c-Fos immunostaining. Last, we show brain-wide connectivity mapping by pseudotyped rabies virus. Together, CUBIC-Cloud provides an integrative platform to advance scalable and collaborative whole-brain mapping.}, } @article {pmid35464821, year = {2023}, author = {Hassan, N and Aazam, M and Tahir, M and Yau, KA}, title = {Floating Fog: extending fog computing to vast waters for aerial users.}, journal = {Cluster computing}, volume = {26}, number = {1}, pages = {181-195}, pmid = {35464821}, issn = {1386-7857}, abstract = {There are thousands of flights carrying millions of passengers each day, having three or more Internet-connected devices with them on average. Usually, onboard devices remain idle for most of the journey (which can be of several hours), therefore, we can tap on their underutilized potential. Although these devices are generally becoming more and more resourceful, for complex services (such as related to machine learning, augmented/virtual reality, smart healthcare, and so on) those devices do not suffice standalone. This makes a case for multi-device resource aggregation such as through femto-cloud. As our first contribution, we present the utility of femto-cloud for aerial users. But for that sake, a reliable and faster Internet is required (to access online services or cloud resources), which is currently not the case with satellite-based Internet. 
That is the second challenge we try to address in our paper, by presenting an adaptive beamforming-based solution for aerial Internet provisioning. However, on average, most of the flight path is above waters. Given that, we propose that beamforming transceivers can be docked on stationery ships deployed in the vast waters (such as the ocean). Nevertheless, certain services would be delay-sensitive, and accessing their on-ground servers or cloud may not be feasible (in terms of delay). Similarly, certain complex services may require resources in addition to the flight-local femto-cloud. That is the third challenge we try to tackle in this paper, by proposing that the traditional fog computing (which is a cloud-like but localized pool of resources) can also be extended to the waters on the ships harboring beamforming transceivers. We name it Floating Fog. In addition to that, Floating Fog will enable several new services such as live black-box. We also present a cost and bandwidth analysis to highlight the potentials of Floating Fog. Lastly, we identify some challenges to tackle the successful deployment of Floating Fog.}, } @article {pmid35463737, year = {2022}, author = {Jyotsna, and Nand, P}, title = {Novel DLSNNC and SBS based framework for improving QoS in healthcare-IoT applications.}, journal = {International journal of information technology : an official journal of Bharati Vidyapeeth's Institute of Computer Applications and Management}, volume = {14}, number = {4}, pages = {2093-2103}, pmid = {35463737}, issn = {2511-2112}, abstract = {Health care system is intended to enhance one's health and as a result, one's quality of life. In order to fulfil its social commitment, health care must focus on producing social profit to sustain itself. Also, due to ever increasing demand of healthcare sector, there is drastic rise in the amount of patient data that is produced and needs to be stored for long duration for clinical reference. 
The risk of patient data being lost due to a data centre failure can be minimized by including a fog layer into the cloud computing architecture. Furthermore, the burden of such data produced is stored on the cloud. In order to increase service quality, we introduce fog computing based on deep learning sigmoid-based neural network clustering (DLSNNC) and score-based scheduling (SBS). Fog computing begins by collecting and storing healthcare data on the cloud layer, using data collected through sensors. Deep learning sigmoid based neural network clustering and score based Scheduling approaches are used to determine entropy for each fog node in the fog layer. Sensors collect data and send it to the fog layer, while the cloud computing tier is responsible for monitoring the healthcare system. The exploratory findings show promising results in terms of end-to-end latency and network utilization. Also, the proposed system outperforms the existing techniques in terms of average delay.}, } @article {pmid35463290, year = {2022}, author = {He, J}, title = {Cloud Computing Load Balancing Mechanism Taking into Account Load Balancing Ant Colony Optimization Algorithm.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {3120883}, pmid = {35463290}, issn = {1687-5273}, mesh = {*Algorithms ; *Cloud Computing ; Computers ; Computing Methodologies ; }, abstract = {The networking scale and traffic have exploded. At the same time, the rapid development of virtualization and cloud computing technologies not only poses a considerable challenge to the endurance of the network, but also causes more and more problems to the traditional network architecture with IP as the core. Cloud computing is a supercomputing model based on the Internet. With the rapid growth of network access and data traffic, the processing power and computing intensity will also increase, and a single server cannot afford the increase in business. 
In order to reduce network pressure and improve computing efficiency, load balancing for network computing is particularly important. This paper uses ant colony algorithm to design cloud computing load balance. The ant colony algorithm runs in the controller. According to the real-time network load situation provided by the controller, it calculates the link with the smallest load and provides a dynamic data stream forwarding strategy. The result of the experiments shows that the load-balanced ACO optimized technique can significantly provide an improved computational response. In the ACO algorithm, the average response time is about 30% lower than that in other algorithms. This shows that the use of the ant colony algorithm achieves a good optimization effect.}, } @article {pmid35463282, year = {2022}, author = {Yadav, S and Tiwari, N}, title = {An Efficient and Secure Data Sharing Method Using Asymmetric Pairing with Shorter Ciphertext to Enable Rapid Learning in Healthcare.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {4788031}, pmid = {35463282}, issn = {1687-5273}, mesh = {*COVID-19 ; Cloud Computing ; *Computer Security ; Delivery of Health Care ; Humans ; Information Dissemination ; }, abstract = {The recent advent of cloud computing provides a flexible way to effectively share data among multiple users. Cloud computing and cryptographic primitives are changing the way of healthcare unprecedentedly by providing real-time data sharing cost-effectively. Sharing various data items from different users to multiple sets of legitimate subscribers in the cloud environment is a challenging issue. The online electronic healthcare system requires multiple data items to be shared by different users for various purposes. In the present scenario, COVID-19 data is sensitive and must be encrypted to ensure data privacy. Secure sharing of such information is crucial. 
The standard broadcast encryption system is inefficient for this purpose. Multichannel broadcast encryption is a mechanism that enables secure sharing of different messages to different set of users efficiently. We propose an efficient and secure data sharing method with shorter ciphertext in public key setting using asymmetric (Type-III) pairings. The Type-III setting is the most efficient form among all pairing types regarding operations required and security. The semantic security of this method is proven under decisional BDHE complexity assumption without random oracle model.}, } @article {pmid35463252, year = {2022}, author = {You, L and Sun, H}, title = {Research and Design of Docker Technology Based Authority Management System.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {5325694}, pmid = {35463252}, issn = {1687-5273}, mesh = {*Cloud Computing ; Databases, Factual ; Humans ; *Software ; Technology ; }, abstract = {With the development of mobile Internet technology and the continuous popularization of the network, various kinds of network software come out constantly and people are becoming more and more dependent on them, while role authority management is of great importance for the security of software, the control of management process, and the usability of users. In terms of system implementation, virtual machine technology is often faced with problems such as high virtualization overhead, poor scalability, and long deployment time in spite of its good isolation effect. Container technology represented by Docker can well solve these problems and make it possible to quickly build, deploy, operate, and maintain as well as expand services. Based on Docker technology, this research compares and chooses from various authority control models and finally decides to take the role authority management model as the infrastructure. 
It designs the role authority control model based on cloud computing and Docker technology in combination with the Task Controller Function, the Project Controller Function, and the User Controller Function and realizes this model by adopting the MongoDB database combined with HTML/CSS/JavaScript syntax and the Bootstrap framework. After the test, it is found that the Docker technology based role authority management system has satisfactory test performance consistent with expected outputs as well as strong robustness, which can meet the requirements of different objects and subjects.}, } @article {pmid35463235, year = {2022}, author = {Gong, R and Ge, N and Li, J}, title = {Real-Time Detection of Body Nutrition in Sports Training Based on Cloud Computing and Somatosensory Network.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {9911905}, pmid = {35463235}, issn = {1687-5273}, mesh = {Body Composition ; *Cloud Computing ; Glucose/analysis ; Humans ; *Sports ; Sweat/chemistry ; }, abstract = {With the progress of society and the improvement of living standards, sports training has gradually become an area of increasing concern for society and individuals. To more comprehensively grasp the physical function, body shape, and physical fitness of athletes, many researchers have conducted extensive research on the real-time detection of human body nutrition. This study is mainly supported by cloud computing and somatosensory network technology, and the real-time detection of human body composition in sports training is the main research object. In the experiment, two methods of human body composition detection were tested: the BIA method and the body composition analysis method based on the electrochemical sensor of body sweat. It designed a human nutrient composition detection system based on the BIA method. The error rate of the system is relatively small, which is basically maintained at about 2%. 
It uses a body surface sweat electrochemical sensor to detect changes in glucose concentration during human exercise. After exercising for a period of time, the test subject's sweat glucose concentration remained around 0.5 mM.}, } @article {pmid35459005, year = {2022}, author = {Franchi, F and Marotta, A and Rinaldi, C and Graziosi, F and Fratocchi, L and Parisse, M}, title = {What Can 5G Do for Public Safety? Structural Health Monitoring and Earthquake Early Warning Scenarios.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {8}, pages = {}, pmid = {35459005}, issn = {1424-8220}, support = {135//Governo Italiano/ ; }, mesh = {Cell Phone ; *Earthquakes ; Reproducibility of Results ; }, abstract = {The 5th generation of mobile networks has come to the market bringing the promise of disruptive performances as low latency, availability and reliability, imposing the development of the so-called "killer applications". This contribution presents a 5G use case in the context of Structural Health Monitoring which guarantees an unprecedented level of reliability when exploited for public safety purposes as Earthquake Early Warning. The interest on this topic is at first justified through a deep market analysis, and subsequently declined in terms of public safety benefits. A specific sensor board, guaranteeing real-time processing and 5G connectivity, is presented as the foundation on which the architecture of the network is designed and developed. 
Advantages of 5G-enabled urban safety are then discussed and proven in the experimentation results, showing that the proposed architecture guarantees lower latency delays and overcome the impairments of cloud solutions especially in terms of delays variability.}, } @article {pmid35458932, year = {2022}, author = {Jeon, S and Kim, MS}, title = {End-to-End Lip-Reading Open Cloud-Based Speech Architecture.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {8}, pages = {}, pmid = {35458932}, issn = {1424-8220}, support = {NRF-2018X1A3A1069795//National Research Foundation of Korea/ ; }, mesh = {*Artificial Intelligence ; Cloud Computing ; Neural Networks, Computer ; *Speech ; Speech Recognition Software ; }, abstract = {Deep learning technology has encouraged research on noise-robust automatic speech recognition (ASR). The combination of cloud computing technologies and artificial intelligence has significantly improved the performance of open cloud-based speech recognition application programming interfaces (OCSR APIs). Noise-robust ASRs for application in different environments are being developed. This study proposes noise-robust OCSR APIs based on an end-to-end lip-reading architecture for practical applications in various environments. Several OCSR APIs, including Google, Microsoft, Amazon, and Naver, were evaluated using the Google Voice Command Dataset v2 to obtain the optimum performance. Based on performance, the Microsoft API was integrated with Google's trained word2vec model to enhance the keywords with more complete semantic information. The extracted word vector was integrated with the proposed lip-reading architecture for audio-visual speech recognition. Three forms of convolutional neural networks (3D CNN, 3D dense connection CNN, and multilayer 3D CNN) were used in the proposed lip-reading architecture. Vectors extracted from API and vision were classified after concatenation. 
The proposed architecture enhanced the OCSR API average accuracy rate by 14.42% using standard ASR evaluation measures along with the signal-to-noise ratio. The proposed model exhibits improved performance in various noise settings, increasing the dependability of OCSR APIs for practical applications.}, } @article {pmid35456492, year = {2022}, author = {Lim, HG and Hsiao, SH and Fann, YC and Lee, YG}, title = {Robust Mutation Profiling of SARS-CoV-2 Variants from Multiple Raw Illumina Sequencing Data with Cloud Workflow.}, journal = {Genes}, volume = {13}, number = {4}, pages = {}, pmid = {35456492}, issn = {2073-4425}, support = {HHSN261201400008C/NH/NIH HHS/United States ; }, mesh = {*COVID-19/genetics ; High-Throughput Nucleotide Sequencing ; Humans ; Mutation ; *SARS-CoV-2/genetics ; Spike Glycoprotein, Coronavirus/genetics ; Workflow ; }, abstract = {Several variants of the novel severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2) are emerging all over the world. Variant surveillance from genome sequencing has become crucial to determine if mutations in these variants are rendering the virus more infectious, potent, or resistant to existing vaccines and therapeutics. Meanwhile, analyzing many raw sequencing data repeatedly with currently available code-based bioinformatics tools is tremendously challenging to be implemented in this unprecedented pandemic time due to the fact of limited experts and computational resources. Therefore, in order to hasten variant surveillance efforts, we developed an installation-free cloud workflow for robust mutation profiling of SARS-CoV-2 variants from multiple Illumina sequencing data. Herein, 55 raw sequencing data representing four early SARS-CoV-2 variants of concern (Alpha, Beta, Gamma, and Delta) from an open-access database were used to test our workflow performance. 
As a result, our workflow could automatically identify mutated sites of the variants along with reliable annotation of the protein-coding genes at cost-effective and timely manner for all by harnessing parallel cloud computing in one execution under resource-limitation settings. In addition, our workflow can also generate a consensus genome sequence which can be shared with others in public data repositories to support global variant surveillance efforts.}, } @article {pmid35440942, year = {2022}, author = {Meng, S and Zhang, X}, title = {The Use of Internet of Things and Cloud Computing Technology in the Performance Appraisal Management of Innovation Capability of University Scientific Research Team.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {9423718}, pmid = {35440942}, issn = {1687-5273}, mesh = {*Cloud Computing ; Humans ; *Internet of Things ; Technology ; Universities ; }, abstract = {This study aims to speed up the progress of scientific research projects in colleges and universities, continuously improve the innovation ability of scientific research teams in colleges and universities, and optimize the current management methods of performance appraisal of college innovation ability. Firstly, the needs of the innovation performance evaluation system are analyzed, and the corresponding innovation performance evaluation index system of scientific research team is constructed. Secondly, the Internet of Things (IoT) combines the Field Programmable Gate Array (FPGA) to build an innovation capability performance appraisal management terminal. Thirdly, the lightweight deep network has been built into the innovation ability performance assessment management network of university scientific research teams, which relates to the innovation performance assessment index system of scientific research teams. Finally, the system performance is tested. 
The results show that the proposed method has different degrees of compression for MobileNet, which can significantly reduce the network computation and retain the original recognition ability. Models whose Floating-Point Operations (FLOPs) are reduced by 70% to 90% have 3.6 to 14.3 times fewer parameters. Under different pruning rates, the proposed model has higher model compression rate and recognition accuracy than other models. The results also show that the output of the results is closely related to the interests of the research team. The academic influence score of Team 1 is 0.17, which is the highest among the six groups in this experimental study, indicating that Team 1 has the most significant academic influence. These results provide certain data support and method reference for evaluating the innovation ability of scientific research teams in colleges and universities and contribute to the comprehensive development of efficient scientific research teams.}, } @article {pmid35437463, year = {2022}, author = {Wang, Z}, title = {An Intelligent Collection System of Big Data in Medical and Health Education Based on the Internet of Things.}, journal = {Journal of healthcare engineering}, volume = {2022}, number = {}, pages = {3735102}, pmid = {35437463}, issn = {2040-2309}, mesh = {Big Data ; Cloud Computing ; Computer Security ; Health Education ; Humans ; Internet ; *Internet of Things ; }, abstract = {The Internet of Medical Things has developed rapidly as an important direction in the field of Internet of Things, especially through the use of the new generation of information technology, theoretical and applied research on medical IoT intelligent health management that provides "full-service." It has become a research hotspot of many universities and research institutions. 
Therefore, conducting research on intelligent health management in the network of medical things is of great engineering importance and theoretical guidance to improve the level of medical information. For health management in the network of medical things to conduct research around the goal of health management "Everything full spectrum for everyone," analyzed the lack of sharing of health information in current health management, lack of continuous monitoring and management of health indicators, etc., a new "individual-family-community-hospital" four-level intelligent health management service model is proposed, the hardware architecture of intelligent healthcare management and the software maintenance system have been built. Through methods such as real-time multi-source data collection, mobile sensing, cloud computing, multi-network fusion technology, continuous monitoring and intelligent management of health data is realized convenient, fast and efficient. It solves the problems that the existing medical system cannot meet the multi-level health needs, personal data security and privacy protection, etc., it has achieved the goal of real-time interactive health management of regionalization, multi-level and multi-center, and whole-person, whole-process and all-round.}, } @article {pmid35433703, year = {2022}, author = {Pennington, A and King, ONF and Tun, WM and Ho, EML and Luengo, I and Darrow, MC and Basham, M}, title = {SuRVoS 2: Accelerating Annotation and Segmentation for Large Volumetric Bioimage Workflows Across Modalities and Scales.}, journal = {Frontiers in cell and developmental biology}, volume = {10}, number = {}, pages = {842342}, pmid = {35433703}, issn = {2296-634X}, abstract = {As sample preparation and imaging techniques have expanded and improved to include a variety of options for larger sized and numbers of samples, the bottleneck in volumetric imaging is now data analysis. 
Annotation and segmentation are both common, yet difficult, data analysis tasks which are required to bring meaning to the volumetric data. The SuRVoS application has been updated and redesigned to provide access to both manual and machine learning-based segmentation and annotation techniques, including support for crowd sourced data. Combining adjacent, similar voxels (supervoxels) provides a mechanism for speeding up segmentation both in the painting of annotation and by training a segmentation model on a small amount of annotation. The support for layers allows multiple datasets to be viewed and annotated together which, for example, enables the use of correlative data (e.g. crowd-sourced annotations or secondary imaging techniques) to guide segmentation. The ability to work with larger data on high-performance servers with GPUs has been added through a client-server architecture and the Pytorch-based image processing and segmentation server is flexible and extensible, and allows the implementation of deep learning-based segmentation modules. The client side has been built around Napari allowing integration of SuRVoS into an ecosystem for open-source image analysis while the server side has been built with cloud computing and extensibility through plugins in mind. 
Together these improvements to SuRVoS provide a platform for accelerating the annotation and segmentation of volumetric and correlative imaging data across modalities and scales.}, } @article {pmid35432824, year = {2022}, author = {Mir, MH and Jamwal, S and Mehbodniya, A and Garg, T and Iqbal, U and Samori, IA}, title = {IoT-Enabled Framework for Early Detection and Prediction of COVID-19 Suspects by Leveraging Machine Learning in Cloud.}, journal = {Journal of healthcare engineering}, volume = {2022}, number = {}, pages = {7713939}, pmid = {35432824}, issn = {2040-2309}, mesh = {Algorithms ; Bayes Theorem ; *COVID-19/diagnosis ; Humans ; Machine Learning ; Support Vector Machine ; }, abstract = {COVID-19 is the repugnant but the most searched word since its outbreak in November 2019 across the globe. The world has to battle with it until an effective solution is developed. Due to the advancement in mobile and sensor technology, it is possible to come up with Internet of things-based healthcare systems. These novel healthcare systems can be proactive and preventive rather than traditional reactive healthcare systems. This article proposes a real-time IoT-enabled framework for the detection and prediction of COVID-19 suspects in early stages, by collecting symptomatic data and analyzing the nature of the virus in a better manner. The framework computes the presence of COVID-19 virus by mining the health parameters collected in real time from sensors and other IoT devices. The framework is comprised of four main components: user system or data collection center, data analytic center, diagnostic system, and cloud system. To point out and detect the COVID-19 suspected in real time, this work proposes the five machine learning techniques, namely support vector machine (SVM), decision tree, naïve Bayes, logistic regression, and neural network. In our proposed framework, the real and primary dataset collected from SKIMS, Srinagar, is used to validate our work. 
The experiment on the primary dataset was conducted using different machine learning techniques on selected symptoms. The efficiency of algorithms is calculated by computing the results of performance metrics such as accuracy, precision, recall, F1 score, root-mean-square error, and area under the curve score. The employed machine learning techniques have shown the accuracy of above 95% on the primary symptomatic data. Based on the experiment conducted, the proposed framework would be effective in the early identification and prediction of COVID-19 suspect realizing the nature of the disease in better way.}, } @article {pmid35432577, year = {2022}, author = {Mohamed Akram, K and Sihem, S and Okba, K and Harous, S}, title = {IoMT-fog-cloud based architecture for Covid-19 detection.}, journal = {Biomedical signal processing and control}, volume = {76}, number = {}, pages = {103715}, pmid = {35432577}, issn = {1746-8094}, abstract = {Nowadays, coronavirus disease 2019 (COVID-19) is the world-wide pandemic due to its mutation over time. Several works done for covid-19 detection using different techniques however, the use of small datasets and the lack of validation tests still limit their works. Also, they depend only on the increasing the accuracy and the precision of the model without giving attention to their complexity which is one of the main conditions in the healthcare application. Moreover, the majority of healthcare applications with cloud computing use centralization transmission process of various and vast volumes of information what make the privacy and security of personal patient's data easy for hacking. Furthermore, the traditional architecture of the cloud showed many weaknesses such as the latency and the low persistent performance.

In our system, we used Discrete Wavelet Transform (DWT) and Principal Component Analysis (PCA) and different energy tracking methods such as the Teager Kaiser Energy Operator (TKEO), Shannon Wavelet Entropy Energy (SWEE), and Log Energy Entropy (LEE) for preprocessing the dataset. In the first step, DWT is used to decompose the image into coefficients, where each coefficient is a vector of features. Then, we apply PCA to reduce the dimension by choosing the most essential features in the feature map. Moreover, we used TKEO, SWEE, and LEE to track the energy in the features in order to select the best and most optimal features to reduce the complexity of the model. Also, we used a CNN model that contains convolution and pooling layers due to its efficacy in image processing. Furthermore, we depend on deep neurons using small kernel windows, which provide better feature learning and minimize the model's complexity. The DWT-PCA technique with the TKEO filtering technique showed great results in terms of noise measure, where the Peak Signal-to-Noise Ratio (PSNR) was 3.14 dB and the Signal-to-Noise Ratio (SNR) of the original and preprocessed image was 1.48 and 1.47, respectively, which guaranteed the performance of the filtering techniques. The experimental results of the CNN model ensure the high performance of the proposed system in classifying the COVID-19, pneumonia, and normal cases with 97% accuracy, 100% precision, 97% recall, 99% F1-score, and 98% AUC.

The use of DWT-PCA and TKEO optimizes the selection of the optimal features and reduces the complexity of the model. The proposed system achieves good results in identifying COVID-19, pneumonia, and normal cases. The implementation of fog computing as an intermediate layer solves the latency problem and computational cost, which improves the Quality of Service (QoS) of the cloud. Fog computing ensures the privacy and security of the patients' data. With further refinement and validation, the IFC-Covid system will be a real-time and effective application for COVID-19 detection, which is user friendly and costless.}, } @article {pmid35430649, year = {2022}, author = {Farhadi, H and Mokhtarzade, M and Ebadi, H and Beirami, BA}, title = {Rapid and automatic burned area detection using sentinel-2 time-series images in google earth engine cloud platform: a case study over the Andika and Behbahan Regions, Iran.}, journal = {Environmental monitoring and assessment}, volume = {194}, number = {5}, pages = {369}, pmid = {35430649}, issn = {1573-2959}, mesh = {*Cloud Computing ; Environmental Monitoring ; Humans ; Iran ; Search Engine ; Water ; }, abstract = {For proper forest management, accurate detection and mapping of burned areas are needed, yet the practice is difficult to perform due to the lack of an appropriate method, time, and expense. It is also critical to obtain accurate information about the density and distribution of burned areas in a large forest and vegetated areas. For the most efficient and up-to-date mapping of large areas, remote sensing is one of the best technologies. However, the complex image scenario and the similar spectral behavior of classes in multispectral satellite images may lead to many false-positive mistakes, making it challenging to extract the burned areas accurately. 
This research aims to develop an automated framework in the Google Earth Engine (GEE) cloud computing platform for detecting burned areas in Andika and Behbahan, located in the south and southwest of Iran, using Sentinel-2 time-series images. After importing the images and applying the necessary preprocessing, the Sentinel-2 Burned Areas Index (BAIS2) was used to create a map of the Primary Burned Areas (PBA). Detection accuracy was then improved by masking out disturbing classes (vegetation and water) on the PBA map, which resulted in Final Burned Areas (FBA). The unimodal method is used to calculate the ideal thresholds of indices to make the proposed method automatic. The final results demonstrated that the proposed method performed well in both homogeneous and heterogeneous areas for detecting the burned areas. Based on a test dataset, maps of burned areas were produced in the Andika and Behbahan regions with an overall accuracy of 90.11% and 92.40% and a kappa coefficient of 0.87 and 0.88, respectively, which were highly accurate when compared to the BAIS2, Normalized Burn Ratio (NBR), Normalized Difference Vegetation Index (NDVI), Mid-Infrared Bispectral Index (MIRBI), and Normalized Difference SWIR (NDSWIR) indices. Based on the results, accurate determination of vegetation classes and water zones and eliminating them from the map of burned areas led to a considerable increase in the accuracy of the obtained final map from the BAIS2 spectral index.}, } @article {pmid35428085, year = {2022}, author = {Yaacoby, R and Schaar, N and Kellerhals, L and Raz, O and Hermelin, D and Pugatch, R}, title = {Comparison between a quantum annealer and a classical approximation algorithm for computing the ground state of an Ising spin glass.}, journal = {Physical review. 
E}, volume = {105}, number = {3-2}, pages = {035305}, doi = {10.1103/PhysRevE.105.035305}, pmid = {35428085}, issn = {2470-0053}, abstract = {Finding the ground state of an Ising spin glass on general graphs belongs to the class of NP-hard problems, widely believed to have no efficient polynomial-time algorithms to solve them. An approach developed in computer science for dealing with such problems is to devise approximation algorithms; these are algorithms, whose run time scales polynomially with the input size, that provide solutions with provable guarantees on their quality in terms of the optimal unknown solution. Recently, several algorithms for the Ising spin-glass problem on a bounded degree graph that provide different approximation guarantees were introduced. D-Wave, a Canadian-based company, has constructed a physical realization of a quantum annealer and has enabled researchers and practitioners to access it via their cloud service. D-Wave is particularly suited for computing an approximation for the ground state of an Ising spin glass on its Chimera and Pegasus graphs-both with a bounded degree. To assess the quality of D-Wave's solution, it is natural to compare it to classical approximation algorithms specifically designed to solve the same problem. In this work, we compare the performance of a recently developed approximation algorithm to solve the Ising spin-glass problem on graphs of bounded degree against the performance of the D-Wave computer. We also compared the performance of D-Wave's computer in the Chimera architecture against the performance of a heuristic tailored specifically to handle the Chimera graph. We found that the D-Wave computer was able to find better approximations for all the random instances of the problem we studied-Gaussian weights, uniform weights, and discrete binary weights. Furthermore, the convergence times of D-Wave's computer were also significantly better. 
These results indicate the merit of D-Wave's computer under certain specific instances. More broadly, our method is relevant to a wider class of performance comparison studies, and we suggest that it is important to compare the performance of quantum computers not only against exact classical algorithms with exponential run-time scaling, but also against approximation algorithms with polynomial run-time scaling and a provable guarantee of performance.}, } @article {pmid35421313, year = {2022}, author = {Zhang, S and Thompson, JP and Xia, J and Bogetti, AT and York, F and Skillman, AG and Chong, LT and LeBard, DN}, title = {Mechanistic Insights into Passive Membrane Permeability of Drug-like Molecules from a Weighted Ensemble of Trajectories.}, journal = {Journal of chemical information and modeling}, volume = {62}, number = {8}, pages = {1891-1904}, pmid = {35421313}, issn = {1549-960X}, support = {R01 GM115805/GM/NIGMS NIH HHS/United States ; }, mesh = {Cell Membrane Permeability ; Diffusion ; *Lipid Bilayers ; Molecular Dynamics Simulation ; Permeability ; *Phosphatidylcholines ; }, abstract = {Passive permeability of a drug-like molecule is a critical property assayed early in a drug discovery campaign that informs a medicinal chemist how well a compound can traverse biological membranes, such as gastrointestinal epithelial or restrictive organ barriers, so it can perform a specific therapeutic function. However, the challenge that remains is the development of a method, experimental or computational, which can both determine the permeation rate and provide mechanistic insights into the transport process to help with the rational design of any given molecule. 
Typically, one of the following three methods are used to measure the membrane permeability: (1) experimental permeation assays acting on either artificial or natural membranes; (2) quantitative structure-permeability relationship models that rely on experimental values of permeability or related pharmacokinetic properties of a range of molecules to infer those for new molecules; and (3) estimation of permeability from the Smoluchowski equation, where free energy and diffusion profiles along the membrane normal are taken as input from large-scale molecular dynamics simulations. While all these methods provide estimates of permeation coefficients, they provide very little information for guiding rational drug design. In this study, we employ a highly parallelizable weighted ensemble (WE) path sampling strategy, empowered by cloud computing techniques, to generate unbiased permeation pathways and permeability coefficients for a set of drug-like molecules across a neat 1-palmitoyl-2-oleoyl-sn-glycero-3-phosphatidylcholine membrane bilayer. Our WE method predicts permeability coefficients that compare well to experimental values from an MDCK-LE cell line and PAMPA assays for a set of drug-like amines of varying size, shape, and flexibility. Our method also yields a series of continuous permeation pathways weighted and ranked by their associated probabilities. 
Taken together, the ensemble of reactive permeation pathways, along with the estimate of the permeability coefficient, provides a clearer picture of the microscopic underpinnings of small-molecule membrane permeation.}, } @article {pmid35417349, year = {2023}, author = {Liu, Q and Su, H and Duanmu, Z and Liu, W and Wang, Z}, title = {Perceptual Quality Assessment of Colored 3D Point Clouds.}, journal = {IEEE transactions on visualization and computer graphics}, volume = {29}, number = {8}, pages = {3642-3655}, doi = {10.1109/TVCG.2022.3167151}, pmid = {35417349}, issn = {1941-0506}, mesh = {*Computer Graphics ; Databases, Factual ; *Multimedia ; Research Design ; }, abstract = {3D point clouds have found a wide variety of applications in multimedia processing, remote sensing, and scientific computing. Although most point cloud processing systems are developed to improve viewer experiences, little work has been dedicated to perceptual quality assessment of 3D point clouds. In this work, we build a new 3D point cloud database, namely the Waterloo Point Cloud (WPC) database. In contrast to existing datasets consisting of small-scale and low-quality source content of constrained viewing angles, the WPC database contains 20 high quality, realistic, and omni-directional source point clouds and 740 diversely distorted point clouds. We carry out a subjective quality assessment experiment over the database in a controlled lab environment. Our statistical analysis suggests that existing objective point cloud quality assessment (PCQA) models only achieve limited success in predicting subjective quality ratings. We propose a novel objective PCQA model based on an attention mechanism and a variant of information content-weighted structural similarity, which significantly outperforms existing PCQA models. 
The database has been made publicly available at https://github.com/qdushl/Waterloo-Point-Cloud-Database.}, } @article {pmid35411128, year = {2022}, author = {Wang, X and Carey, MJ and Tsotras, VJ}, title = {Subscribing to big data at scale.}, journal = {Distributed and parallel databases}, volume = {40}, number = {2-3}, pages = {475-520}, pmid = {35411128}, issn = {1573-7578}, abstract = {Today, data is being actively generated by a variety of devices, services, and applications. Such data is important not only for the information that it contains, but also for its relationships to other data and to interested users. Most existing Big Data systems focus on passively answering queries from users, rather than actively collecting data, processing it, and serving it to users. To satisfy both passive and active requests at scale, application developers need either to heavily customize an existing passive Big Data system or to glue one together with systems like Streaming Engines and Pub-sub services. Either choice requires significant effort and incurs additional overhead. In this paper, we present the BAD (Big Active Data) system as an end-to-end, out-of-the-box solution for this challenge. It is designed to preserve the merits of passive Big Data systems and introduces new features for actively serving Big Data to users at scale. 
We show the design and implementation of the BAD system, demonstrate how BAD facilitates providing both passive and active data services, investigate the BAD system's performance at scale, and illustrate the complexities that would result from instead providing BAD-like services with a "glued" system.}, } @article {pmid35408281, year = {2022}, author = {Filho, CP and Marques, E and Chang, V and Dos Santos, L and Bernardini, F and Pires, PF and Ochi, L and Delicato, FC}, title = {A Systematic Literature Review on Distributed Machine Learning in Edge Computing.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {7}, pages = {}, pmid = {35408281}, issn = {1424-8220}, support = {2015/24144-7//São Paulo Research Foundation/ ; E-26/200.938/2021//Fundação Carlos Chagas Filho de Amparo à Pesquisa do Estado do Rio de Janeiro/ ; }, mesh = {*Algorithms ; Intelligence ; *Machine Learning ; Publications ; }, abstract = {Distributed edge intelligence is a disruptive research area that enables the execution of machine learning and deep learning (ML/DL) algorithms close to where data are generated. Since edge devices are more limited and heterogeneous than typical cloud devices, many hindrances have to be overcome to fully extract the potential benefits of such an approach (such as data-in-motion analytics). In this paper, we investigate the challenges of running ML/DL on edge devices in a distributed way, paying special attention to how techniques are adapted or designed to execute on these restricted devices. The techniques under discussion pervade the processes of caching, training, inference, and offloading on edge devices. 
We also explore the benefits and drawbacks of these strategies.}, } @article {pmid35408246, year = {2022}, author = {Rakrouki, MA and Alharbe, N}, title = {QoS-Aware Algorithm Based on Task Flow Scheduling in Cloud Computing Environment.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {7}, pages = {}, pmid = {35408246}, issn = {1424-8220}, mesh = {*Algorithms ; *Cloud Computing ; Gravitation ; }, abstract = {This paper deals with the challenging problem of scheduling users' tasks, while taking into consideration users' quality of service (QoS) requirements, with the objective of reducing the energy consumption of physical machines. This paper presents a model to analyze the current state of the running tasks according to the results of the QoS prediction assigned by an ARIMA prediction model optimized with Kalman filter. Then, we calculate a scheduling policy with a combined particle swarm optimization (PSO) and gravitational search algorithm (GSA) algorithms according to the QoS status analysis. Experimental results show that the proposed HPSO algorithm reduces resources consumption 16.51% more than the original hybrid algorithm, and the violation of service-level agreement (SLA) is 0.053% less when the optimized prediction model is used.}, } @article {pmid35408212, year = {2022}, author = {Ji, X and Wei, H and Chen, Y and Ji, XF and Wu, G}, title = {A Three-Stage Dynamic Assessment Framework for Industrial Control System Security Based on a Method of W-HMM.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {7}, pages = {}, pmid = {35408212}, issn = {1424-8220}, support = {2019YFB1312202//the National Key R&D Program of China/ ; }, mesh = {Algorithms ; *Artificial Intelligence ; Big Data ; *Cloud Computing ; Machine Learning ; }, abstract = {Industrial control systems (ICS) are applied in many fields. 
Because the development of cloud computing, artificial intelligence, and big data analysis has induced more cyberattacks, ICS constantly faces security risks. If these risks occur during system operations, corporate capital is endangered.
Given the great potential that these technologies offer, the scientific community has come to use them in varied ways to solve a wide range of problems such as train failures, train station security, rail system control and communication in hard-to-reach areas, among others. For this reason, this paper aims to answer the following research questions: what are the main issues in the railway transport industry, what are the technologic strategies that are currently being used to solve these issues and what are the technologies from industry 4.0 that are used in the railway transport industry to solve the aforementioned issues?

METHODS: This study adopts a systematic literature review approach. We searched the Science Direct and Web of Science database inception from January 2017 to November 2021. Studies published in conferences or journals written in English or Spanish were included for initial process evaluation. The initial included papers were analyzed by authors and selected based on whether they helped answer the proposed research questions or not.

RESULTS: Of the recovered 515 articles, 109 were eligible, from which we could identify three main application domains in the railway industry: monitoring, decision and planification techniques, and communication and security. Regarding industry 4.0 technologies, we identified 9 different technologies applied in reviewed studies: Artificial Intelligence (AI), Internet of Things (IoT), Cloud Computing, Big Data, Cybersecurity, Modelling and Simulation, Smart Decision Support Systems (SDSS), Computer Vision and Virtual Reality (VR). This study is, to our knowledge, one of the first to show how industry 4.0 technologies are currently being used to tackle railway industry problems and current application trends in the scientific community, which is highly useful for the development of future studies and more advanced solutions.

FUNDING: Colombian national organizations Minciencias and the Mining-Energy Planning Unit.}, } @article {pmid35408047, year = {2022}, author = {Wang, Z and Wang, W and Zhang, Z and Hu, F and Xia, X and Chen, L}, title = {DeepEdge: A Novel Appliance Identification Edge Platform for Data Gathering, Capturing and Labeling.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {7}, pages = {}, pmid = {35408047}, issn = {1424-8220}, support = {62072319//National Natural Science Foundation of China/ ; 2019JDTD0001//Sichuan Science and Technology Program/ ; 2018YFB1601200//National Key Research and Development Program of China/ ; 2018YFB1601201//National Key Research and Development Program of China/ ; 2021CDLZ-11//Luzhou Science and Technology Innovation R&D Program/ ; 2021-YF05-02000-SN//Chengdu Technology Innovation R&D Program/ ; }, mesh = {*Algorithms ; }, abstract = {With the development of the Internet of Things for smart grid, the requirement for appliance monitoring has become an important topic. The first and most important step in appliance monitoring is to identify the type of appliance. Most of the existing appliance identification platforms are cloud based, thus they consume large computing resources and memory. Therefore, it is necessary to explore an edge identification platform with a low cost. In this work, a novel appliance identification edge platform for data gathering, capturing and labeling is proposed. 
Experiments show that this platform can achieve an average appliance identification accuracy of 98.5% and improve the accuracy of non-intrusive load disaggregation algorithms.}, } @article {pmid35405588, year = {2022}, author = {Feng, X and Jin, X and Zhou, R and Jiang, Q and Wang, Y and Zhang, X and Shang, K and Zhang, J and Yu, C and Shou, J}, title = {Deep learning approach identified a gene signature predictive of the severity of renal damage caused by chronic cadmium accumulation.}, journal = {Journal of hazardous materials}, volume = {433}, number = {}, pages = {128795}, doi = {10.1016/j.jhazmat.2022.128795}, pmid = {35405588}, issn = {1873-3336}, mesh = {Animals ; *Cadmium/metabolism ; Cadmium Chloride/toxicity ; *Deep Learning ; Kidney/metabolism ; Mice ; Mice, Inbred C57BL ; Oxidative Stress ; }, abstract = {Epidemiology studies have indicated that environmental cadmium exposure, even at low levels, will result in chronic cadmium accumulation in the kidney with profound adverse consequences and that the diabetic population is more susceptible. However, the underlying mechanisms are yet not fully understood. In the present study, we applied an animal model to study chronic cadmium exposure-induced renal injury and performed whole transcriptome profiling studies. Repetitive CdCl2 exposure resulted in cadmium accumulation and remarkable renal injuries in the animals. The diabetic ob/ob mice manifested increased severity of renal injury compared with the wild type C57BL/6 J littermate controls. RNA-Seq data showed that cadmium treatment induced dramatic gene expression changes in a dose-dependent manner. Among the differentially expressed genes include the apoptosis hallmark genes which significantly demarcated the treatment effects. Pathway enrichment and network analyses revealed biological oxidation (mainly glucuronidation) as one of the major stress responses induced by cadmium treatment. 
We next implemented a deep learning algorithm in conjunction with cloud computing and discovered a gene signature that can predict the degree of renal injury induced by cadmium treatment. The present study provided, for the first time, a comprehensive mechanistic understanding of chronic cadmium-induced nephrotoxicity in normal and diabetic populations at the whole genome level.}, } @article {pmid35401026, year = {2022}, author = {Ullah, A and Chakir, A}, title = {Improvement for tasks allocation system in VM for cloud datacenter using modified bat algorithm.}, journal = {Multimedia tools and applications}, volume = {81}, number = {20}, pages = {29443-29457}, pmid = {35401026}, issn = {1380-7501}, abstract = {Since its inception, cloud computing has greatly transformed our lives by connecting the entire world through shared computational resources over the internet. The COVID-19 pandemic has also disrupted the traditional learning and businesses and led us towards an era of cloud-based activities. Virtual machine is one of the main elements of virtualization in cloud computing that represents physical server into the virtual machine. The utilizations of these VM's are important to achieved effective task scheduling mechanism in cloud environment. This paper focuses on improvment of the task distribution system in VM for cloud computing using load balancing technique. For that reason modification took place at Bat algorithm fitness function value this section used in load balancer section. When algorithm iteration are complete then time to distribute the task among different VM therefore in this section of algorithm was modified. The second modification took place at the search process of Bat at dimension section. The proposed algorithm is known as modified Bat algorithm. Four parameter are used to check the performance of the system which are throughput, makespan, degree of imbalance and processing time. 
The proposed algorithm provides efficient results as compared to other standard techniques. Hence, the proposed algorithm improved cloud data center accuracy and efficiency.
This work also encourages the young researchers to explore the wide possibilities present in this emerging field.}, } @article {pmid35395654, year = {2022}, author = {Banerjee, AN}, title = {Green syntheses of graphene and its applications in internet of things (IoT)-a status review.}, journal = {Nanotechnology}, volume = {33}, number = {32}, pages = {}, doi = {10.1088/1361-6528/ac6599}, pmid = {35395654}, issn = {1361-6528}, abstract = {Internet of Things (IoT) is a trending technological field that converts any physical object into a communicable smarter one by converging the physical world with the digital world. This innovative technology connects the device to the internet and provides a platform to collect real-time data, cloud storage, and analyze the collected data to trigger smart actions from a remote location via remote notifications, etc. Because of its wide-ranging applications, this technology can be integrated into almost all the industries. Another trending field with tremendous opportunities is Nanotechnology, which provides many benefits in several areas of life, and helps to improve many technological and industrial sectors. So, integration of IoT and Nanotechnology can bring about the very important field of Internet of Nanothings (IoNT), which can re-shape the communication industry. For that, data (collected from trillions of nanosensors, connected to billions of devices) would be the 'ultimate truth', which could be generated from highly efficient nanosensors, fabricated from various novel nanomaterials, one of which is graphene, the so-called 'wonder material' of the 21st century. Therefore, graphene-assisted IoT/IoNT platforms may revolutionize the communication technologies around the globe. In this article, a status review of the smart applications of graphene in the IoT sector is presented. 
Firstly, various green synthesis of graphene for sustainable development is elucidated, followed by its applications in various nanosensors, detectors, actuators, memory, and nano-communication devices. Also, the future market prospects are discussed to converge various emerging concepts like machine learning, fog/edge computing, artificial intelligence, big data, and blockchain, with the graphene-assisted IoT field to bring about the concept of 'all-round connectivity in every sphere possible'.}, } @article {pmid35394342, year = {2022}, author = {Dall'Alba, G and Casa, PL and Abreu, FP and Notari, DL and de Avila E Silva, S}, title = {A Survey of Biological Data in a Big Data Perspective.}, journal = {Big data}, volume = {10}, number = {4}, pages = {279-297}, doi = {10.1089/big.2020.0383}, pmid = {35394342}, issn = {2167-647X}, mesh = {*Big Data ; Cloud Computing ; *Data Mining/methods ; Machine Learning ; Neural Networks, Computer ; }, abstract = {The amount of available data is continuously growing. This phenomenon promotes a new concept, named big data. The highlight technologies related to big data are cloud computing (infrastructure) and Not Only SQL (NoSQL; data storage). In addition, for data analysis, machine learning algorithms such as decision trees, support vector machines, artificial neural networks, and clustering techniques present promising results. In a biological context, big data has many applications due to the large number of biological databases available. Some limitations of biological big data are related to the inherent features of these data, such as high degrees of complexity and heterogeneity, since biological systems provide information from an atomic level to interactions between organisms or their environment. Such characteristics make most bioinformatic-based applications difficult to build, configure, and maintain. 
Although the rise of big data is relatively recent, it has contributed to a better understanding of the underlying mechanisms of life. The main goal of this article is to provide a concise and reliable survey of the application of big data-related technologies in biology. As such, some fundamental concepts of information technology, including storage resources, analysis, and data sharing, are described along with their relation to biological data.}, } @article {pmid35392801, year = {2022}, author = {Pallotta, S and Cascianelli, S and Masseroli, M}, title = {RGMQL: scalable and interoperable computing of heterogeneous omics big data and metadata in R/Bioconductor.}, journal = {BMC bioinformatics}, volume = {23}, number = {1}, pages = {123}, pmid = {35392801}, issn = {1471-2105}, support = {693174//h2020 european research council/ ; }, mesh = {Big Data ; Cloud Computing ; Genomics ; *Metadata ; *Software ; }, abstract = {BACKGROUND: Heterogeneous omics data, increasingly collected through high-throughput technologies, can contain hidden answers to very important and still unsolved biomedical questions. Their integration and processing are crucial mostly for tertiary analysis of Next Generation Sequencing data, although suitable big data strategies still address mainly primary and secondary analysis. Hence, there is a pressing need for algorithms specifically designed to explore big omics datasets, capable of ensuring scalability and interoperability, possibly relying on high-performance computing infrastructures.

RESULTS: We propose RGMQL, a R/Bioconductor package conceived to provide a set of specialized functions to extract, combine, process and compare omics datasets and their metadata from different and differently localized sources. RGMQL is built over the GenoMetric Query Language (GMQL) data management and computational engine, and can leverage its open curated repository as well as its cloud-based resources, with the possibility of outsourcing computational tasks to GMQL remote services. Furthermore, it overcomes the limits of the GMQL declarative syntax, by guaranteeing a procedural approach in dealing with omics data within the R/Bioconductor environment. But mostly, it provides full interoperability with other packages of the R/Bioconductor framework and extensibility over the most used genomic data structures and processing functions.

CONCLUSIONS: RGMQL is able to combine the query expressiveness and computational efficiency of GMQL with a complete processing flow in the R environment, being a fully integrated extension of the R/Bioconductor framework. Here we provide three fully reproducible example use cases of biological relevance that are particularly explanatory of its flexibility of use and interoperability with other R/Bioconductor packages. They show how RGMQL can easily scale up from local to parallel and cloud computing while it combines and analyzes heterogeneous omics data from local or remote datasets, both public and private, in a completely transparent way to the user.}, } @article {pmid35387274, year = {2021}, author = {Reza, MNH and Jayashree, S and Malarvizhi, CAN and Rauf, MA and Jayaraman, K and Shareef, SH}, title = {The implications of Industry 4.0 on supply chains amid the COVID-19 pandemic: a systematic review.}, journal = {F1000Research}, volume = {10}, number = {}, pages = {1008}, pmid = {35387274}, issn = {2046-1402}, mesh = {*COVID-19/epidemiology ; Disease Outbreaks ; Humans ; Pandemics ; Technology ; }, abstract = {Background: COVID-19 has caused significant disruptions in supply chains. It has increased the demand for products and decreased the supply of raw materials. This has interrupted many production processes. The emerging technologies of Industry 4.0 have the potential to streamline supply chains by improving time-sensitive customized solutions during this emergency. Purpose: The study identifies the core technologies of Industry 4.0 and the role and impact of these technologies in managing the disruption caused by the COVID-19 outbreak in strengthening the supply chain resilience. Design/methodology/approach: An extensive literature review using the "Preferred Reporting Items for Systematic Review and Meta-Analysis" method was carried out on the impact of the COVID-19 pandemic on supply chains and Industry 4.0 technologies. 
The study was undertaken by selecting keywords validated by experts, and a search was conducted in the Scopus, ProQuest, and Google Scholar databases. Publications from the leading journals on these topics were selected. The bibliographical search resulted in 1484 articles, followed by multiple layers of filtering. Finally, the most pertinent articles were selected for review, and a total of 42 articles were analyzed. Findings: The findings of the study showed that the majority of the articles emphasized the digitalization of supply chain management, acknowledging the fundamentals, applications, and prospects, revealing the drivers and challenges of Industry 4.0 technologies to manage disruptions. Most of the authors identified IoT, big data, cloud computing, additive manufacturing, and blockchain to maintain the supply chain resilience. Originality/value: Existing literature on epidemics lacks the basics and practices of utilizing Industry 4.0 technologies in the supply chain recovery process. To fill this research gap, the study summarizes the potential of Industry 4.0 technologies to lessen supply chain disruptions caused by COVID-19. The study findings are valuable for policymakers and practitioners and contribute to supply chain management studies.}, } @article {pmid35387251, year = {2022}, author = {Jain, A and Nadeem, A and Majdi Altoukhi, H and Jamal, SS and Atiglah, HK and Elwahsh, H}, title = {Personalized Liver Cancer Risk Prediction Using Big Data Analytics Techniques with Image Processing Segmentation.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {8154523}, pmid = {35387251}, issn = {1687-5273}, mesh = {Algorithms ; Cloud Computing ; *Data Science ; Humans ; Image Processing, Computer-Assisted ; *Liver Neoplasms/diagnostic imaging ; }, abstract = {A technology known as data analytics is a massively parallel processing approach that may be used to forecast a wide range of illnesses. 
Many scientific research methodologies have the problem of requiring a significant amount of time and processing effort, which has a negative impact on the overall performance of the system. Virtual screening (VS) is a drug discovery approach that makes use of big data techniques and is based on the concept of virtual screening. This approach is utilised for the development of novel drugs, and it is a time-consuming procedure that includes the docking of ligands in several databases in order to build the protein receptor. The proposed work is divided into two modules: image processing-based cancer segmentation and analysis using extracted features using big data analytics, and cancer segmentation and analysis using extracted features using image processing. This statistical approach is critical in the development of new drugs for the treatment of liver cancer. Machine learning methods were utilised in the prediction of liver cancer, including the MapReduce and Mahout algorithms, which were used to prefilter the set of ligand filaments before they were used in the prediction of liver cancer. This work proposes the SMRF algorithm, an improved scalable random forest algorithm built on the MapReduce foundation. Using a computer cluster or cloud computing environment, this new method categorises massive datasets. With SMRF, small amounts of data are processed and optimised over a large number of computers, allowing for the highest possible throughput. When compared to the standard random forest method, the testing findings reveal that the SMRF algorithm exhibits the same level of accuracy deterioration but exhibits superior overall performance. 
The accuracy range of 80 percent using the performance metrics analysis is included in the actual formulation of the medicine that is utilised for liver cancer prediction in this study.}, } @article {pmid35378813, year = {2022}, author = {Jiang, M and Sun, Y}, title = {An Optimized Decision Method for Smart Teaching Effect Based on Cloud Computing and Deep Learning.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {6907172}, pmid = {35378813}, issn = {1687-5273}, mesh = {*Cloud Computing ; *Deep Learning ; Humans ; Students ; Universities ; }, abstract = {In order to improve the effect of intelligent teaching and give full play to the role of intelligent technology in modern physical education, in this paper, cloud computing and deep learning methods are used to comprehensively evaluate the teaching effect of colleges and universities, and calculate the evaluation effect and accuracy. Cloud computing and deep learning algorithm combine the teaching evaluation scale, teaching content, and characteristics to formulate teaching plans for different students and realize targeted teaching evaluation. The results show that the teaching evaluation method proposed in this paper can improve students' learning interest by about 30%, enhance learning initiative by about 20%, and the matching rate between the actual teaching effect and the expected requirements is 98%. 
Therefore, cloud computing and deep learning model can improve the accuracy of teaching effect evaluation in colleges and universities, provide support for the formulation of teaching evaluation schemes, and promote the development of intelligent teaching in colleges and universities.}, } @article {pmid35371335, year = {2022}, author = {Almurisi, N and Tadisetty, S}, title = {Cloud-based virtualization environment for IoT-based WSN: solutions, approaches and challenges.}, journal = {Journal of ambient intelligence and humanized computing}, volume = {13}, number = {10}, pages = {4681-4703}, pmid = {35371335}, issn = {1868-5137}, abstract = {Internet of Things (IoT) is an ever-growing technology that enables advanced communication among millions of various devices to provide ubiquitous services without human intervention. The potential growth of electronic devices in sensing systems has led to the realization of IoT paradigm where applications depend on sensors to interact with the environment and collect data in a real-time scenario. Nowadays, smart applications require fast data acquisition, parallel processing, and dynamic resource sharing. Unfortunately, these requirements can not be supported efficiently with traditional Wireless Sensor Networks (WSN) due to the deficiency of computing resources and the lack of resource-sharing. Therefore, it is not recommended to develop innovative applications based on these constrained devices without further enhancement and improvement. Hence, this article explores a coeffective solution based on Cloud Computing and Virtualization Techniques to address these challenges. Cloud computing provides efficient computing resources and huge storage space, while the virtualization technique allows resources to be virtualized and shared between various applications. 
Integrating IoT-WSN with the Cloud-based Virtualization Environment will eliminate the drawbacks and limitations of conventional networks and facilitate the development of novel applications in a more flexible way. Furthermore, this article reviews the recent trends in IoT-WSN, virtualization techniques, and cloud computing. Also, we present the integration process of sensor networks with Cloud-based Virtualization and propose a new general architecture view for the Sensor-Cloud paradigm, and discuss its key elements, basic principles, lifecycle operation, and outline its advantages and disadvantages. Finally, we review the state-of-the-art, present the major challenges, and suggest future work directions.}, } @article {pmid35371196, year = {2022}, author = {Shang, R and Qin, Y}, title = {Research on Humanistic Quality Higher Medical Education Based on Internet of Things and Intelligent Computing.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {8633190}, pmid = {35371196}, issn = {1687-5273}, mesh = {*Education, Medical ; Humans ; *Internet of Things ; }, abstract = {The importance of the humanities in promoting economic and social development is becoming increasingly clear. Combining humanities with higher medical education in order to meet the needs of medical talent training in the new situation has become a key component of higher medical education reform and development. Adult higher medical education is an integral part of higher medical education, but it has different training objectives and training objects than regular higher medical education. These technological advancements are certain to hasten the continued emergence of education cloud or industry cloud, create a good information-based environment for education informatization improvement, and pose technical challenges to resource allocation in intelligent computing environments. 
Humanistic quality higher medical education based on the Internet of Things and intelligent computing makes the efficient intelligent information system more open, interactive, and coordinated, allowing students and teachers to perceive a variety of teaching resources more comprehensively.}, } @article {pmid35371194, year = {2022}, author = {Ma, S and Hao, F and Lin, Y and Liang, Y}, title = {The Construction of Big Data Computational Intelligence System for E-Government in Cloud Computing Environment and Its Development Impact.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {7295060}, pmid = {35371194}, issn = {1687-5273}, mesh = {Algorithms ; Artificial Intelligence ; *Big Data ; *Cloud Computing ; Government ; }, abstract = {The traditional E-government big data system fills and classifies algorithms with low accuracy and poor work efficiency. With the development and wide application of big data, the internet of things, and other technologies, the integration of information resources has become the key to information construction. In the process of information resource integration, there are still outstanding problems such as incomplete government information resource system, different standards of government information resource management system construction, and serious threats to network and information security. In order to solve this problem, a new E-government big data system filling and classification algorithm is studied in the cloud computing environment; E-government big data filling is carried out on the basis of complete compatibility theory; and the E-government big data computing intelligence system in the cloud computing environment is constructed and its development impact, so as to parallelize the data, classify the data through decision trees, and realize incremental update decision forest parallelization processing. 
To verify the effectiveness of the method, comparative experiments are set, and the results demonstrate that experiment one is randomly built into the classification model, and according to the decision forest algorithm, the optimal number of decision trees is 24.}, } @article {pmid35370340, year = {2022}, author = {Kombaya Touckia, J and Hamani, N and Kermad, L}, title = {Digital twin framework for reconfigurable manufacturing systems (RMSs): design and simulation.}, journal = {The International journal, advanced manufacturing technology}, volume = {120}, number = {7-8}, pages = {5431-5450}, pmid = {35370340}, issn = {0268-3768}, abstract = {Faced with the global crisis of COVID-19 and the strong increase in customer demands, competition is becoming more intense between companies, on the one hand, and supply chains on the other. This competition has led to the development of new strategies to manage demand and increase market share. Among these strategies are the growing interest in sustainable manufacturing and the need for customizable products that create an increasingly complex manufacturing environment. Sustainable manufacturing and the need for customizable products create an environment of increased competition and constant change. Indeed, companies are trying to establish more flexible and agile manufacturing systems through several systems of reconfiguration. Reconfiguration contributes to an extension of the manufacturing system's life cycle by modifying its physical, organizational and IT characteristics according to the changing market conditions. Due to the rapid development of new information technology (such as IoT, Big Data analytics, cyber-physical systems, cloud computing and artificial intelligence), digital twins have become intensively used in smart manufacturing. 
This paper proposes a digital twin design and simulation model for reconfigurable manufacturing systems (RMSs).}, } @article {pmid35369530, year = {2022}, author = {Taniguchi, Y and Ikegami, Y and Fujikawa, H and Pathare, Y and Kutics, A and Massimo, B and Anisetti, M and Damiani, E and Sakurai, Y and Tsuruta, S}, title = {Counseling (ro)bot as a use case for 5G/6G.}, journal = {Complex & intelligent systems}, volume = {8}, number = {5}, pages = {3899-3917}, pmid = {35369530}, issn = {2198-6053}, abstract = {This paper presents a counseling (ro)bot called Visual Counseling Agent (VICA) which focuses on remote mental healthcare. It is an agent system leveraging artificial intelligence (AI) to aid mentally distressed persons through speech conversation. The system terminals are connected to servers by the Internet exploiting Cloud-nativeness, so that anyone who has any type of terminal can use it from anywhere. Despite a promising voice communication interface, VICA shows limitations in conversation continuity on conventional 4G networks. Concretely, the use of the current 4G networks produces word dropping, delayed response, and the occasional connection failure. The objective of this paper is to mitigate these issues by leveraging a 5G/6G slice inclusive of mobile/multiple edge computing (MEC). First, we propose and partly implement the enhanced and advanced version of VICA. Servers of enhanced versions collaborate to increase speech recognition reliability. Although it significantly increases generated data volume, the advanced version enables a recognition of the facial expressions to greatly enhance counseling quality. Then, we propose a quality assurance mechanism using multiple levels of catalog, as well as 5G/6G slice inclusive of MEC, and conduct experiments to uncover issues related to the 4G. 
Results indicate that the number of speech recognition errors in Internet Cloud is more than twofold compared to edge computing, implying that quality assurance using 5G/6G in conjunction with VICA Counseling (ro)bot has higher efficiency.}, } @article {pmid35368952, year = {2022}, author = {Chen, Y and Wang, J and Gao, W and Yu, D and Shou, X}, title = {Construction and Clinical Application Effect of General Surgery Patient-Oriented Nursing Information Platform Using Cloud Computing.}, journal = {Journal of healthcare engineering}, volume = {2022}, number = {}, pages = {8273701}, pmid = {35368952}, issn = {2040-2309}, mesh = {*Cloud Computing ; Humans ; *Software ; Technology ; }, abstract = {The paper aims to build a nursing information platform (NIP) for general surgery (GS) patients and explore its clinical application effect based on cloud computing (CC) technology. Specifically, the present work first analyzes and expounds on the characteristics of GS patients, the CC concept, the three-tier service mode of CC, and the cloud data center (CDC). Secondly, based on the principle of the overall system design, the evaluation indexes of medical care end, patient end, family end, and management end are constructed using Visual Studio 2010. Thirdly, the expert evaluation and user evaluation methods are selected to analyze the clinical application effect of the proposed system. Finally, SPSS is used to analyze the effect of the proposed system. The results of the first and second rounds of the expert evaluation show that the authority coefficient of experts is greater than 0.7, which indicates that the degree of expert authority is good. The proposed CC-based GS patient-oriented NIP system is universal. The evaluation results of 20 users have shown 15 doctors and nurses, 14 patients, and 18 family members, who mostly still support applying the proposed CC-based GS patient-oriented NIP system and believe that the system brings convenience and improves work efficiency. 
In short, more incentives should be taken to build a NIP for GS patients.}, } @article {pmid35368911, year = {2022}, author = {Hayyolalam, V and Otoum, S and Özkasap, Ö}, title = {Dynamic QoS/QoE-aware reliable service composition framework for edge intelligence.}, journal = {Cluster computing}, volume = {25}, number = {3}, pages = {1695-1713}, pmid = {35368911}, issn = {1386-7857}, abstract = {Edge intelligence has become popular recently since it brings smartness and copes with some shortcomings of conventional technologies such as cloud computing, Internet of Things (IoT), and centralized AI adoptions. However, although utilizing edge intelligence contributes to providing smart systems such as automated driving systems, smart cities, and connected healthcare systems, it is not free from limitations. There exist various challenges in integrating AI and edge computing, one of which is addressed in this paper. Our main focus is to handle the adoption of AI methods on resource-constrained edge devices. In this regard, we introduce the concept of Edge devices as a Service (EdaaS) and propose a quality of service (QoS) and quality of experience (QoE)-aware dynamic and reliable framework for AI subtasks composition. The proposed framework is evaluated utilizing three well-known meta-heuristics in terms of various metrics for a connected healthcare application scenario. The experimental results confirm the applicability of the proposed framework. Moreover, the results reveal that black widow optimization (BWO) can handle the issue more efficiently compared to particle swarm optimization (PSO) and simulated annealing (SA). The overall efficiency of BWO over PSO is 95%, and BWO outperforms SA with 100% efficiency. 
It means that BWO prevails over SA and PSO in all and 95% of the experiments, respectively.}, } @article {pmid35365721, year = {2022}, author = {Osipov, V and Zhukova, N and Subbotin, A and Glebovskiy, P and Evnevich, E}, title = {Intelligent escalator passenger safety management.}, journal = {Scientific reports}, volume = {12}, number = {1}, pages = {5506}, pmid = {35365721}, issn = {2045-2322}, mesh = {Algorithms ; *Elevators and Escalators ; *Neural Networks, Computer ; Safety Management ; Software ; }, abstract = {This article addresses an approach to intelligent safety control of passengers on escalators. The aim is to improve the accuracy of detecting threatening situations on escalators in the subway to make decisions to prevent threats and eliminate the consequences. The novelty of the approach lies in the complex processing of information from three types of sources (video, audio, sensors) using machine learning methods and recurrent neural networks with controlled elements. The conditions and indicators of safety assurance efficiency are clarified. New methods and algorithms for managing the safety of passengers on escalators are proposed. The architecture of a promising safety software system is developed, and implementation of its components for cloud and fog computing environments is provided. Modeling results confirm the capabilities and advantages of the proposed technological solutions for enhancing the safety of escalator passengers, efficiency of control decision making, and system usability. Due to the proposed solutions, it has become possible to increase the speed of identifying situations 3.5 times and increase the accuracy of their determination by 26%. 
The efficiency of decision making has increased by almost 30%.}, } @article {pmid35360481, year = {2022}, author = {Yang, Y and Chang, Q and Chen, J and Zhou, X and Xue, Q and Song, A}, title = {Construction of a Health Management Model for Early Identification of Ischaemic Stroke in Cloud Computing.}, journal = {Journal of healthcare engineering}, volume = {2022}, number = {}, pages = {1018056}, pmid = {35360481}, issn = {2040-2309}, mesh = {*Brain Ischemia/diagnosis ; Cloud Computing ; Humans ; *Ischemic Stroke ; *Stroke/diagnosis ; }, abstract = {Knowledge discovery and cloud computing can help early identification of ischaemic stroke and provide intelligent, humane, and preventive healthcare services for patients at high risk of stroke. This study proposes constructing a health management model for early identification and warning of ischaemic stroke based on IoT and cloud computing, and discusses its connotation, constructive ideas, and research content so as to provide reference for its health management in order to develop and implement countermeasures and to compare the awareness of early stroke symptoms and first aid knowledge among stroke patients and their families before and after the activity. 
The rate of awareness of early symptoms and first aid among stroke patients and their families increased from 36% to 78%, and the difference was statistically significant (P < 0.05) before and after the activity.}, } @article {pmid35360368, year = {2022}, author = {Brotherton, T and Brotherton, S and Ashworth, H and Kadambi, A and Ebrahim, H and Ebrahim, S}, title = {Development of an Offline, Open-Source, Electronic Health Record System for Refugee Care.}, journal = {Frontiers in digital health}, volume = {4}, number = {}, pages = {847002}, pmid = {35360368}, issn = {2673-253X}, abstract = {While electronic health records (EHRs) have been shown to be effective in improving patient care in low-resource settings, there are still barriers to implementing them, including adaptability, usability, and sustainability. Taking a user-centered design process we developed the Hikma Health EHR for low resourced clinics caring for displaced populations. This EHR was built using React Native and Typescript that sync to a Python backend repository which is deployed on Google Cloud SQL. To date the Hikma Health EHR has been deployed for 26,000 patients. The positive impacts of the system reported by clinician users are 3-fold: (1) improved continuity of care; (2) improved visualization of clinical data; and (3) improved efficiency, resulting in a higher volume of patients being treated. 
While further development is needed, our open-source model will allow any organization to modify this system to meet their clinical and administrative needs.}, } @article {pmid35353698, year = {2022}, author = {Gauthier, B and Painchaud-April, G and Le Duff, A and Belanger, P}, title = {Lightweight and Amplitude-Free Ultrasonic Imaging Using Single-Bit Digitization and Instantaneous Phase Coherence.}, journal = {IEEE transactions on ultrasonics, ferroelectrics, and frequency control}, volume = {69}, number = {5}, pages = {1763-1774}, doi = {10.1109/TUFFC.2022.3163621}, pmid = {35353698}, issn = {1525-8955}, mesh = {*Algorithms ; Signal-To-Noise Ratio ; *Ultrasonics ; Ultrasonography/methods ; }, abstract = {In the field of ultrasonic nondestructive testing (NDT), the total focusing method (TFM) and its derivatives are now commercially available on portable devices and are getting more popular within the NDT community. However, its implementation requires the collection of a very large amount of data with the full matrix capture (FMC) as the worst case scenario. Analyzing all the data also requires significant processing power, and consequently, there is an interest in: 1) reducing the required storage capacity used by imaging algorithms, such as delay-and-sum (DAS) imaging and 2) allowing the transmission and postprocessing of inspection data remotely. In this study, a different implementation of the TFM algorithm is used based on the vector coherence factor (VCF) that is used as an image itself. This method, also generally known as phase coherence imaging, presents certain advantages, such as a better sensitivity to diffracting geometries, consistency of defect restitution among different views, and an amplitude-free behavior as only the instantaneous phase of the signal is considered. 
Some drawbacks of this method must also be mentioned, including the fact that it poorly reproduces planar reflectors and presents a lower signal-to-noise ratio (SNR) than amplitude-based methods. However, previous studies showed that it can be used as a reliable tool for crack-like defect sizing. Thus, a lightweight acquisition process is proposed through single-bit digitization of the signal, followed by a phase retrieval method based on the rising and falling edge locations, allowing to feed the phase coherence imaging algorithm. Simulated and experimental tests were first performed in this study on several side-drilled holes (SDHs) in a stainless steel block and then extended to an experimental study on angled notches in a 19.05-mm (3/4")-thick steel sample plate through multiview imaging. Results obtained using the array performance indicator (API) and the contrast-to-noise ratio (CNR) as quantitative evaluation parameters showed that the proposed lightweight acquisition process, which relies on binary signals, allows a reduction of the data throughput of up to 47 times. This throughput reduction is achieved while still presenting very similar results to phase coherence imaging based on the instantaneous phase derived from the Hilbert transform of the full waveform. 
In an era of increasing wireless network speed and cloud computing, these results allow considering interesting perspectives for the reduction of inspection hardware costs and remote postprocessing.}, } @article {pmid35353508, year = {2022}, author = {Kutzner, C and Kniep, C and Cherian, A and Nordstrom, L and Grubmüller, H and de Groot, BL and Gapsys, V}, title = {GROMACS in the Cloud: A Global Supercomputer to Speed Up Alchemical Drug Design.}, journal = {Journal of chemical information and modeling}, volume = {62}, number = {7}, pages = {1691-1711}, pmid = {35353508}, issn = {1549-960X}, mesh = {Cloud Computing ; *Computers ; *Computing Methodologies ; Drug Design ; Ligands ; Molecular Dynamics Simulation ; }, abstract = {We assess costs and efficiency of state-of-the-art high-performance cloud computing and compare the results to traditional on-premises compute clusters. Our use case is atomistic simulations carried out with the GROMACS molecular dynamics (MD) toolkit with a particular focus on alchemical protein-ligand binding free energy calculations. We set up a compute cluster in the Amazon Web Services (AWS) cloud that incorporates various different instances with Intel, AMD, and ARM CPUs, some with GPU acceleration. Using representative biomolecular simulation systems, we benchmark how GROMACS performs on individual instances and across multiple instances. Thereby we assess which instances deliver the highest performance and which are the most cost-efficient ones for our use case. We find that, in terms of total costs, including hardware, personnel, room, energy, and cooling, producing MD trajectories in the cloud can be about as cost-efficient as an on-premises cluster given that optimal cloud instances are chosen. Further, we find that high-throughput ligand-screening can be accelerated dramatically by using global cloud resources. 
For a ligand screening study consisting of 19 872 independent simulations or ∼200 μs of combined simulation trajectory, we made use of diverse hardware available in the cloud at the time of the study. The computations scaled up to reach peak performance using more than 4 000 instances, 140 000 cores, and 3 000 GPUs simultaneously. Our simulation ensemble finished in about 2 days in the cloud, while weeks would be required to complete the task on a typical on-premises cluster consisting of several hundred nodes.}, } @article {pmid35345804, year = {2022}, author = {He, J}, title = {Decision Scheduling for Cloud Computing Tasks Relying on Solving Large Linear Systems of Equations.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {3411959}, pmid = {35345804}, issn = {1687-5273}, mesh = {*Algorithms ; *Cloud Computing ; Humans ; Reproducibility of Results ; }, abstract = {With the continuous reform and innovation of Internet technology and the continuous development and progress of social economy, Big Data cloud computing technology is more and more widely used in people's work and life. Many parallel algorithms play a very important role in solving large linear equations in various applications. To this end, this article aims to propose and summarize a cloud computing task scheduling model that relies on the solution of large linear equations. The method of this paper is to study the technology of solving large-scale linear equations and propose an M-QoS-OCCSM scheduling model. The function of the experimental method is to solve the problem of efficiently executing N mutually dependent parallel tasks within limited resources, while fully satisfying users' expectations of task completion time, bandwidth rate, reliability, and cost. In this paper, the application experiment of large-scale linear equations in task scheduling is used to study task scheduling algorithms. 
The results show that when the task load is 10 and 20, the convergence speed of the MPQGA algorithm is 32 seconds and 95 seconds faster than that of the BGA algorithm, respectively.}, } @article {pmid35345578, year = {2023}, author = {Swain, AK and Garza, VR}, title = {Key Factors in Achieving Service Level Agreements (SLA) for Information Technology (IT) Incident Resolution.}, journal = {Information systems frontiers : a journal of research and innovation}, volume = {25}, number = {2}, pages = {819-834}, pmid = {35345578}, issn = {1387-3326}, abstract = {In this paper, we analyze the impact of various factors on meeting service level agreements (SLAs) for information technology (IT) incident resolution. Using a large IT services incident dataset, we develop and compare multiple models to predict the value of a target Boolean variable indicating whether an incident met its SLA. Logistic regression and neural network models are found to have the best performance in terms of misclassification rates and average squared error. From the best-performing models, we identify a set of key variables that influence the achievement of SLAs. Based on model insights, we provide a thorough discussion of IT process management implications. 
We suggest several strategies that can be adopted by incident management teams to improve the quality and effectiveness of incident management processes, and recommend avenues for future research.}, } @article {pmid35341205, year = {2022}, author = {Gunjan, VK and Vijayalata, Y and Valli, S and Kumar, S and Mohamed, MO and Saravanan, V}, title = {Machine Learning and Cloud-Based Knowledge Graphs to Recognize Suicidal Mental Tendencies.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {3604113}, pmid = {35341205}, issn = {1687-5273}, mesh = {*Cloud Computing ; Humans ; Machine Learning ; Pattern Recognition, Automated ; *Suicidal Ideation ; }, abstract = {To improve the quality of knowledge service selection in a cloud manufacturing environment, this paper proposes a cloud manufacturing knowledge service optimization decision method based on users' psychological behavior. Based on the characteristic analysis of cloud manufacturing knowledge service, establish the optimal evaluation index system of cloud manufacturing knowledge service, use the rough set theory to assign initial weights to each evaluation index, and adjust the initial weights according to the user's multiattribute preference to ensure that the consequences are allocated correctly. The system can help counselors acquire psychological knowledge in time and identify counselors with suicidal tendencies to prevent danger. This paper collected some psychological information data and built a knowledge graph by creating a dictionary and generating entities and relationships. The Han language processing word segmentation tool generates keywords, and CHI (Chi-square) feature selection is used to classify the problem. This feature selection is a statistical premise test that is acceptable when the chi-square test results are distributed with the null hypothesis. It includes the Pearson chi-square test and its variations. 
The Chi-square test has several benefits, including its distributed processing resilience, ease of computation, broad information gained from the test, usage in research when statistical assumptions are not satisfied, and adaptability in organizing information from multiple or many more group investigations. For improving question and answer efficiency, compared with other models, the BiLSTM (bidirectional long short-term memory) model is preferred to build suicidal tendencies. The Han language processing is a method that is used for word segmentation, and the advantage of this method is that it plays a key role in the word segmentation tool and generates keywords, and CHI (Chi-square) feature selection is used to classify the problem. Text classifier detects dangerous user utterances, question template matching, and answer generation by computing similarity scores. Finally, the system accuracy test is carried out, proving that the system can effectively answer the questions related to psychological counseling. The extensive experiments reveal that the method in this paper's accuracy rate, recall rate, and F1 value is much superior to other standard models in detecting psychological issues.}, } @article {pmid35341063, year = {2022}, author = {Grassi, L and Recchiuto, CT and Sgorbissa, A}, title = {Knowledge-Grounded Dialogue Flow Management for Social Robots and Conversational Agents.}, journal = {International journal of social robotics}, volume = {14}, number = {5}, pages = {1273-1293}, pmid = {35341063}, issn = {1875-4791}, abstract = {The article proposes a system for knowledge-based conversation designed for Social Robots and other conversational agents. The proposed system relies on an Ontology for the description of all concepts that may be relevant conversation topics, as well as their mutual relationships. The article focuses on the algorithm for Dialogue Management that selects the most appropriate conversation topic depending on the user input. 
Moreover, it discusses strategies to ensure a conversation flow that captures, as coherently as possible, the user intention to drive the conversation in specific directions while avoiding purely reactive responses to what the user says. To measure the quality of the conversation, the article reports the tests performed with 100 recruited participants, comparing five conversational agents: (i) an agent addressing dialogue flow management based only on the detection of keywords in the speech, (ii) an agent based both on the detection of keywords and the Content Classification feature of Google Cloud Natural Language, (iii) an agent that picks conversation topics randomly, (iv) a human pretending to be a chatbot, and (v) one of the most famous chatbots worldwide: Replika. The subjective perception of the participants is measured both with the SASSI (Subjective Assessment of Speech System Interfaces) tool, as well as with a custom survey for measuring the subjective perception of coherence.}, } @article {pmid35340260, year = {2022}, author = {Elhadad, A and Alanazi, F and Taloba, AI and Abozeid, A}, title = {Fog Computing Service in the Healthcare Monitoring System for Managing the Real-Time Notification.}, journal = {Journal of healthcare engineering}, volume = {2022}, number = {}, pages = {5337733}, pmid = {35340260}, issn = {2040-2309}, mesh = {Cloud Computing ; Computers ; Delivery of Health Care ; Humans ; *Internet of Things ; Monitoring, Physiologic ; }, abstract = {A new computing paradigm that has been growing in computing systems is fog computing. In the healthcare industry, Internet of Things (IoT) driven fog computing is being developed to speed up the services for the general public and save billions of lives. 
This new computing platform, based on the fog computing paradigm, may reduce latency when transmitting and communicating signals with faraway servers, allowing medical services to be delivered more quickly in both spatial and temporal dimensions. One of the necessary qualities of computing systems that can enable the completion of healthcare operations is latency reduction. Fog computing can provide reduced latency when compared to cloud computing due to the use of only low-end computers, mobile phones, and personal devices in fog computing. In this paper, a new framework for healthcare monitoring for managing real-time notification based on fog computing has been proposed. The proposed system monitors the patient's body temperature, heart rate, and blood pressure values obtained from the sensors that are embedded into a wearable device and notifies the doctors or caregivers in real time if there occur any contradictions in the normal threshold value using the machine learning algorithms. The notification can also be set for the patients to alert them about the periodical medications or diet to be maintained by the patients. The cloud layer stores the big data into the cloud for future references for the hospitals and the researchers.}, } @article {pmid35340246, year = {2022}, author = {Mishra, AK and Govil, MC and Pilli, ES and Bijalwan, A}, title = {Digital Forensic Investigation of Healthcare Data in Cloud Computing Environment.}, journal = {Journal of healthcare engineering}, volume = {2022}, number = {}, pages = {9709101}, pmid = {35340246}, issn = {2040-2309}, mesh = {*Cloud Computing ; Delivery of Health Care ; Humans ; *Information Storage and Retrieval ; Software ; }, abstract = {Cloud computing is widely used in various sectors such as finance, health care, and education. Factors such as cost optimization, interoperability, data analysis, and data ownership functionalities are attracting healthcare industry to use cloud services. 
Security and forensic concerns are associated in cloud environments as sensitive healthcare data can attract the outside attacker and inside malicious events. Storage is the most used service in cloud computing environments. Data stored in iCloud (Apple Inc. Cloud Service Provider) is accessible via a Web browser, cloud client application, or mobile application. Apple Inc. provides iCloud service to synchronize data from MacBook, iPhone, iPad, etc. Core applications such as Mail, Contacts, Calendar, Photos, Notes, Reminders, and Keynote are synced with iCloud. Various operations can be performed on cloud data, including editing, deleting, uploading, and downloading data, as well as synchronizing data between devices. These operations generate log files and directories that are essential from an investigative perspective. This paper presents a taxonomy of iCloud forensic tools that provides a searchable catalog for forensic practitioners to identify the tools that meet their technical requirements. A case study involving healthcare data storage on iCloud service demonstrates that artifacts related to environmental information, browser activities (history, cookies, cache), synchronization activities, log files, directories, data content, and iCloud user activities are stored on a MacBook system. A GUI-based dashboard is developed to support iCloud forensics, specifically the collection of artifacts from a MacBook system.}, } @article {pmid35336536, year = {2022}, author = {He, X and Zhang, X and Wang, Y and Ji, H and Duan, X and Guo, F}, title = {Spatial Attention Frustum: A 3D Object Detection Method Focusing on Occluded Objects.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {6}, pages = {}, pmid = {35336536}, issn = {1424-8220}, abstract = {Achieving the accurate perception of occluded objects for autonomous vehicles is a challenging problem. 
Human vision can always quickly locate important object regions in complex external scenes, while other regions are only roughly analysed or ignored, defined as the visual attention mechanism. However, the perception system of autonomous vehicles cannot know which part of the point cloud is in the region of interest. Therefore, it is meaningful to explore how to use the visual attention mechanism in the perception system of autonomous driving. In this paper, we propose the model of the spatial attention frustum to solve object occlusion in 3D object detection. The spatial attention frustum can suppress unimportant features and allocate limited neural computing resources to critical parts of the scene, thereby providing greater relevance and easier processing for higher-level perceptual reasoning tasks. To ensure that our method maintains good reasoning ability when faced with occluded objects with only a partial structure, we propose a local feature aggregation module to capture more complex local features of the point cloud. Finally, we discuss the projection constraint relationship between the 3D bounding box and the 2D bounding box and propose a joint anchor box projection loss function, which will help to improve the overall performance of our method. The results of the KITTI dataset show that our proposed method can effectively improve the detection accuracy of occluded objects. Our method achieves 89.46%, 79.91% and 75.53% detection accuracy in the easy, moderate, and hard difficulty levels of the car category, and achieves a 6.97% performance improvement especially in the hard category with a high degree of occlusion. 
Our one-stage method does not need to rely on another refining stage, comparable to the accuracy of the two-stage method.}, } @article {pmid35336483, year = {2022}, author = {Jian, MS and Pan, CJ}, title = {Blockchained Industry Information Handoff Based on Internet of Things Devices with Intelligent Customized Object Recognition.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {6}, pages = {}, pmid = {35336483}, issn = {1424-8220}, abstract = {To determine the quality and safety of each product used in manufacturing, the exchange of measured data between machines, operators, production lines, and manufacturing companies is crucial. In this study, we developed a system with customized object recognition capability for the secure blockchain-based transfer of industry information through Internet of Things (IoT) devices. In the proposed system, product history data are transferred through blockchains through artificial intelligence (AI)-based object recognition. Individual objects are recognized and represented using a unique number sequence for use as a private key on a blockchain. The data history can be automatically secured, and all the data are traceable and trackable. The reliability and validity of the proposed system were verified using the Jetson Nano Developer Kit. The proposed AI-based system is a low-cost embedded system. Based on the open-source cloud computing platform, the required computing resources for blockchain computing and storage are available. In an experiment, the proposed system achieved >99% accuracy within 1 s. Furthermore, the computational cost of the proposed system was 10% that of traditional AI systems. 
support = {POCI-01-0247-FEDER-037902//European Structural and Investment Funds in the FEDER component, through the Operational Competitiveness and Internationalization Programme (COMPETE 2020)/ ; }
Performance results show that by resorting to parallelism and quantization approach, the proposed solution could reduce the amount of logical FPGA resources by 40 to 50%, enhancing the processing time by 50% while maintaining the deep learning operation accuracy.}, } @article {pmid35336335, year = {2022}, author = {Loseto, G and Scioscia, F and Ruta, M and Gramegna, F and Ieva, S and Fasciano, C and Bilenchi, I and Loconte, D}, title = {Osmotic Cloud-Edge Intelligence for IoT-Based Cyber-Physical Systems.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {6}, pages = {}, pmid = {35336335}, issn = {1424-8220}, mesh = {*Artificial Intelligence ; Intelligence ; Osmosis ; *Software ; }, abstract = {Artificial Intelligence (AI) in Cyber-Physical Systems allows machine learning inference on acquired data with ever greater accuracy, thanks to models trained with massive amounts of information generated by Internet of Things devices. Edge Intelligence is increasingly adopted to execute inference on data at the border of local networks, exploiting models trained in the Cloud. However, the training tasks on Edge nodes are not supported yet with flexible dynamic migration between Edge and Cloud. This paper proposes a Cloud-Edge AI microservice architecture, based on Osmotic Computing principles. Notable features include: (i) containerized architecture enabling training and inference on the Edge, Cloud, or both, exploiting computational resources opportunistically to reach the best prediction accuracy; and (ii) microservice encapsulation of each architectural module, allowing a direct mapping with Commercial-Off-The-Shelf (COTS) components. Grounding on the proposed architecture: (i) a prototype has been realized with commodity hardware leveraging open-source software technologies; and (ii) it has been then used in a small-scale intelligent manufacturing case study, carrying out experiments. 
The obtained results validate the feasibility and key benefits of the approach.}, } @article {pmid35336322, year = {2022}, author = {Kim, YJ and Park, CH and Yoon, M}, title = {FILM: Filtering and Machine Learning for Malware Detection in Edge Computing.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {6}, pages = {}, pmid = {35336322}, issn = {1424-8220}, support = {2018-0-00429//Institute for Information and Communications Technology Promotion/ ; IITP-2020-0-01826//Institute for Information and Communications Technology Promotion/ ; NRF-2020R1A2C1006135//National Research Foundation of Korea/ ; }, mesh = {Humans ; *Machine Learning ; }, abstract = {Machine learning with static-analysis features extracted from malware files has been adopted to detect malware variants, which is desirable for resource-constrained edge computing and Internet-of-Things devices with sensors; however, this learned model suffers from a misclassification problem because some malicious files have almost the same static-analysis features as benign ones. In this paper, we present a new detection method for edge computing that can utilize existing machine learning models to classify a suspicious file into either benign, malicious, or unpredictable categories while existing models make only a binary decision of either benign or malicious. The new method can utilize any existing deep learning models developed for malware detection after appending a simple sigmoid function to the models. When interpreting the sigmoid value during the testing phase, the new method determines if the model is confident about its prediction; therefore, the new method can take only the prediction of high accuracy, which reduces incorrect predictions on ambiguous static-analysis features. Through experiments on real malware datasets, we confirm that the new scheme significantly enhances the accuracy, precision, and recall of existing deep learning models. 
For example, the accuracy is enhanced from 0.96 to 0.99, while some files are classified as unpredictable that can be entrusted to the cloud for further dynamic or human analysis.}, } @article {pmid35336277, year = {2022}, author = {Chen, L and Wei, L and Wang, Y and Wang, J and Li, W}, title = {Monitoring and Predictive Maintenance of Centrifugal Pumps Based on Smart Sensors.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {6}, pages = {}, pmid = {35336277}, issn = {1424-8220}, mesh = {Equipment Failure ; Monitoring, Physiologic ; Temperature ; *Vibration ; }, abstract = {Centrifugal pumps have a wide range of applications in industrial and municipal water affairs. During the use of centrifugal pumps, failures such as bearing wear, blade damage, impeller imbalance, shaft misalignment, cavitation, water hammer, etc., often occur. It is of great importance to use smart sensors and digital Internet of Things (IoT) systems to monitor the real-time operating status of pumps and predict potential failures for achieving predictive maintenance of pumps and improving the intelligence level of machine health management. Firstly, the common fault forms of centrifugal pumps and the characteristics of vibration signals when a fault occurs are introduced. Secondly, the centrifugal pump monitoring IoT system is designed. The system is mainly composed of wireless sensors, wired sensors, data collectors, and cloud servers. Then, the microelectromechanical system (MEMS) chip is used to design a wireless vibration temperature integrated sensor, a wired vibration temperature integrated sensor, and a data collector to monitor the running state of the pump. The designed wireless sensor communicates with the server through Narrow Band Internet of Things (NB-IoT). The output of the wired sensor is connected to the data collector, and the designed collector can communicate with the server through 4G communication. 
Through cloud-side collaboration, real-time monitoring of the running status of centrifugal pumps and intelligent diagnosis of centrifugal pump faults are realized. Finally, on-site testing and application verification of the system was conducted. The test results show that the designed sensors and sensor application system can make good use of the centrifugal pump failure mechanism to automatically diagnose equipment failures. Moreover, the diagnostic accuracy rate is above 85% by using the method of wired sensor and collector. As a low-cost and easy-to-implement solution, wireless sensors can also monitor gradual failures well. The research on the sensors and pump monitoring system provides feasible methods and an effective means for the application of centrifugal pump health management and predictive maintenance.}, } @article {pmid35332213, year = {2022}, author = {Moshiri, N and Fisch, KM and Birmingham, A and DeHoff, P and Yeo, GW and Jepsen, K and Laurent, LC and Knight, R}, title = {The ViReflow pipeline enables user friendly large scale viral consensus genome reconstruction.}, journal = {Scientific reports}, volume = {12}, number = {1}, pages = {5077}, pmid = {35332213}, issn = {2045-2322}, support = {75D30120C09795/CC/CDC HHS/United States ; UL1 TR001442/TR/NCATS NIH HHS/United States ; S10 OD026929/OD/NIH HHS/United States ; 2038509//National Science Foundation/ ; 2028040//National Science Foundation/ ; }, mesh = {*COVID-19/epidemiology ; Genome, Viral/genetics ; Humans ; Pandemics ; SARS-CoV-2/genetics ; *Software ; }, abstract = {Throughout the COVID-19 pandemic, massive sequencing and data sharing efforts enabled the real-time surveillance of novel SARS-CoV-2 strains throughout the world, the results of which provided public health officials with actionable information to prevent the spread of the virus. 
However, with great sequencing comes great computation, and while cloud computing platforms bring high-performance computing directly into the hands of all who seek it, optimal design and configuration of a cloud compute cluster requires significant system administration expertise. We developed ViReflow, a user-friendly viral consensus sequence reconstruction pipeline enabling rapid analysis of viral sequence datasets leveraging Amazon Web Services (AWS) cloud compute resources and the Reflow system. ViReflow was developed specifically in response to the COVID-19 pandemic, but it is general to any viral pathogen. Importantly, when utilized with sufficient compute resources, ViReflow can trim, map, call variants, and call consensus sequences from amplicon sequence data from 1000 SARS-CoV-2 samples at 1000X depth in < 10 min, with no user intervention. ViReflow's simplicity, flexibility, and scalability make it an ideal tool for viral molecular epidemiological efforts.}, } @article {pmid35327820, year = {2022}, author = {Zhu, H and Xue, Q and Li, T and Xie, D}, title = {Traceable Scheme of Public Key Encryption with Equality Test.}, journal = {Entropy (Basel, Switzerland)}, volume = {24}, number = {3}, pages = {}, pmid = {35327820}, issn = {1099-4300}, abstract = {Public key encryption supporting equality test (PKEwET) schemes, because of their special function, have good applications in many fields, such as in cloud computing services, blockchain, and the Internet of Things. The original PKEwET has no authorization function. Subsequently, many PKEwET schemes have been proposed with the ability to perform authorization against various application scenarios. However, these schemes are incapable of traceability to the ciphertexts. In this paper, the ability of tracing to the ciphertexts is introduced into a PKEwET scheme. For the ciphertexts, the presented scheme supports not only the equality test, but also has the function of traceability. 
Depending on the attacker's privileges, it achieves OW-CCA security against an adversary with a trapdoor, and IND-CCA security against an adversary without a trapdoor.
Because point coordinates are irregular and unordered but point timestamps exhibit regularities and order, the spatio-temporal encoding is decoupled to reduce the impact of the spatial irregularity on the temporal modeling. By properly preserving and encoding spatio-temporal structure, our PST-Transformer effectively models point cloud videos and shows superior performance on 3D action recognition and 4D semantic segmentation.}, } @article {pmid35319833, year = {2022}, author = {ElZarrad, MK and Lee, AY and Purcell, R and Steele, SJ}, title = {Advancing an agile regulatory ecosystem to respond to the rapid development of innovative technologies.}, journal = {Clinical and translational science}, volume = {15}, number = {6}, pages = {1332-1339}, pmid = {35319833}, issn = {1752-8062}, mesh = {*Artificial Intelligence ; *Ecosystem ; Humans ; Policy Making ; }, abstract = {Technological advancements are dramatically changing the landscape of therapeutic development. The convergence of advances in computing power, analytical methods, artificial intelligence, novel digital health tools, and cloud-based platforms has the potential to power an exponential acceleration of evidence generation. For regulatory agencies responsible for evidence evaluation and oversight of medical products, these advances present both promises and challenges. Ultimately, realizing the translation and impact of these innovations that could potentially enhance therapeutic development and improve the health of individuals and the public will require a nimble and responsive regulatory approach. Supporting an adaptive policy-making infrastructure that is poised to address novel regulatory considerations, creating a workforce to ensure relevant expertise, and fostering more diverse collaborations with a broader group of stakeholders are steps toward the goal of modernizing the regulatory ecosystem. 
This article outlines approaches that can help provide the flexibility and tools needed to foster innovation, while ensuring the safety and effectiveness of medical products.}, } @article {pmid35317470, year = {2022}, author = {Pathak, S and Raj, R and Singh, K and Verma, PK and Kumar, B}, title = {Development of portable and robust cataract detection and grading system by analyzing multiple texture features for Tele-Ophthalmology.}, journal = {Multimedia tools and applications}, volume = {81}, number = {16}, pages = {23355-23371}, pmid = {35317470}, issn = {1380-7501}, abstract = {This paper presents a low cost, robust, portable and automated cataract detection system which can detect the presence of cataract from the colored digital eye images and grade their severity. Ophthalmologists detect cataract through visual screening using ophthalmoscope and slit lamps. Conventionally a patient has to visit an ophthalmologist for eye screening and treatment follows the course. Developing countries lack the proper health infrastructure and face huge scarcity of trained medical professionals as well as technicians. The condition is not very satisfactory with the rural and remote areas of developed nations. To bridge this barrier between the patient and the availability of resources, current work focuses on the development of portable low-cost, robust cataract screening and grading system. Similar works use fundus and retinal images which use costly imaging modules and image based detection algorithms which use much complex neural network models. Current work derives its benefit from the advancements in digital image processing techniques. A set of preprocessing has been done on the colored eye image and later texture information in form of mean intensity, uniformity, standard deviation and randomness has been calculated and mapped with the diagnostic opinion of doctor for cataract screening of over 200 patients. 
For different grades of cataract severity edge pixel count was calculated as per doctor's opinion and later these data are used for calculating the thresholds using hybrid k-means algorithm, for giving a decision on the presence of cataract and grade its severity. Low value of uniformity and high value of other texture parameters confirm the presence of cataract as clouding in eye lens causes the uniformity function to take lower value due to presence of coarse texture. Higher the edge pixel count value, this confirms the presence of starting of cataract as solidified regions in lens are nonuniform. Lower value corresponds to fully solidified region or matured cataract. Proposed algorithm was initially developed on MATLAB, and tested on over 300 patients in an eye camp. The system has shown more than 98% accuracy in detection and grading of cataract. Later a cloud based system was developed with 3D printed image acquisition module to manifest an automated, portable and efficient cataract detection system for Tele-Ophthalmology. The proposed system uses a very simple and efficient technique by mapping the diagnostic opinion of the doctor as well, giving very promising results which suggest its potential use in teleophthalmology applications to reduce the cost of delivering eye care services and increasing its reach effectively. Developed system is simple in design and easy to operate and suitable for mass screening of cataracts. Due to non-invasive and non-mydriatic and mountable nature of device, in person screening is not required. 
Hence, social distancing norms are easy to follow and the device is very useful in COVID-19 like situations.
The development of machine learning algorithms will change primary care by applying advanced machine learning algorithms to detect high dementia risk groups in the future.}, } @article {pmid35311003, year = {2022}, author = {Wu, J and Turner, N and Bae, JA and Vishwanathan, A and Seung, HS}, title = {RealNeuralNetworks.jl: An Integrated Julia Package for Skeletonization, Morphological Analysis, and Synaptic Connectivity Analysis of Terabyte-Scale 3D Neural Segmentations.}, journal = {Frontiers in neuroinformatics}, volume = {16}, number = {}, pages = {828169}, pmid = {35311003}, issn = {1662-5196}, abstract = {Benefiting from the rapid development of electron microscopy imaging and deep learning technologies, an increasing number of brain image datasets with segmentation and synapse detection are published. Most of the automated segmentation methods label voxels rather than producing neuron skeletons directly. A further skeletonization step is necessary for quantitative morphological analysis. Currently, several tools are published for skeletonization as well as morphological and synaptic connectivity analysis using different computer languages and environments. Recently the Julia programming language, notable for elegant syntax and high performance, has gained rapid adoption in the scientific computing community. Here, we present a Julia package, called RealNeuralNetworks.jl, for efficient sparse skeletonization, morphological analysis, and synaptic connectivity analysis. Based on a large-scale Zebrafish segmentation dataset, we illustrate the software features by performing distributed skeletonization in Google Cloud, clustering the neurons using the NBLAST algorithm, combining morphological similarity and synaptic connectivity to study their relationship. 
We demonstrate that RealNeuralNetworks.jl is suitable for use in terabyte-scale electron microscopy image segmentation datasets.}, } @article {pmid35310887, year = {2022}, author = {Kumar, A}, title = {A cloud-based buyer-seller watermarking protocol (CB-BSWP) using semi-trusted third party for copy deterrence and privacy preserving.}, journal = {Multimedia tools and applications}, volume = {81}, number = {15}, pages = {21417-21448}, pmid = {35310887}, issn = {1380-7501}, abstract = {Nowadays, cloud computing provides a platform infrastructure for the secure dealing of digital data, but privacy and copy control are the two important issues in it over a network. Cloud data is available to the end user and requires enormous security and privacy techniques to protect the data. Moreover, the access control mechanism with encryption-based technique protects the digital rights for participants in a transaction, but they do not protect the media from being illegally redistributed and do not restrict an authorized user to reveal their secret information this is referred to as you can access but you cannot leak. This brought out a need for controlling copy deterrence and preserving the privacy of digital media over the internet. To overlook this, we proposed a cloud-based buyer-seller watermarking protocol (CB-BSWP) with the use of a semi-trusted third party for copy deterrence and privacy-preserving in the cloud environment. 
The suggested scheme uses 1) a privacy homomorphism cryptosystem with Diffie-Hellman key exchange algorithm to provide an encrypted domain for the secure exchange of digital media 2) adopt robust and fair watermarking techniques to ensure high imperceptibility and robustness for the watermarked images against attacks 3) two services of cloud Infrastructure as a service (IaaS) to support virtualized computing infrastructure and Watermarking as a service (WaaS) to execute the speedy process of watermarking, this process is supported by watermarking generation and signing phase (WGSP) and watermark extraction and verifying phase reported in 4th section. 4) cloud service provider (CSP) considered as a "semi-trusted" third party to reduce the burden from the trusted third party (TTP) server and provide storage for the encrypted digital media on cloud databases, this frees content owner from not having a separate storage infrastructure. The proposed scheme encrypts the digital content by using SHA-512 algorithm with key size 512-bits to ensure that it doesn't affect computational time during the process of encryption. The suggested scheme addresses the problems of piracy tracing, anonymity, tamper resistance, non-framing, customer rights problem. The role of cloud is crucial because it reduces communication overhead, provides unlimited storage, supports the watermarking process and offers a solution for the secure distribution of end-to-end security of digital content over cloud. 
To check the performance of the suggested CB-BSWP protocol against common image processing attacks, we have conducted experiments in which the perceptual quality of watermarked digital media was found enhanced, resulting in a robust watermark.}, } @article {pmid35310585, year = {2022}, author = {Guo, Y}, title = {Contextualized Design of IoT (Internet of Things) Finance for Edge Artificial Intelligence Computing.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {6046957}, pmid = {35310585}, issn = {1687-5273}, mesh = {*Artificial Intelligence ; China ; Cloud Computing ; *Internet of Things ; }, abstract = {With the widespread application of IoT technology in the world, the new industry of IoT finance has emerged. Under this new business model, commercial banks and other financial institutions can realize safer and more convenient financial services such as payment, financing and asset management through the application of IoT technology and communication network technology. In the cloud computing model, the local terminal device of IOT will transmit the collected data to the cloud server through the network, and the cloud server will complete the data operation. Cloud computing model can well solve the problem of poor performance of IoT devices, but with the increasing number of IoT terminal devices and huge number of devices accessing the network, cloud computing model is constrained by network bandwidth and performance bottleneck, which brings a series of problems such as high latency, poor real-time and low security. 
In this paper, based on the new industry of IoT finance which is developing rapidly, we construct a POT (Peaks Over Threshold) model to empirically analyze the operational risk of commercial banks by using the risk loss data of commercial banks, and estimate the corresponding ES values by using the control variables method to measure the operational risk of traditional commercial banks and IoT finance respectively, and compare the total ES values of the two.
It is illustrated by experimental simulation that MOGA-C can converge faster and obtain similar multiobjective optimization results at the same calculation scale.
To reference our sample according to cybersecurity domains, we benchmarked each article against two cybersecurity frameworks: ISO 27001 Annex A and the NIST framework core. Limited articles in our papers referred to the policies and compliance sections of ISO 27001. In addition, most of our sample is classified by the NIST function "Protect," meaning activities related to identity management, access control and data security. Furthermore, we have identified key domains where research in security and privacy are critical, such as big data, IOT, cloud computing, standards and regulations. The results indicate that although cybersecurity is a growing concern in hospitals, research is still weak in some areas. Considering the recrudescence of cyber-attacks in the healthcare sector, we call for more research in hospitals in managerial and non-technical domains of information security and privacy that are uncovered by our analysis.}, } @article {pmid35298506, year = {2022}, author = {Witt, C and Davis, RJ and Yang, Z and Ganey, JL and Gutiérrez, RJ and Healey, S and Hedwall, S and Hoagland, S and Maes, R and Malcolm, K and Sanderlin, J and Seamans, M and Jones, GM}, title = {Linking robust spatiotemporal datasets to assess and monitor habitat attributes of a threatened species.}, journal = {PloS one}, volume = {17}, number = {3}, pages = {e0265175}, pmid = {35298506}, issn = {1932-6203}, mesh = {Animals ; Conservation of Natural Resources/methods ; Ecosystem ; *Endangered Species ; Forests ; *Strigiformes ; }, abstract = {Accessibility of multispectral, multitemporal imagery combined with recent advances in cloud computing and machine learning approaches have enhanced our ability to model habitat characteristics across broad spatial and temporal scales. 
We integrated a large dataset of known nest and roost sites of a threatened species, the Mexican spotted owl (Strix occidentalis lucida), in the southwestern USA with Landsat imagery processed using the Continuous Change Detection and Classification (CCDC) time series algorithm on Google Earth Engine. We then used maximum entropy modeling (Maxent) to classify the landscape into four 'spectral similarity' classes that reflected the degree to which 30-m pixels contained a multispectral signature similar to that found at known owl nest/roost sites and mapped spectral similarity classes from 1986-2020. For map interpretation, we used nationally consistent forest inventory data to evaluate the structural and compositional characteristics of each spectral similarity class. We found a monotonic increase of structural characteristics typically associated with owl nesting and roosting over classes of increasing similarity, with the 'very similar' class meeting or exceeding published minimum desired management conditions for owl nesting and roosting. We also found an increased rate of loss of forest vegetation typical of owl nesting and roosting since the beginning of the 21st century that can be partly attributed to increased frequency and extent of large (≥400 ha) wildfires. This loss resulted in a 38% reduction over the 35-year study period in forest vegetation most similar to that used for owl nesting and roosting. Our modelling approach using cloud computing with time series of Landsat imagery provided a cost-effective tool for landscape-scale, multidecadal monitoring of vegetative components of a threatened species' habitat. 
Our approach could be used to monitor trends in the vegetation favored by any other species, provided that high-quality location data such as we presented here are available.}, } @article {pmid35289370, year = {2022}, author = {Knosp, BM and Craven, CK and Dorr, DA and Bernstam, EV and Campion, TR}, title = {Understanding enterprise data warehouses to support clinical and translational research: enterprise information technology relationships, data governance, workforce, and cloud computing.}, journal = {Journal of the American Medical Informatics Association : JAMIA}, volume = {29}, number = {4}, pages = {671-676}, pmid = {35289370}, issn = {1527-974X}, support = {UL1 TR002537/TR/NCATS NIH HHS/United States ; UL1 TR003167/TR/NCATS NIH HHS/United States ; UL1 TR001433/TR/NCATS NIH HHS/United States ; UL1TR002384/TR/NCATS NIH HHS/United States ; UL1 TR002369/TR/NCATS NIH HHS/United States ; U24 TR002260/TR/NCATS NIH HHS/United States ; }, mesh = {Cloud Computing ; *Data Warehousing ; Humans ; Information Technology ; *Translational Research, Biomedical ; Workforce ; }, abstract = {OBJECTIVE: Among National Institutes of Health Clinical and Translational Science Award (CTSA) hubs, effective approaches for enterprise data warehouses for research (EDW4R) development, maintenance, and sustainability remain unclear. The goal of this qualitative study was to understand CTSA EDW4R operations within the broader contexts of academic medical centers and technology.

MATERIALS AND METHODS: We performed a directed content analysis of transcripts generated from semistructured interviews with informatics leaders from 20 CTSA hubs.

RESULTS: Respondents referred to services provided by health system, university, and medical school information technology (IT) organizations as ``enterprise information technology (IT).'' Seventy-five percent of respondents stated that the team providing EDW4R service at their hub was separate from enterprise IT; strong relationships between EDW4R teams and enterprise IT were critical for success. Managing challenges of EDW4R staffing was made easier by executive leadership support. Data governance appeared to be a work in progress, as most hubs reported complex and incomplete processes, especially for commercial data sharing. Although nearly all hubs (n = 16) described use of cloud computing for specific projects, only 2 hubs reported using a cloud-based EDW4R. Respondents described EDW4R cloud migration facilitators, barriers, and opportunities.

DISCUSSION: Descriptions of approaches to how EDW4R teams at CTSA hubs work with enterprise IT organizations, manage workforces, make decisions about data, and approach cloud computing provide insights for institutions seeking to leverage patient data for research.

CONCLUSION: Identification of EDW4R best practices is challenging, and this study helps identify a breadth of viable options for CTSA hubs to consider when implementing EDW4R services.}, } @article {pmid35289369, year = {2022}, author = {Barnes, C and Bajracharya, B and Cannalte, M and Gowani, Z and Haley, W and Kass-Hout, T and Hernandez, K and Ingram, M and Juvvala, HP and Kuffel, G and Martinov, P and Maxwell, JM and McCann, J and Malhotra, A and Metoki-Shlubsky, N and Meyer, C and Paredes, A and Qureshi, J and Ritter, X and Schumm, P and Shao, M and Sheth, U and Simmons, T and VanTol, A and Zhang, Z and Grossman, RL}, title = {The Biomedical Research Hub: a federated platform for patient research data.}, journal = {Journal of the American Medical Informatics Association : JAMIA}, volume = {29}, number = {4}, pages = {619-625}, pmid = {35289369}, issn = {1527-974X}, support = {U2CHL138346/NH/NIH HHS/United States ; /HL/NHLBI NIH HHS/United States ; }, mesh = {*Biomedical Research ; *Cloud Computing ; Humans ; Software ; }, abstract = {OBJECTIVE: The objective was to develop and operate a cloud-based federated system for managing, analyzing, and sharing patient data for research purposes, while allowing each resource sharing patient data to operate their component based upon their own governance rules. The federated system is called the Biomedical Research Hub (BRH).

MATERIALS AND METHODS: The BRH is a cloud-based federated system built over a core set of software services called framework services. BRH framework services include authentication and authorization, services for generating and assessing findable, accessible, interoperable, and reusable (FAIR) data, and services for importing and exporting bulk clinical data. The BRH includes data resources providing data operated by different entities and workspaces that can access and analyze data from one or more of the data resources in the BRH.

RESULTS: The BRH contains multiple data commons that in aggregate provide access to over 6 PB of research data from over 400 000 research participants.

DISCUSSION AND CONCLUSION: With the growing acceptance of using public cloud computing platforms for biomedical research, and the growing use of opaque persistent digital identifiers for datasets, data objects, and other entities, there is now a foundation for systems that federate data from multiple independently operated data resources that expose FAIR application programming interfaces, each using a separate data model. Applications can be built that access data from one or more of the data resources.}, } @article {pmid35286502, year = {2022}, author = {Bhattacharya, S and Ghosh, S and Bhattacharyya, S}, title = {Analytical hierarchy process tool in Google Earth Engine platform: a case study of a tropical landfill site suitability.}, journal = {Environmental monitoring and assessment}, volume = {194}, number = {4}, pages = {276}, pmid = {35286502}, issn = {1573-2959}, mesh = {Analytic Hierarchy Process ; Environmental Monitoring/methods ; Geographic Information Systems ; *Refuse Disposal/methods ; Search Engine ; Waste Disposal Facilities ; }, abstract = {Kolkata being a metropolitan city in India has its main municipal solid waste dumpsite situated at Dhapa just adjacent to the East Kolkata Wetlands (Ramsar site). The current prevalent situation at Dhapa is open dumping leading to various contaminations and hazards putting forth the need to look for alternative sites where the landfilling operation can be shifted to using scientific methods. A user interface (UI)-based analytical hierarchy process (AHP) tool has been developed within the Google Earth Engine (GEE) cloud platform to find out the alternative dumping sites using geospatial layers. AHP function is not available as a native algorithm or developed by any researcher in GEE. The tool has three major functionalities, of which the first one handles the UI elements. 
The AHP procedure is within another function, and the last function integrates the AHP coefficients to the layers generating the final suitability layer. Users can also upload comparison matrix as GEE asset in the form of CSV file which gets automatically integrated into the AHP to calculate the coefficients and consistency ratio to generate the spatial suitability layers. This approach showcases a generalized AHP function within the GEE environment, which has been done for the first time. The tool is designed in the cloud platform which is dynamic, robust and suitable for use in various AHP-based suitability analysis in environmental monitoring and assessment.}, } @article {pmid35284203, year = {2022}, author = {Chandra, M and Kumar, K and Thakur, P and Chattopadhyaya, S and Alam, F and Kumar, S}, title = {Digital technologies, healthcare and Covid-19: insights from developing and emerging nations.}, journal = {Health and technology}, volume = {12}, number = {2}, pages = {547-568}, pmid = {35284203}, issn = {2190-7188}, abstract = {COVID-19 pandemic created a global health crisis affecting every nation. The essential smart medical devices/accessories, quarantine facilities, surveillance systems, and related digital technologies are in huge demand. Healthcare, manufacturing industries, and educational institutions need technologies that allow working from a safe location. Digital technologies and Industry 4.0 tools have the potential to fulfil these customized requirements during and post COVID-19 crisis. The purpose of this research is to provide understanding to healthcare professionals, government policymakers, researchers, industry professionals, academics, and students/learners of the paradigm of different Digital technologies, Industry 4.0 tools, and their applications during the COVID-19 pandemic. Digital technologies, Industry 4.0 tools and their current and potential applications have been reviewed. 
The use of different Digital technologies and Industry 4.0 tools is identified. Digital technologies and Industry 4.0 tools (3D Printing, Artificial Intelligence, Cloud Computing, Autonomous Robot, Biosensor, Telemedicine service, Internet of Things (IoT), Virtual reality, and holography) offer opportunities for effective delivery of healthcare service(s), online education, and Work from Home (WFH) environment. The article emphasises the usefulness, most recent development, and implementation of Digital technologies, Industry 4.0 techniques, and tools in fighting the COVID-19 pandemic worldwide.}, } @article {pmid35281749, year = {2022}, author = {Yulianto, F and Kushardono, D and Budhiman, S and Nugroho, G and Chulafak, GA and Dewi, EK and Pambudi, AI}, title = {Evaluation of the Threshold for an Improved Surface Water Extraction Index Using Optical Remote Sensing Data.}, journal = {TheScientificWorldJournal}, volume = {2022}, number = {}, pages = {4894929}, pmid = {35281749}, issn = {1537-744X}, abstract = {In this study, we proposed an automatic water extraction index (AWEI) threshold improvement model that can be used to detect lake surface water based on optical remote sensing data. An annual Landsat 8 mosaic was created using the Google Earth Engine (GEE) platform to obtain cloud-free satellite image data. The challenge of this study was to determine the threshold value, which is essential to show the boundary between water and nonwater. The AWEI was selected for the study to address this challenge. The AWEI approach was developed by adding a threshold water value based on the split-based approach (SBA) calculation analysis for Landsat 8 satellite images. The SBA was used to determine local threshold variations in data scenes that were used to classify water and nonwater. 
The class threshold between water and nonwater in each selected subscene image can be determined based on the calculation of class intervals generated by geostatistical analysis, initially referred to as smart quantiles. It was used to determine the class separation between water and nonwater in the resulting subscene images. The objectives of this study were (a) to increase the accuracy of automatic lake surface water detection by improvising the determination of threshold values based on analysis and calculations using the SBA and (b) to conduct a test case study of AWEI threshold improvement on several lakes' surface water, which has a variety of different or heterogeneous characteristics. The results show that the threshold value obtained based on the smart quantile calculation from the natural break approach (AWEI ≥ -0.23) gave an overall accuracy of close to 100%. Those results were better than the normal threshold (AWEI ≥ 0.00), with an overall accuracy of 98%. It shows that there has been an increase of 2% in the accuracy based on the confusion matrix calculation. 
In addition to that, the results obtained when classifying water and nonwater classes for the different national priority lakes in Indonesia vary in overall accuracy from 94% to 100%.}, } @article {pmid35280732, year = {2022}, author = {Bonmatí, LM and Miguel, A and Suárez, A and Aznar, M and Beregi, JP and Fournier, L and Neri, E and Laghi, A and França, M and Sardanelli, F and Penzkofer, T and Lambin, P and Blanquer, I and Menzel, MI and Seymour, K and Figueiras, S and Krischak, K and Martínez, R and Mirsky, Y and Yang, G and Alberich-Bayarri, Á}, title = {CHAIMELEON Project: Creation of a Pan-European Repository of Health Imaging Data for the Development of AI-Powered Cancer Management Tools.}, journal = {Frontiers in oncology}, volume = {12}, number = {}, pages = {742701}, pmid = {35280732}, issn = {2234-943X}, support = {MC_PC_21013/MRC_/Medical Research Council/United Kingdom ; MR/V023799/1/MRC_/Medical Research Council/United Kingdom ; PG/16/78/32402/BHF_/British Heart Foundation/United Kingdom ; }, abstract = {The CHAIMELEON project aims to set up a pan-European repository of health imaging data, tools and methodologies, with the ambition to set a standard and provide resources for future AI experimentation for cancer management. The project is a 4 year long, EU-funded project tackling some of the most ambitious research in the fields of biomedical imaging, artificial intelligence and cancer treatment, addressing the four types of cancer that currently have the highest prevalence worldwide: lung, breast, prostate and colorectal. To allow this, clinical partners and external collaborators will populate the repository with multimodality (MR, CT, PET/CT) imaging and related clinical data. Subsequently, AI developers will enable a multimodal analytical data engine facilitating the interpretation, extraction and exploitation of the information stored at the repository. 
The development and implementation of AI-powered pipelines will enable advancement towards automating data deidentification, curation, annotation, integrity securing and image harmonization. By the end of the project, the usability and performance of the repository as a tool fostering AI experimentation will be technically validated, including a validation subphase by world-class European AI developers, participating in Open Challenges to the AI Community. Upon successful validation of the repository, a set of selected AI tools will undergo early in-silico validation in observational clinical studies coordinated by leading experts in the partner hospitals. Tool performance will be assessed, including external independent validation on hallmark clinical decisions in response to some of the currently most important clinical end points in cancer. The project brings together a consortium of 18 European partners including hospitals, universities, R\&D centers and private research companies, constituting an ecosystem of infrastructures, biobanks, AI/in-silico experimentation and cloud computing technologies in oncology.}, } @article {pmid35271207, year = {2022}, author = {Tzanettis, I and Androna, CM and Zafeiropoulos, A and Fotopoulou, E and Papavassiliou, S}, title = {Data Fusion of Observability Signals for Assisting Orchestration of Distributed Applications.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {5}, pages = {}, pmid = {35271207}, issn = {1424-8220}, mesh = {*Cloud Computing ; }, abstract = {Nowadays, various frameworks are emerging for supporting distributed tracing techniques over microservices-based distributed applications. The objective is to improve observability and management of operational problems of distributed applications, considering bottlenecks in terms of high latencies in the interaction among the deployed microservices. 
However, such frameworks provide information that is disjoint from the management information that is usually collected by cloud computing orchestration platforms. There is a need to improve observability by combining such information to easily produce insights related to performance issues and to realize root cause analyses to tackle them. In this paper, we provide a modern observability approach and pilot implementation for tackling data fusion aspects in edge and cloud computing orchestration platforms. We consider the integration of signals made available by various open-source monitoring and observability frameworks, including metrics, logs and distributed tracing mechanisms. The approach is validated in an experimental orchestration environment based on the deployment and stress testing of a proof-of-concept microservices-based application. Helpful results are produced regarding the identification of the main causes of latencies in the various application parts and the better understanding of the behavior of the application under different stressing conditions.}, } @article {pmid35271184, year = {2022}, author = {Jassas, MS and Mahmoud, QH}, title = {Analysis of Job Failure and Prediction Model for Cloud Computing Using Machine Learning.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {5}, pages = {}, pmid = {35271184}, issn = {1424-8220}, support = {DDG2020-00032//Natural Sciences and Engineering Research Council/ ; }, mesh = {Algorithms ; Animals ; *Cloud Computing ; Horses ; Machine Learning ; Reproducibility of Results ; *Software ; }, abstract = {Modern applications, such as smart cities, home automation, and eHealth, demand a new approach to improve cloud application dependability and availability. Due to the enormous scope and diversity of the cloud environment, most cloud services, including hardware and software, have encountered failures. 
In this study, we first analyze and characterize the behaviour of failed and completed jobs using publicly accessible traces. We have designed and developed a failure prediction model to determine failed jobs before they occur. The proposed model aims to enhance resource consumption and cloud application efficiency. Based on three publicly available traces: the Google cluster, Mustang, and Trinity, we evaluate the proposed model. In addition, the traces were also subjected to various machine learning models to find the most accurate one. Our results indicate a significant correlation between unsuccessful tasks and requested resources. The evaluation results also revealed that our model has high precision, recall, and F1-score. Several solutions, such as predicting job failure, developing scheduling algorithms, changing priority policies, or limiting re-submission of tasks, can improve the reliability and availability of cloud services.}, } @article {pmid35271000, year = {2022}, author = {Janbi, N and Mehmood, R and Katib, I and Albeshri, A and Corchado, JM and Yigitcanlar, T}, title = {Imtidad: A Reference Architecture and a Case Study on Developing Distributed AI Services for Skin Disease Diagnosis over Cloud, Fog and Edge.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {5}, pages = {}, pmid = {35271000}, issn = {1424-8220}, support = {DSR Grant No. RG-10-611-38//King Abdulaziz University/ ; }, mesh = {Artificial Intelligence ; *COVID-19/diagnosis ; Humans ; SARS-CoV-2 ; *Skin Diseases/diagnosis ; Software ; }, abstract = {Several factors are motivating the development of preventive, personalized, connected, virtual, and ubiquitous healthcare services. These factors include declining public health, increase in chronic diseases, an ageing population, rising healthcare costs, the need to bring intelligence near the user for privacy, security, performance, and costs reasons, as well as COVID-19. 
Motivated by these drivers, this paper proposes, implements, and evaluates a reference architecture called Imtidad that provides Distributed Artificial Intelligence (AI) as a Service (DAIaaS) over cloud, fog, and edge using a service catalog case study containing 22 AI skin disease diagnosis services. These services belong to four service classes that are distinguished based on software platforms (containerized gRPC, gRPC, Android, and Android Nearby) and are executed on a range of hardware platforms (Google Cloud, HP Pavilion Laptop, NVIDIA Jetson nano, Raspberry Pi Model B, Samsung Galaxy S9, and Samsung Galaxy Note 4) and four network types (Fiber, Cellular, Wi-Fi, and Bluetooth). The AI models for the diagnosis include two standard Deep Neural Networks and two Tiny AI deep models to enable their execution at the edge, trained and tested using 10,015 real-life dermatoscopic images. The services are evaluated using several benchmarks including model service value, response time, energy consumption, and network transfer time. A DL service on a local smartphone provides the best service in terms of both energy and speed, followed by a Raspberry Pi edge device and a laptop in fog. The services are designed to enable different use cases, such as patient diagnosis at home or sending diagnosis requests to travelling medical professionals through a fog device or cloud. 
This is the pioneering work that provides a reference architecture and such a detailed implementation and treatment of DAIaaS services, and is also expected to have an extensive impact on developing smart distributed service infrastructures for healthcare and other sectors.}, } @article {pmid35270901, year = {2022}, author = {Orive, A and Agirre, A and Truong, HL and Sarachaga, I and Marcos, M}, title = {Quality of Service Aware Orchestration for Cloud-Edge Continuum Applications.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {5}, pages = {}, pmid = {35270901}, issn = {1424-8220}, support = {825473//European Commission/ ; RTI2018-096116-B-I00//Spanish Ministry of Science, Innovation and Universities/ ; KK-2020/00042//Basque Government/ ; IT1324-19//Basque Government/ ; }, mesh = {*Algorithms ; *Cloud Computing ; }, abstract = {The fast growth in the amount of connected devices with computing capabilities in the past years has enabled the emergence of a new computing layer at the Edge. Despite being resource-constrained if compared with cloud servers, they offer lower latencies than those achievable by Cloud computing. The combination of both Cloud and Edge computing paradigms can provide a suitable infrastructure for complex applications' quality of service requirements that cannot easily be achieved with either of these paradigms alone. These requirements can be very different for each application, from achieving time sensitivity or assuring data privacy to storing and processing large amounts of data. Therefore, orchestrating these applications in the Cloud-Edge computing raises new challenges that need to be solved in order to fully take advantage of this layered infrastructure. This paper proposes an architecture that enables the dynamic orchestration of applications in the Cloud-Edge continuum. It focuses on the application's quality of service by providing the scheduler with input that is commonly used by modern scheduling algorithms. 
The architecture uses a distributed scheduling approach that can be customized on a per-application basis, which ensures that it can scale properly even in setups with high number of nodes and complex scheduling algorithms. This architecture has been implemented on top of Kubernetes and evaluated in order to assess its viability to enable more complex scheduling algorithms that take into account the quality of service of applications.}, } @article {pmid35267078, year = {2022}, author = {Vinci-Booher, S and Caron, B and Bullock, D and James, K and Pestilli, F}, title = {Development of white matter tracts between and within the dorsal and ventral streams.}, journal = {Brain structure \& function}, volume = {227}, number = {4}, pages = {1457-1477}, pmid = {35267078}, issn = {1863-2661}, support = {R01 EB029272/EB/NIBIB NIH HHS/United States ; R01 EB030896/EB/NIBIB NIH HHS/United States ; }, mesh = {Adult ; Child ; Child, Preschool ; Diffusion Tensor Imaging ; Humans ; Learning ; *White Matter/diagnostic imaging ; }, abstract = {The degree of interaction between the ventral and dorsal visual streams has been discussed in multiple scientific domains for decades. Recently, several white matter tracts that directly connect cortical regions associated with the dorsal and ventral streams have become possible to study due to advancements in automated and reproducible methods. The developmental trajectory of this set of tracts, here referred to as the posterior vertical pathway (PVP), has yet to be described. We propose an input-driven model of white matter development and provide evidence for the model by focusing on the development of the PVP. We used reproducible, cloud-computing methods and diffusion imaging from adults and children (ages 5-8 years) to compare PVP development to that of tracts within the ventral and dorsal pathways. PVP microstructure was more adult-like than dorsal stream microstructure, but less adult-like than ventral stream microstructure. 
Additionally, PVP microstructure was more similar to the microstructure of the ventral than the dorsal stream and was predicted by performance on a perceptual task in children. Overall, results suggest a potential role for the PVP in the development of the dorsal visual stream that may be related to its ability to facilitate interactions between ventral and dorsal streams during learning. Our results are consistent with the proposed model, suggesting that the microstructural development of major white matter pathways is related, at least in part, to the propagation of sensory information within the visual system.}, } @article {pmid35259122, year = {2022}, author = {Chen, Y and Mao, Q and Wang, B and Duan, P and Zhang, B and Hong, Z}, title = {Privacy-Preserving Multi-Class Support Vector Machine Model on Medical Diagnosis.}, journal = {IEEE journal of biomedical and health informatics}, volume = {26}, number = {7}, pages = {3342-3353}, doi = {10.1109/JBHI.2022.3157592}, pmid = {35259122}, issn = {2168-2208}, mesh = {Algorithms ; Cloud Computing ; Computer Security ; Confidentiality ; Humans ; *Privacy ; *Support Vector Machine ; }, abstract = {With the rapid development of machine learning in the medical cloud system, cloud-assisted medical computing provides a concrete platform for remote rapid medical diagnosis services. Support vector machine (SVM), as one of the important algorithms of machine learning, has been widely used in the field of medical diagnosis for its high classification accuracy and efficiency. In some existing schemes, healthcare providers train diagnostic models with SVM algorithms and provide online diagnostic services to doctors. Doctors send the patient's case report to the diagnostic models to obtain the results and assist in clinical diagnosis. However, case report involves patients' privacy, and patients do not want their sensitive information to be leaked. 
Therefore, the protection of patient's privacy has become an important research direction in the field of online medical diagnosis. In this paper, we propose a privacy-preserving medical diagnosis scheme based on multi-class SVMs. The scheme is based on the distributed two trapdoors public key cryptosystem (DT-PKC) and Boneh-Goh-Nissim (BGN) cryptosystem. We design a secure computing protocol to compute the core process of the SVM classification algorithm. Our scheme can deal with both linearly separable data and nonlinear data while protecting the privacy of user data and support vectors. The results show that our scheme is secure, reliable, scalable with high accuracy.}, } @article {pmid35256689, year = {2022}, author = {Gaikwad, A and Shende, K and Arvind, and Dorai, K}, title = {Implementing efficient selective quantum process tomography of superconducting quantum gates on IBM quantum experience.}, journal = {Scientific reports}, volume = {12}, number = {1}, pages = {3688}, pmid = {35256689}, issn = {2045-2322}, support = {PMRF Fellowship//Ministry of Education, India/ ; DST/ICPS/QuST/Theme-1/2019/General Project number Q-68.//Department of Science and Technology, Ministry of Science and Technology, India/ ; DST/ICPS/QuST/Theme-1/2019/General Project number Q-74.//Department of Science and Technology, Ministry of Science and Technology, India/ ; }, abstract = {The experimental implementation of selective quantum process tomography (SQPT) involves computing individual elements of the process matrix with the help of a special set of states called quantum 2-design states. However, the number of experimental settings required to prepare input states from quantum 2-design states to selectively and precisely compute a desired element of the process matrix is still high, and hence constructing the corresponding unitary operations in the lab is a daunting task. 
In order to reduce the experimental complexity, we mathematically reformulated the standard SQPT problem, which we term the modified SQPT (MSQPT) method. We designed the generalized quantum circuit to prepare the required set of input states and formulated an efficient measurement strategy aimed at minimizing the experimental cost of SQPT. We experimentally demonstrated the MSQPT protocol on the IBM QX2 cloud quantum processor and selectively characterized various two- and three-qubit quantum gates.}, } @article {pmid35251564, year = {2022}, author = {Kamruzzaman, MM and Alrashdi, I and Alqazzaz, A}, title = {New Opportunities, Challenges, and Applications of Edge-AI for Connected Healthcare in Internet of Medical Things for Smart Cities.}, journal = {Journal of healthcare engineering}, volume = {2022}, number = {}, pages = {2950699}, pmid = {35251564}, issn = {2040-2309}, mesh = {*Artificial Intelligence ; Cities ; Cloud Computing ; Delivery of Health Care ; Humans ; *Internet of Things ; }, abstract = {Revolution in healthcare can be experienced with the advancement of smart sensorial things, Artificial Intelligence (AI), Machine Learning (ML), Deep Learning (DL), Internet of Medical Things (IoMT), and edge analytics with the integration of cloud computing. Connected healthcare is receiving extraordinary contemplation from the industry, government, and the healthcare communities. In this study, several studies published in the last 6 years, from 2016 to 2021, have been selected. The selection process is represented through the Prisma flow chart. It has been identified that these increasing challenges of healthcare can be overcome by the implication of AI, ML, DL, Edge AI, IoMT, 6G, and cloud computing. Still, limited areas have implemented these latest advancements and also experienced improvements in the outcomes. 
These implications have shown successful results not only in resolving the issues from the perspective of the patient but also from the perspective of healthcare professionals. It has been recommended that the different models that have been proposed in several studies must be validated further and implemented in different domains, to validate the effectiveness of these models and to ensure that these models can be implemented in several regions effectively.}, } @article {pmid35247967, year = {2022}, author = {Kuśmirek, W and Nowak, R}, title = {CNVind: an open source cloud-based pipeline for rare CNVs detection in whole exome sequencing data based on the depth of coverage.}, journal = {BMC bioinformatics}, volume = {23}, number = {1}, pages = {85}, pmid = {35247967}, issn = {1471-2105}, support = {2019/35/N/ST6/01983//Polish National Science Center/ ; }, mesh = {Algorithms ; Cloud Computing ; *DNA Copy Number Variations ; *Exome ; High-Throughput Nucleotide Sequencing/methods ; Exome Sequencing ; }, abstract = {BACKGROUND: A typical Copy Number Variations (CNVs) detection process based on the depth of coverage in the Whole Exome Sequencing (WES) data consists of several steps: (I) calculating the depth of coverage in sequencing regions, (II) quality control, (III) normalizing the depth of coverage, (IV) calling CNVs. Previous tools performed one normalization process for each chromosome-all the coverage depths in the sequencing regions from a given chromosome were normalized in a single run.

METHODS: Herein, we present the new CNVind tool for calling CNVs, where the normalization process is conducted separately for each of the sequencing regions. The total number of normalizations is equal to the number of sequencing regions in the investigated dataset. For example, when analyzing a dataset composed of n sequencing regions, CNVind performs n independent depth of coverage normalizations. Before each normalization, the application selects the k most correlated sequencing regions with the depth of coverage Pearson's Correlation as distance metric. Then, the resulting subgroup of [Formula: see text] sequencing regions is normalized, the results of all n independent normalizations are combined; finally, the segmentation and CNV calling process is performed on the resultant dataset.

RESULTS AND CONCLUSIONS: We used WES data from the 1000 Genomes project to evaluate the impact of independent normalization on CNV calling performance and compared the results with state-of-the-art tools: CODEX and exomeCopy. The results proved that independent normalization allows to improve the rare CNVs detection specificity significantly. For example, for the investigated dataset, we reduced the number of FP calls from over 15,000 to around 5000 while maintaining a constant number of TP calls equal to about 150 CNVs. However, independent normalization of each sequencing region is a computationally expensive process, therefore our pipeline is customized and can be easily run in the cloud computing environment, on the computer cluster, or the single CPU server. To our knowledge, the presented application is the first attempt to implement an innovative approach to independent normalization of the depth of WES data coverage.}, } @article {pmid35240812, year = {2022}, author = {Zhao, J and Wu, D}, title = {The risk assessment on the security of industrial internet infrastructure under intelligent convergence with the case of G.E.'s intellectual transformation.}, journal = {Mathematical biosciences and engineering : MBE}, volume = {19}, number = {3}, pages = {2896-2912}, doi = {10.3934/mbe.2022133}, pmid = {35240812}, issn = {1551-0018}, mesh = {*Artificial Intelligence ; Big Data ; *Cloud Computing ; Internet ; Risk Assessment ; }, abstract = {The industrial internet depends on the development of cloud computing, artificial intelligence, and big data analysis. Intelligent fusion is dependent on the architecture and security features of the industrial internet. Firstly, the paper studies the infrastructure mode that needs to be solved urgently in the industrial internet and provides a possible infrastructure mode and related security evaluation system. 
Secondly, it analyses the digital transformation process with the case of G.E.'s industrial internet development practice. It clarifies that G.E. is forming a new value closed-loop through digital and strategy mixed channels. Thirdly, industrial internet security research is described within multiple viewpoints based on industrial internet applications, the security service and security assurance defense system's architecture, and the non-user entrance probability model. Finally, the paper illustrates the changes in knowledge workflow and social collaboration caused by the industrial internet under intelligent manufacture.}, } @article {pmid35230953, year = {2023}, author = {Wang, X and Ren, L and Yuan, R and Yang, LT and Deen, MJ}, title = {QTT-DLSTM: A Cloud-Edge-Aided Distributed LSTM for Cyber-Physical-Social Big Data.}, journal = {IEEE transactions on neural networks and learning systems}, volume = {34}, number = {10}, pages = {7286-7298}, doi = {10.1109/TNNLS.2022.3140238}, pmid = {35230953}, issn = {2162-2388}, abstract = {Cyber-physical-social systems (CPSS), an emerging cross-disciplinary research area, combines cyber-physical systems (CPS) with social networking for the purpose of providing personalized services for humans. CPSS big data, recording various aspects of human lives, should be processed to mine valuable information for CPSS services. To efficiently deal with CPSS big data, artificial intelligence (AI), an increasingly important technology, is used for CPSS data processing and analysis. Meanwhile, the rapid development of edge devices with fast processors and large memories allows local edge computing to be a powerful real-time complement to global cloud computing. Therefore, to facilitate the processing and analysis of CPSS big data from the perspective of multi-attributes, a cloud-edge-aided quantized tensor-train distributed long short-term memory (QTT-DLSTM) method is presented in this article. 
First, a tensor is used to represent the multi-attributes CPSS big data, which will be decomposed into the QTT form to facilitate distributed training and computing. Second, a distributed cloud-edge computing model is used to systematically process the CPSS data, including global large-scale data processing in the cloud, and local small-scale data processed at the edge. Third, a distributed computing strategy is used to improve the efficiency of training via partitioning the weight matrix and large amounts of input data in the QTT form. Finally, the performance of the proposed QTT-DLSTM method is evaluated using experiments on a public discrete manufacturing process dataset, the Li-ion battery dataset, and a public social dataset.}, } @article {pmid35228830, year = {2022}, author = {Verma, A and Agarwal, G and Gupta, AK}, title = {A novel generalized fuzzy intelligence-based ant lion optimization for internet of things based disease prediction and diagnosis.}, journal = {Cluster computing}, volume = {25}, number = {5}, pages = {3283-3298}, pmid = {35228830}, issn = {1386-7857}, abstract = {In the modern healthcare system, the function of the Internet of Things (IoT) and the data mining methods with cloud computing plays an essential role in controlling a large number of big data for predicting and diagnosing various categories of diseases. However, when the patients suffer from more than one disease, the physician may not identify it properly. Therefore, in this research, the predictive method using the cloud with IoT-based database is proposed for forecasting the diseases that utilized the biosensors to estimate the constraints of patients. In addition, a novel Generalized Fuzzy Intelligence-based Ant Lion Optimization (GFIbALO) classifier along with a regression rule is proposed for predicting the diseases accurately. 
Initially, the dataset is filtered and feature extracted using the regression rule that data is processed on the proposed GFIbALO approach for classifying diseases. Moreover, suppose the patient has been affected by any diseases, in that case, the warning signal will be alerted to the patients via text or any other way, and the patients can get advice from doctors or any other medical support. The implementation of the proposed GFIbALO classifier is done with the use of the MATLAB tool. Subsequently, the results from the presented model are compared with state of the art techniques, and it shows that the presented method is more beneficial in diagnosis and disease forecast.}, } @article {pmid35225946, year = {2022}, author = {Celesti, A and Cimino, V and Naro, A and Portaro, S and Fazio, M and Villari, M and Calabró, RS}, title = {Recent Considerations on Gaming Console Based Training for Multiple Sclerosis Rehabilitation.}, journal = {Medical sciences (Basel, Switzerland)}, volume = {10}, number = {1}, pages = {}, pmid = {35225946}, issn = {2076-3271}, mesh = {Humans ; *Multiple Sclerosis/therapy ; Pilot Projects ; Postural Balance/physiology ; Quality of Life ; *Video Games ; Young Adult ; }, abstract = {Multiple Sclerosis (MS) is a well-known, chronic demyelinating disease of the Central Nervous System (CNS) and one of the most common causes of disability in young adults. In this context, one of the major challenges in patients' rehabilitation is to maintain the gained motor abilities in terms of functional independence. This could be partially obtained by applying new emerging and cutting-edge virtual/augmented reality and serious game technologies for a playful, noninvasive treatment that was demonstrated to be quite efficient and effective in enhancing the clinical status of patients and their (re)integration into society. Recently, Cloud computing and Internet of Things (IoT) emerged as technologies that can potentially revolutionize patients' care. 
To achieve such a goal, a system that on one hand gathers patients' clinical parameters through a network of medical IoT devices equipped with sensors and that, on the other hand, sends the collected data to a hospital Cloud for processing and analytics is required. In this paper, we assess the effectiveness of a Nintendo Wii Fit[®] Plus Balance Board (WFBB) used as an IoT medical device adopted in a rehabilitation training program aimed at improving the physical abilities of MS patients (pwMS). In particular, the main scientific contribution of this paper is twofold: (i) to present a preliminary new pilot study investigating whether exercises based on the Nintendo Wii Fit[®] balance board included in a rehabilitation training program could improve physical abilities and Quality of Life (QoL) of patients compared to that of a conventional four-week rehabilitation training program; (ii) to discuss how such a rehabilitation training program could be adopted in the perspective of near future networks of medical IoT-based rehabilitation devices, interconnected with a hospital Cloud system for big data processing to improve patients' therapies and support the scientific research about motor rehabilitation. Results demonstrate the advantages of our approach from both health and technological points of view.}, } @article {pmid35224941, year = {2022}, author = {Wang, H and Chen, WB and He, L and Li, HF}, title = {[Responses of aquatic vegetation coverage to interannual variations of water level in different hydrologically connected sub-lakes of Poyang Lake, China].}, journal = {Ying yong sheng tai xue bao = The journal of applied ecology}, volume = {33}, number = {1}, pages = {191-200}, doi = {10.13287/j.1001-9332.202201.013}, pmid = {35224941}, issn = {1001-9332}, mesh = {China ; *Ecosystem ; Floods ; Hydrology ; *Lakes ; Water ; }, abstract = {The variation of water level is the main environmental factor controlling the growth of aquatic vegetation. 
It is of significance to understand its influences on aquatic vegetation coverage in sub-lakes under different hydrological control modes. Taking the free connected sub-lake Bang Lake and locally controlled sub-lake Dahuchi Lake of Poyang Lake as a case and based on remote sensing cloud computing platform of the Google Earth Engine (GEE), we used the pixel binary model to estimate aquatic vegetation coverage from 2000 to 2019, and analyzed the temporal and spatial differentiation characteristics, and the variation trend was simulated by combining the method of Sen+M-K. We analyzed the water level change characteristics during the study period and the relationship between the hydrological parameters and the aquatic vegetation coverage area of sub-lakes with different hydrological connectivity was explored by setting up the water level fluctuation parameters. The results showed that the aquatic vegetation coverage of Bang Lake was more susceptible to water level changes, while Dahuchi Lake was more stable. The aquatic vegetation was patchily and sporadically distributed in the years with low vegetation coverage. In the years with high vegetation coverage, it was distributed in a ring-like pattern, spreading from the center of the lake to the shore. The aquatic vegetation coverage of Bang Lake was more likely influenced by water level fluctuation rate, while the aquatic vegetation coverage of Dahuchi Lake was more likely influenced by the flooding duration of 17 m characteristic water level. The flooding duration of 19 m characteristic water level had a strong negative correlation with the aquatic vegetation coverage of Bang Lake and Dahuchi Lake. The trend of aquatic vegetation in Bang Lake was dominated by stabilization and slight improvement, while that in Dahuchi Lake was dominated by stabilization and significant degradation. 
Our results could help to further understand the dynamics of water hydrological ecosystem with different hydrological connectivity and provide a reference for lake management and conservation.}, } @article {pmid35224632, year = {2022}, author = {Bradshaw, RL and Kawamoto, K and Kaphingst, KA and Kohlmann, WK and Hess, R and Flynn, MC and Nanjo, CJ and Warner, PB and Shi, J and Morgan, K and Kimball, K and Ranade-Kharkar, P and Ginsburg, O and Goodman, M and Chambers, R and Mann, D and Narus, SP and Gonzalez, J and Loomis, S and Chan, P and Monahan, R and Borsato, EP and Shields, DE and Martin, DK and Kessler, CM and Del Fiol, G}, title = {GARDE: a standards-based clinical decision support platform for identifying population health management cohorts.}, journal = {Journal of the American Medical Informatics Association : JAMIA}, volume = {29}, number = {5}, pages = {928-936}, pmid = {35224632}, issn = {1527-974X}, support = {R18 DK123372/DK/NIDDK NIH HHS/United States ; U01 CA232826/CA/NCI NIH HHS/United States ; U24 CA204800/CA/NCI NIH HHS/United States ; }, mesh = {*Decision Support Systems, Clinical ; Delivery of Health Care ; Electronic Health Records ; Humans ; Information Storage and Retrieval ; *Population Health Management ; }, abstract = {UNLABELLED: Population health management (PHM) is an important approach to promote wellness and deliver health care to targeted individuals who meet criteria for preventive measures or treatment. A critical component for any PHM program is a data analytics platform that can target those eligible individuals.

OBJECTIVE: The aim of this study was to design and implement a scalable standards-based clinical decision support (CDS) approach to identify patient cohorts for PHM and maximize opportunities for multi-site dissemination.

MATERIALS AND METHODS: An architecture was established to support bidirectional data exchanges between heterogeneous electronic health record (EHR) data sources, PHM systems, and CDS components. HL7 Fast Healthcare Interoperability Resources and CDS Hooks were used to facilitate interoperability and dissemination. The approach was validated by deploying the platform at multiple sites to identify patients who meet the criteria for genetic evaluation of familial cancer.

RESULTS: The Genetic Cancer Risk Detector (GARDE) platform was created and is comprised of four components: (1) an open-source CDS Hooks server for computing patient eligibility for PHM cohorts, (2) an open-source Population Coordinator that processes GARDE requests and communicates results to a PHM system, (3) an EHR Patient Data Repository, and (4) EHR PHM Tools to manage patients and perform outreach functions. Site-specific deployments were performed on onsite virtual machines and cloud-based Amazon Web Services.

DISCUSSION: GARDE's component architecture establishes generalizable standards-based methods for computing PHM cohorts. Replicating deployments using one of the established deployment methods requires minimal local customization. Most of the deployment effort was related to obtaining site-specific information technology governance approvals.}, } @article {pmid35218029, year = {2022}, author = {Fan, ZG and Tian, NL and He, SH and Ma, GS}, title = {Maintained P2Y12 inhibitor monotherapy after shorter-duration of dual antiplatelet therapy in patients undergoing coronary drug-eluting stents implantation: An updated meta-analysis of randomized trials.}, journal = {Journal of clinical pharmacy and therapeutics}, volume = {47}, number = {7}, pages = {860-869}, doi = {10.1111/jcpt.13626}, pmid = {35218029}, issn = {1365-2710}, support = {ZDXKA2016023//Jiangsu Provincial Key Medical Discipline/ ; }, mesh = {Drug Therapy, Combination ; *Drug-Eluting Stents/adverse effects ; Humans ; *Myocardial Infarction/drug therapy ; *Percutaneous Coronary Intervention/methods ; Platelet Aggregation Inhibitors/adverse effects ; Randomized Controlled Trials as Topic ; *Stroke/etiology/prevention & control ; *Thrombosis/drug therapy ; Treatment Outcome ; }, abstract = {WHAT IS KNOWN AND OBJECTIVE: It is well known that high in-stent thrombotic risk due to the superimposition of a platelet-rich thrombus was considered as the main origin of major adverse cardiac events after stent implantation. The clinical management of antiplatelet therapy strategy after percutaneous coronary intervention (PCI) remains controversial. This study is sought to explore the efficacy and safety of a maintained P2Y12 inhibitor monotherapy after shorter-duration of dual antiplatelet therapy (DAPT) in these patients.

METHODS: Medline, Google Scholar, Web of Science, and the Cochrane Controlled Trials Registry were searched online for retrieving eligible citations. A composite of all-cause death, myocardial infarction (MI) and stroke was defined as major adverse cardio- and cerebro-vascular events (MACCE), which is analysed as the primary efficacy endpoint. The risk of bleeding events was chosen as safety endpoint.

RESULTS: Five randomized clinical trials (RCT) with 32,143 patients were finally analysed. A maintained P2Y12 inhibitor monotherapy after shorter-duration of DAPT could not only reduce the incidence of MACCE [odds ratios (OR): 0.89, 95% confidence intervals (CI): 0.79-0.99, p = 0.037], but also the bleeding risk (OR 0.61, 95% CI: 0.44-0.85, p = 0.003). No higher incidence of any ischaemic events, including MI, stroke or definite stent thrombosis (ST) was observed with respect to this new antiplatelet therapy option.

CONCLUSIONS: A maintained P2Y12 inhibitor monotherapy after shorter-duration of DAPT was suggested as a more preferable antiplatelet therapy option in patients undergoing coronary drug-eluting stents (DES) placement. Larger and more powerful randomized trials with precise sub-analyses are still necessary for further confirming these relevant benefits.}, } @article {pmid35214574, year = {2022}, author = {Zubair, AA and Razak, SA and Ngadi, MA and Al-Dhaqm, A and Yafooz, WMS and Emara, AM and Saad, A and Al-Aqrabi, H}, title = {A Cloud Computing-Based Modified Symbiotic Organisms Search Algorithm (AI) for Optimal Task Scheduling.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {4}, pages = {}, pmid = {35214574}, issn = {1424-8220}, support = {RSP-2021/260//King Saud University/ ; }, mesh = {Algorithms ; Artificial Intelligence ; *Cloud Computing ; Ecosystem ; *Symbiosis ; }, abstract = {The search algorithm based on symbiotic organisms' interactions is a relatively recent bio-inspired algorithm of the swarm intelligence field for solving numerical optimization problems. It is meant to optimize applications based on the simulation of the symbiotic relationship among the distinct species in the ecosystem. The task scheduling problem is NP complete, which makes it hard to obtain a correct solution, especially for large-scale tasks. This paper proposes a modified symbiotic organisms search-based scheduling algorithm for the efficient mapping of heterogeneous tasks to access cloud resources of different capacities. The significant contribution of this technique is the simplified representation of the algorithm's mutualism process, which uses equity as a measure of relationship characteristics or efficiency of species in the current ecosystem to move to the next generation. 
These relational characteristics are achieved by replacing the original mutual vector, which uses an arithmetic mean to measure the mutual characteristics with a geometric mean that enhances the survival advantage of two distinct species. The modified symbiotic organisms search algorithm (G_SOS) aims to minimize the task execution time (makespan), cost, response time, and degree of imbalance, and improve the convergence speed for an optimal solution in an IaaS cloud. The performance of the proposed technique was evaluated using a CloudSim toolkit simulator, and the percentage of improvement of the proposed G_SOS over classical SOS and PSO-SA in terms of makespan minimization ranges between 0.61-20.08% and 1.92-25.68% over a large-scale task that spans between 100 to 1000 Million Instructions (MI). The solutions are found to be better than the existing standard (SOS) technique and PSO.}, } @article {pmid35214506, year = {2022}, author = {Chen, L and Lu, Y and He, Z and Chen, Y}, title = {Online Trajectory Estimation Based on a Network-Wide Cellular Fingerprint Map.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {4}, pages = {}, pmid = {35214506}, issn = {1424-8220}, mesh = {*Cloud Computing ; }, abstract = {Cellular signaling data is widely available in mobile communications and contains abundant movement sensing information of individual travelers. Using cellular signaling data to estimate the trajectories of mobile users can benefit many location-based applications, including infectious disease tracing and screening, network flow sensing, traffic scheduling, etc. However, conventional methods rely too much on heuristic hypotheses or hardware-dependent network fingerprinting approaches. To address the above issues, NF-Track (Network-wide Fingerprinting based Tracking) is proposed to realize accurate online map-matching of cellular location sequences. 
In particular, neither prior assumptions such as arterial preference and less-turn preference nor extra hardware-relevant parameters such as RSS and SNR are required for the proposed framework. Therefore, it has a strong generalization ability to be flexibly deployed in the cloud computing environment of telecom operators. In this architecture, a novel segment-granularity fingerprint map is put forward to provide sufficient prior knowledge. Then, a real-time trajectory estimation process is developed for precise positioning and tracking. In our experiments implemented on the urban road network, NF-Track can achieve a recall rate of 91.68% and a precision rate of 90.35% in sophisticated traffic scenes, which are superior to the state-of-the-art model-based unsupervised learning approaches.
In addition, the improved hybrid monarch butterfly optimization and improved ant colony optimization algorithm (HMA) are used to search for the optimal task scheduling scheme. Finally, HMA is evaluated by rigorous simulation experiments, showing that HMA outperformed other algorithms in terms of task completion rate. When the number of nodes exceeds 10, the completion rate of all tasks is greater than 90%, which well meets the real-time requirements of the corresponding tasks in the intelligent production lines. In addition, the algorithm outperforms other algorithms in terms of maximum completion rate and power consumption.}, } @article {pmid35214384, year = {2022}, author = {Shaukat, M and Alasmary, W and Alanazi, E and Shuja, J and Madani, SA and Hsu, CH}, title = {Balanced Energy-Aware and Fault-Tolerant Data Center Scheduling.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {4}, pages = {}, pmid = {35214384}, issn = {1424-8220}, abstract = {Fault tolerance, performance, and throughput have been major areas of research and development since the evolution of large-scale networks. Internet-based applications are rapidly growing, including large-scale computations, search engines, high-definition video streaming, e-commerce, and video on demand. In recent years, energy efficiency and fault tolerance have gained significant importance in data center networks and various studies directed the attention towards green computing. Data centers consume a huge amount of energy and various architectures and techniques have been proposed to improve the energy efficiency of data centers. However, there is a tradeoff between energy efficiency and fault tolerance. The objective of this study is to highlight a better tradeoff between the two extremes: (a) high energy efficiency and (b) ensuring high availability through fault tolerance and redundancy. 
The main objective of the proposed Energy-Aware Fault-Tolerant (EAFT) approach is to keep one level of redundancy for fault tolerance while scheduling resources for energy efficiency. The resultant energy-efficient data center network provides availability as well as fault tolerance at reduced operating cost. The main contributions of this article are: (a) we propose an Energy-Aware Fault-Tolerant (EAFT) data center network scheduler; (b) we compare EAFT with energy efficient resource scheduling techniques to provide analysis of parameters such as, workload distribution, average task per servers, and energy consumption; and (c) we highlight effects of energy efficiency techniques on the network performance of the data center.}, } @article {pmid35214297, year = {2022}, author = {Kareem, SS and Mostafa, RR and Hashim, FA and El-Bakry, HM}, title = {An Effective Feature Selection Model Using Hybrid Metaheuristic Algorithms for IoT Intrusion Detection.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {4}, pages = {}, pmid = {35214297}, issn = {1424-8220}, mesh = {Algorithms ; Artificial Intelligence ; Cloud Computing ; *Internet of Things ; Machine Learning ; }, abstract = {The increasing use of Internet of Things (IoT) applications in various aspects of our lives has created a huge amount of data. IoT applications often require the presence of many technologies such as cloud computing and fog computing, which have led to serious challenges to security. As a result of the use of these technologies, cyberattacks are also on the rise because current security methods are ineffective. Several artificial intelligence (AI)-based security solutions have been presented in recent years, including intrusion detection systems (IDS). Feature selection (FS) approaches are required for the development of intelligent analytic tools that need data pretreatment and machine-learning algorithm-performance enhancement. 
By reducing the number of selected features, FS aims to improve classification accuracy. This article presents a new FS method through boosting the performance of Gorilla Troops Optimizer (GTO) based on the algorithm for bird swarms (BSA). This BSA is used to boost performance exploitation of GTO in the newly developed GTO-BSA because it has a strong ability to find feasible regions with optimal solutions. As a result, the quality of the final output will increase, improving convergence. GTO-BSA's performance was evaluated using a variety of performance measures on four IoT-IDS datasets: NSL-KDD, CICIDS-2017, UNSW-NB15 and BoT-IoT. The results were compared to those of the original GTO, BSA, and several state-of-the-art techniques in the literature. According to the findings of the experiments, GTO-BSA had a better convergence rate and higher-quality solutions.}, } @article {pmid35214282, year = {2022}, author = {Arikumar, KS and Prathiba, SB and Alazab, M and Gadekallu, TR and Pandya, S and Khan, JM and Moorthy, RS}, title = {FL-PMI: Federated Learning-Based Person Movement Identification through Wearable Devices in Smart Healthcare Systems.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {4}, pages = {}, pmid = {35214282}, issn = {1424-8220}, mesh = {Artificial Intelligence ; Cloud Computing ; Delivery of Health Care ; Humans ; *Internet of Things ; *Wearable Electronic Devices ; }, abstract = {Recent technological developments, such as the Internet of Things (IoT), artificial intelligence, edge, and cloud computing, have paved the way in transforming traditional healthcare systems into smart healthcare (SHC) systems. SHC escalates healthcare management with increased efficiency, convenience, and personalization, via use of wearable devices and connectivity, to access information with rapid responses. Wearable devices are equipped with multiple sensors to identify a person's movements. 
The unlabeled data acquired from these sensors are directly trained in the cloud servers, which require vast memory and high computational costs. To overcome this limitation in SHC, we propose a federated learning-based person movement identification (FL-PMI). The deep reinforcement learning (DRL) framework is leveraged in FL-PMI for auto-labeling the unlabeled data. The data are then trained using federated learning (FL), in which the edge servers allow the parameters alone to pass on the cloud, rather than passing vast amounts of sensor data. Finally, the bidirectional long short-term memory (BiLSTM) in FL-PMI classifies the data for various processes associated with the SHC. The simulation results proved the efficiency of FL-PMI, with 99.67% accuracy scores, minimized memory usage and computational costs, and reduced transmission data by 36.73%.}, } @article {pmid35214212, year = {2022}, author = {Alkhateeb, A and Catal, C and Kar, G and Mishra, A}, title = {Hybrid Blockchain Platforms for the Internet of Things (IoT): A Systematic Literature Review.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {4}, pages = {}, pmid = {35214212}, issn = {1424-8220}, mesh = {*Blockchain ; Cloud Computing ; Delivery of Health Care ; Information Dissemination ; *Internet of Things ; }, abstract = {In recent years, research into blockchain technology and the Internet of Things (IoT) has grown rapidly due to an increase in media coverage. Many different blockchain applications and platforms have been developed for different purposes, such as food safety monitoring, cryptocurrency exchange, and secure medical data sharing. However, blockchain platforms cannot store all the generated data. Therefore, they are supported with data warehouses, which in turn is called a hybrid blockchain platform. While several systems have been developed based on this idea, a current state-of-the-art systematic overview on the use of hybrid blockchain platforms is lacking. 
Therefore, a systematic literature review (SLR) study has been carried out by us to investigate the motivations for adopting them, the domains at which they were used, the adopted technologies that made this integration effective, and, finally, the challenges and possible solutions. This study shows that security, transparency, and efficiency are the top three motivations for adopting these platforms. The energy, agriculture, health, construction, manufacturing, and supply chain domains are the top domains. The most adopted technologies are cloud computing, fog computing, telecommunications, and edge computing. While there are several benefits of using hybrid blockchains, there are also several challenges reported in this study.}, } @article {pmid35207676, year = {2022}, author = {Lee, J and Jeong, J and Jung, S and Moon, J and Rho, S}, title = {Verification of De-Identification Techniques for Personal Information Using Tree-Based Methods with Shapley Values.}, journal = {Journal of personalized medicine}, volume = {12}, number = {2}, pages = {}, pmid = {35207676}, issn = {2075-4426}, support = {P0008703//Korea Institute for Advancement of Technology/ ; 2021-2018-0-01799//Institute for Information and Communications Technology Promotion/ ; }, abstract = {With the development of big data and cloud computing technologies, the importance of pseudonym information has grown. However, the tools for verifying whether the de-identification methodology is correctly applied to ensure data confidentiality and usability are insufficient. This paper proposes a verification of de-identification techniques for personal healthcare information by considering data confidentiality and usability. Data are generated and preprocessed by considering the actual statistical data, personal information datasets, and de-identification datasets based on medical data to represent the de-identification technique as a numeric dataset. 
Five tree-based regression models (i.e., decision tree, random forest, gradient boosting machine, extreme gradient boosting, and light gradient boosting machine) are constructed using the de-identification dataset to effectively discover nonlinear relationships between dependent and independent variables in numerical datasets. Then, the most effective model is selected from personal information data in which pseudonym processing is essential for data utilization. The Shapley additive explanation, an explainable artificial intelligence technique, is applied to the most effective model to establish pseudonym processing policies and machine learning to present a machine-learning process that selects an appropriate de-identification methodology.}, } @article {pmid35205036, year = {2022}, author = {Li, Z and Gurgel, H and Xu, L and Yang, L and Dong, J}, title = {Improving Dengue Forecasts by Using Geospatial Big Data Analysis in Google Earth Engine and the Historical Dengue Information-Aided Long Short Term Memory Modeling.}, journal = {Biology}, volume = {11}, number = {2}, pages = {}, pmid = {35205036}, issn = {2079-7737}, support = {41801336//National Natural Science Foundation of China/ ; 42061134019//National Natural Science Foundation of China/ ; QYZDB-SSW-DQC005)//Key Research Program of Frontier Sciences of the Chinese Academy of Sciences/ ; E0V00110YZ//Institute of Geographic Sciences and Natural Resources Research (IGNSRR), Chinese Academy of Sciences (CAS)/ ; }, abstract = {Timely and accurate forecasts of dengue cases are of great importance for guiding disease prevention strategies, but still face challenges from (1) time-effectiveness due to time-consuming satellite data downloading and processing, (2) weak spatial representation capability due to data dependence on administrative unit-based statistics or weather station-based observations, and (3) stagnant accuracy without the application of historical case information. 
Geospatial big data, cloud computing platforms (e.g., Google Earth Engine, GEE), and emerging deep learning algorithms (e.g., long short term memory, LSTM) provide new opportunities for advancing these efforts. Here, we focused on the dengue epidemics in the urban agglomeration of the Federal District of Brazil (FDB) during 2007-2019. A new framework was proposed using geospatial big data analysis in the Google Earth Engine (GEE) platform and long short term memory (LSTM) modeling for dengue case forecasts over an epidemiological week basis. We first defined a buffer zone around an impervious area as the main area of dengue transmission by considering the impervious area as a human-dominated area and used the maximum distance of the flight range of Aedes aegypti and Aedes albopictus as a buffer distance. Those zones were used as units for further attribution analyses of dengue epidemics by aggregating the pixel values into the zones. The near weekly composite of potential driving factors was generated in GEE using the epidemiological weeks during 2007-2019, from the relevant geospatial data with daily or sub-daily temporal resolution. A multi-step-ahead LSTM model was used, and the time-differenced natural log-transformed dengue cases were used as outcomes. Two modeling scenarios (with and without historical dengue cases) were set to examine the potential of historical information on dengue forecasts. The results indicate that the performance was better when historical dengue cases were used and the 5-weeks-ahead forecast had the best performance, and the peak of a large outbreak in 2019 was accurately forecasted. 
The proposed framework in this study suggests the potential of the GEE platform, the LSTM algorithm, as well as historical information for dengue risk forecasting, which can easily be extensively applied to other regions or globally for timely and practical dengue forecasts.}, } @article {pmid35199087, year = {2022}, author = {Schatz, MC and Philippakis, AA and Afgan, E and Banks, E and Carey, VJ and Carroll, RJ and Culotti, A and Ellrott, K and Goecks, J and Grossman, RL and Hall, IM and Hansen, KD and Lawson, J and Leek, JT and Luria, AO and Mosher, S and Morgan, M and Nekrutenko, A and O'Connor, BD and Osborn, K and Paten, B and Patterson, C and Tan, FJ and Taylor, CO and Vessio, J and Waldron, L and Wang, T and Wuichet, K}, title = {Inverting the model of genomics data sharing with the NHGRI Genomic Data Science Analysis, Visualization, and Informatics Lab-space.}, journal = {Cell genomics}, volume = {2}, number = {1}, pages = {}, pmid = {35199087}, issn = {2666-979X}, support = {U24 HG006620/HG/NHGRI NIH HHS/United States ; U24 HG010262/HG/NHGRI NIH HHS/United States ; U24 HG010263/HG/NHGRI NIH HHS/United States ; }, abstract = {The NHGRI Genomic Data Science Analysis, Visualization, and Informatics Lab-space (AnVIL; https://anvilproject.org) was developed to address a widespread community need for a unified computing environment for genomics data storage, management, and analysis. In this perspective, we present AnVIL, describe its ecosystem and interoperability with other platforms, and highlight how this platform and associated initiatives contribute to improved genomic data sharing efforts. The AnVIL is a federated cloud platform designed to manage and store genomics and related data, enable population-scale analysis, and facilitate collaboration through the sharing of data, code, and analysis results. 
By inverting the traditional model of data sharing, the AnVIL eliminates the need for data movement while also adding security measures for active threat detection and monitoring and provides scalable, shared computing resources for any researcher. We describe the core data management and analysis components of the AnVIL, which currently consists of Terra, Gen3, Galaxy, RStudio/Bioconductor, Dockstore, and Jupyter, and describe several flagship genomics datasets available within the AnVIL. We continue to extend and innovate the AnVIL ecosystem by implementing new capabilities, including mechanisms for interoperability and responsible data sharing, while streamlining access management. The AnVIL opens many new opportunities for analysis, collaboration, and data sharing that are needed to drive research and to make discoveries through the joint analysis of hundreds of thousands to millions of genomes along with associated clinical and molecular data types.}, } @article {pmid35194579, year = {2022}, author = {Sengupta, K and Srivastava, PR}, title = {HRNET: AI-on-Edge for Mask Detection and Social Distancing Calculation.}, journal = {SN computer science}, volume = {3}, number = {2}, pages = {157}, pmid = {35194579}, issn = {2661-8907}, abstract = {The purpose of the paper is to provide innovative emerging technology framework for community to combat epidemic situations. The paper proposes a unique outbreak response system framework based on artificial intelligence and edge computing for citizen centric services to help track and trace people eluding safety policies like mask detection and social distancing measure in public or workplace setup. The framework further provides implementation guideline in industrial setup as well for governance and contact tracing tasks. The adoption will thus lead in smart city planning and development focusing on citizen health systems contributing to improved quality of life. 
The conceptual framework presented is validated through quantitative data analysis via secondary data collection from researcher's public websites, GitHub repositories and renowned journals and further benchmarking were conducted for experimental results in Microsoft Azure cloud environment. The study includes selective AI models for benchmark analysis and were assessed on performance and accuracy in edge computing environment for large-scale societal setup. Overall YOLO model outperforms in object detection task and is faster enough for mask detection and HRNetV2 outperform semantic segmentation problem applied to solve social distancing task in AI-Edge inferencing environmental setup. The paper proposes new Edge-AI algorithm for building technology-oriented solutions for detecting mask in human movement and social distance. The paper enriches the technological advancement in artificial intelligence and edge computing applied to problems in society and healthcare systems. The framework further equips government agency, system providers to design and construct technology-oriented models in community setup to increase the quality of life using emerging technologies into smart urban environments.}, } @article {pmid35193188, year = {2022}, author = {Hahn, M and Arthanayaka, T and Beiersdorfer, P and Brown, GV and Savin, DW}, title = {Ion energy distribution in an electron beam ion trap inferred from simulations of the trapped ion cloud.}, journal = {Physical review. E}, volume = {105}, number = {1-2}, pages = {015204}, doi = {10.1103/PhysRevE.105.015204}, pmid = {35193188}, issn = {2470-0053}, abstract = {We have inferred the energy distribution of trapped ions in an electron beam ion trap (EBIT) from simulations of the spatial distribution of Fe^{13+} ions and a comparison with measured visible light images of the ion cloud. 
We simulated the cloud of Fe^{13+} ions by computing ion trajectories in the EBIT for different ion energy distributions used to initialize the trajectories. We then performed a least-squares fit to infer the ion energy distribution that best reproduced the measured ion cloud. These best-fit distributions were typically non-Maxwellian. For electron beam energies of 395-475 eV and electron beam currents of 1-9 mA, we find that the average ion energy is in the range of 10-300 eV. We also find that the average ion energy increases with increasing beam current approximately as 〈E〉≈25I_{e}eV, where I_{e} is the electron beam current in mA. We have also compared our results to Maxwell-Boltzmann-distribution ion clouds. We find that our best-fit non-thermal distributions have an 〈E〉 that is less than half that of the T from the best-fit Maxwell-Boltzmann distributions (〈E〉/q)/T=0.41±0.05.}, } @article {pmid35186242, year = {2022}, author = {Yan, M and Yan, M}, title = {Monitoring and Early Warning Analysis of the Epidemic Situation of Escherichia coli Based on Big Data Technology and Cloud Computing.}, journal = {Journal of healthcare engineering}, volume = {2022}, number = {}, pages = {8739447}, pmid = {35186242}, issn = {2040-2309}, mesh = {*Big Data ; *Cloud Computing ; Escherichia coli ; Humans ; Technology ; }, abstract = {The purpose of this study is to analyze the molecular epidemiological characteristics and resistance mechanisms of Escherichia coli. The study established a big data cloud computing prediction model for the epidemic mechanism of the pathogen. The study establishes the early warning, control parameters, and mathematical model of Escherichia coli infectious disease and monitors the molecular sequence of the pathogen based on discrete indicators. A nonlinear mathematical model equation was used to establish the epidemic trend model of Escherichia coli. The study shows that the use of the model can control the relative error at about 5%. 
The experiment proves the effectiveness of the combined model.}, } @article {pmid35186061, year = {2022}, author = {Yin, H}, title = {Public Security Video Image Detection System Construction Platform in Cloud Computing Environment.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {4113803}, pmid = {35186061}, issn = {1687-5273}, mesh = {Algorithms ; *Cloud Computing ; *Computer Security ; Data Collection ; }, abstract = {The public security image detection system is an important way to assist the police in the investigation. In today's cloud computing environment, the processing power of cloud computing is gradually improving. In order to explore its application in the investigation system, this paper constructs a public security video image investigation system based on cloud computing environment. This paper uses cloud computing technology to improve the processing capacity of the system. It then combines the storage capabilities of the backend server implementation technology and the working principle of Hadoop to construct a basic model. Then combined with the storage capability of the backend server implementation technology and the working principle of Hadoop and CP-ABE encryption, decryption, and reconstruction, a basic model is constructed. This paper also designs public security video surveillance system vehicle detection and test experiments, cloud storage encryption algorithm experiments, and computational storage requirements analysis experiments. It optimizes the system based on the results of the experiment and finally compares it with the traditional investigation system. The experimental results show that the public security video image detection system based on cloud computing can improve the accuracy by 5%-25% compared with the traditional detection system. 
And the public security video image detection system based on cloud computing can increase the efficiency by 2%-17% compared with the traditional detection system.}, } @article {pmid35180272, year = {2022}, author = {Nai, R}, title = {The design of smart classroom for modern college English teaching under Internet of Things.}, journal = {PloS one}, volume = {17}, number = {2}, pages = {e0264176}, doi = {10.1371/journal.pone.0264176}, pmid = {35180272}, issn = {1932-6203}, mesh = {Academic Performance ; Computer-Assisted Instruction/*methods ; Humans ; *Internet of Things ; *Language ; Students/psychology ; }, abstract = {This study aims to improve the efficiency of modern college English teaching. With interactive teaching as the core element, smart classrooms as technical support, and informationization, automation, and interaction as the main body, a smart system for college English teaching is established based on cloud computing and Internet of Things (IoT). The system is built using the B/S architecture and verified by specific example data, to prove the effectiveness of the proposed smart system for college English teaching based on the IoT. It is found that the smart platform for English teaching based on the IoT not only effectively improves the stability of the system, but also enhances the personal experience of students. The coordinated operation of the various modules reduces the response time of the system. When the number of users reaches 500, the average response time of the system is 3.65 seconds, and the memory and occupancy rate of the system are reduced. Students who receive smart classrooms for teaching have a greater improvement in the test results of various aspects of English without teacher intervention. 
The proposed model can significantly improve the performance of poor students and reduce the gap in learning performance in the class, which provides reliable research ideas for smart teaching in modern colleges and universities.}, } @article {pmid35177634, year = {2022}, author = {Li, H and Li, M}, title = {Patent data access control and protection using blockchain technology.}, journal = {Scientific reports}, volume = {12}, number = {1}, pages = {2772}, pmid = {35177634}, issn = {2045-2322}, abstract = {The purposes are to develop the patent data profoundly, control the data access process effectively, and protect the patent information and content. The traditional patent review systems are analyzed. For the present patent data security and privacy protection technologies and algorithms, the patent information data are stored on different block nodes after data fragmentation using blockchain technology. Then the data are shared using the data encryption algorism. In this way, data access control can be restricted to particular users. Finally, a patent data protection scheme based on privacy protection is proposed. The security of the scheme and the model performance are verified through simulation experiments. The time required to encrypt 10 MB files with 64-bit and 128-bit data is 35 ms and 105 ms, respectively. The proposed re-encryption algorithm only needs 1 s to decrypt 64 KB data, and only 1% of the data needs asymmetric encryption. This greatly reduces the computational overhead of encryption. Results demonstrate that the system can effectively control the access methods of users, efficiently protect the personal privacy and patent content of patent applicants, and reduce the patent office cloud computing overhead using the local resources of branches. The distributed storage methods can reduce the cloud system interaction of the patent office, thereby greatly improving the speed of encryption and ensuring data security. 
Compared with the state-of-the-art methods, the proposed patent data access and protection system based on blockchain technology has greater advantages in data security and model performance. The research results can provide a research foundation and practical value for the protection and review systems of patent data.}, } @article {pmid35175690, year = {2022}, author = {Mangold, KE and Zhou, Z and Schoening, M and Moreno, JD and Silva, JR}, title = {Creating Ion Channel Kinetic Models Using Cloud Computing.}, journal = {Current protocols}, volume = {2}, number = {2}, pages = {e374}, pmid = {35175690}, issn = {2691-1299}, support = {R01 HL136553/HL/NHLBI NIH HHS/United States ; /NH/NIH HHS/United States ; R01HL136553/HB/NHLBI NIH HHS/United States ; T32-HL134635/HB/NHLBI NIH HHS/United States ; T32 HL134635/HL/NHLBI NIH HHS/United States ; }, mesh = {*Cloud Computing ; Computer Simulation ; *Ion Channels/metabolism ; Kinetics ; Software ; }, abstract = {Computational modeling of ion channels provides key insight into experimental electrophysiology results and can be used to connect channel dynamics to emergent phenomena observed at the tissue and organ levels. However, creation of these models requires substantial mathematical and computational background. This tutorial seeks to lower the barrier to creating these models by providing an automated pipeline for creating Markov models of an ion channel kinetics dataset. We start by detailing how to encode sample voltage-clamp protocols and experimental data into the program and its implementation in a cloud computing environment. We guide the reader on how to build a containerized instance, push the machine image, and finally run the routine on cluster nodes. While providing open-source code has become more standard in computational studies, this tutorial provides unprecedented detail on the use of the program and the creation of channel models, starting from inputting the raw experimental data. 
© 2022 Wiley Periodicals LLC. Basic Protocol: Creation of ion channel kinetic models with a cloud computing environment Alternate Protocol: Instructions for use in a standard high-performance compute cluster.}, } @article {pmid35174762, year = {2022}, author = {Sheeba, A and Padmakala, S and Subasini, CA and Karuppiah, SP}, title = {MKELM: Mixed Kernel Extreme Learning Machine using BMDA optimization for web services based heart disease prediction in smart healthcare.}, journal = {Computer methods in biomechanics and biomedical engineering}, volume = {25}, number = {10}, pages = {1180--1194}, doi = {10.1080/10255842.2022.2034795}, pmid = {35174762}, issn = {1476-8259}, mesh = {Algorithms ; Amines ; Delivery of Health Care ; *Heart Diseases ; Humans ; *Machine Learning ; }, abstract = {In recent years, cardiovascular disease becomes a prominent source of death. The web services connect other medical equipments and the computers via internet for exchanging and combining the data in novel ways. The accurate prediction of heart disease is important to prevent cardiac patients prior to heart attack. The main drawback of heart disease is delay in identifying the disease in the early stage. This objective is obtained by using the machine learning method with rich healthcare information on heart diseases. In this paper, the smart healthcare method is proposed for the prediction of heart disease using Biogeography optimization algorithm and Mexican hat wavelet to enhance Dragonfly algorithm optimization with mixed kernel based extreme learning machine (BMDA-MKELM) approach. Here, data is gathered from the two devices such as sensor nodes as well as the electronic medical records. The android based design is utilized to gather the patient data and the reliable cloud-based scheme for the data storage. For further evaluation for the prediction of heart disease, data are gathered from cloud computing services. 
At last, BMDA-MKELM based prediction scheme is capable to classify cardiovascular diseases. In addition to this, the proposed prediction scheme is compared with another method with respect to measures such as accuracy, precision, specificity, and sensitivity. The experimental results depict that the proposed approach achieves better results for the prediction of heart disease when compared with other methods.}, } @article {pmid35174270, year = {2022}, author = {Sang, Y and Cheng, J and Wang, B and Chen, M}, title = {A three-stage heuristic task scheduling for optimizing the service level agreement satisfaction in device-edge-cloud cooperative computing.}, journal = {PeerJ. Computer science}, volume = {8}, number = {}, pages = {e851}, pmid = {35174270}, issn = {2376-5992}, abstract = {Device-edge-cloud cooperative computing is increasingly popular as it can effectively address the problem of the resource scarcity of user devices. It is one of the most challenging issues to improve the resource efficiency by task scheduling in such computing environments. Existing works used limited resources of devices and edge servers in preference, which can lead to not full use of the abundance of cloud resources. This article studies the task scheduling problem to optimize the service level agreement satisfaction in terms of the number of tasks whose hard-deadlines are met for device-edge-cloud cooperative computing. This article first formulates the problem into a binary nonlinear programming, and then proposes a heuristic scheduling method with three stages to solve the problem in polynomial time. The first stage is trying to fully exploit the abundant cloud resources, by pre-scheduling user tasks in the resource priority order of clouds, edge servers, and local devices. 
In the second stage, the proposed heuristic method reschedules some tasks from edges to devices, to provide more available shared edge resources for other tasks that cannot be completed locally, and schedules these tasks to edge servers. At the last stage, our method reschedules as many tasks as possible from clouds to edges or devices, to improve the resource cost. Experiment results show that our method has up to 59% better performance in service level agreement satisfaction without decreasing the resource efficiency, compared with eight of classical methods and state-of-the-art methods.}, } @article {pmid35171378, year = {2022}, author = {Zhang, L and Hu, Q and Tang, Z}, title = {Assessing the contemporary status of Nebraska's eastern saline wetlands by using a machine learning algorithm on the Google Earth Engine cloud computing platform.}, journal = {Environmental monitoring and assessment}, volume = {194}, number = {3}, pages = {193}, pmid = {35171378}, issn = {1573-2959}, mesh = {Cloud Computing ; *Ecosystem ; Environmental Monitoring/methods ; Machine Learning ; Nebraska ; Search Engine ; Soil ; *Wetlands ; }, abstract = {Nebraska's eastern saline wetlands are globally unique and highly vulnerable inland salt marsh ecosystems. This research aims to evaluate the status of the saline wetlands in eastern Nebraska to discover the conditions of saline wetland hydrology, hydrophytes, and hydraulic soil. The research adopts machine learning and Google Earth Engine to classify Sentinel-2 imagery for water and vegetation classification and the National Agriculture Imagery Program imagery for salinity conditions. Six machine learning models are applied in water, soil, and vegetation detection in the study area. The optimal model (linear kernel SVM) generates an overall accuracy of 99.95% for water classification. For saline vegetation classification, the optimal model is the gradient tree boost with an overall accuracy of 94.07%. 
The overall accuracy values of saline soil classification using the optimal model (linear kernel SVM) varied among different years. The results of this study show the possibility of an observation approach for continuously monitoring Nebraska's eastern saline wetlands. The water classification results show that the saline wetlands in this area all have a similar temporal water cover pattern within each year. For saline vegetation, the peak season in this area is between June and July. The years 2019 (19.00%) and 2018 (17.69%) had higher saline vegetation cover rates than 2017 (10.54%). The saline soil classification shows that the saline soil area is highly variable in response to changes in the water and vegetation conditions. The research findings provide solid scientific evidence for conservation decision-making in these saline wetland areas.}, } @article {pmid35166338, year = {2022}, author = {Fahrner, M and Föll, MC and Grüning, BA and Bernt, M and Röst, H and Schilling, O}, title = {Democratizing data-independent acquisition proteomics analysis on public cloud infrastructures via the Galaxy framework.}, journal = {GigaScience}, volume = {11}, number = {}, pages = {}, pmid = {35166338}, issn = {2047-217X}, mesh = {*Computational Biology/methods ; Mass Spectrometry ; *Proteomics/methods ; Reproducibility of Results ; Software ; }, abstract = {BACKGROUND: Data-independent acquisition (DIA) has become an important approach in global, mass spectrometric proteomic studies because it provides in-depth insights into the molecular variety of biological systems. However, DIA data analysis remains challenging owing to the high complexity and large data and sample size, which require specialized software and vast computing infrastructures. Most available open-source DIA software necessitates basic programming skills and covers only a fraction of a complete DIA data analysis. 
In consequence, DIA data analysis often requires usage of multiple software tools and compatibility thereof, severely limiting the usability and reproducibility.

FINDINGS: To overcome this hurdle, we have integrated a suite of open-source DIA tools in the Galaxy framework for reproducible and version-controlled data processing. The DIA suite includes OpenSwath, PyProphet, diapysef, and swath2stats. We have compiled functional Galaxy pipelines for DIA processing, which provide a web-based graphical user interface to these pre-installed and pre-configured tools for their use on freely accessible, powerful computational resources of the Galaxy framework. This approach also enables seamless sharing workflows with full configuration in addition to sharing raw data and results. We demonstrate the usability of an all-in-one DIA pipeline in Galaxy by the analysis of a spike-in case study dataset. Additionally, extensive training material is provided to further increase access for the proteomics community.

CONCLUSION: The integration of an open-source DIA analysis suite in the web-based and user-friendly Galaxy framework in combination with extensive training material empowers a broad community of researchers to perform reproducible and transparent DIA data analysis.}, } @article {pmid35165304, year = {2022}, author = {Touma, S and Antaki, F and Duval, R}, title = {Development of a code-free machine learning model for the classification of cataract surgery phases.}, journal = {Scientific reports}, volume = {12}, number = {1}, pages = {2398}, pmid = {35165304}, issn = {2045-2322}, mesh = {Cataract Extraction/methods/*standards ; Deep Learning ; Humans ; Lens, Crystalline/*surgery ; *Machine Learning ; Ophthalmology/*standards ; }, abstract = {This study assessed the performance of automated machine learning (AutoML) in classifying cataract surgery phases from surgical videos. Two ophthalmology trainees without coding experience designed a deep learning model in Google Cloud AutoML Video Classification for the classification of 10 different cataract surgery phases. We used two open-access publicly available datasets (total of 122 surgeries) for model training, validation and testing. External validation was performed on 10 surgeries issued from another dataset. The AutoML model demonstrated excellent discriminating performance, even outperforming bespoke deep learning models handcrafted by experts. The area under the precision-recall curve was 0.855. At the 0.5 confidence threshold cut-off, the overall performance metrics were as follows: sensitivity (81.0%), recall (77.1%), accuracy (96.0%) and F1 score (0.79). The per-segment metrics varied across the surgical phases: precision 66.7-100%, recall 46.2-100% and specificity 94.1-100%. Hydrodissection and phacoemulsification were the most accurately predicted phases (100 and 92.31% correct predictions, respectively). 
During external validation, the average precision was 54.2% (0.00-90.0%), the recall was 61.1% (0.00-100%) and specificity was 96.2% (91.0-99.0%). In conclusion, a code-free AutoML model can accurately classify cataract surgery phases from videos with an accuracy comparable or better than models developed by experts.}, } @article {pmid35161987, year = {2022}, author = {Bal, PK and Mohapatra, SK and Das, TK and Srinivasan, K and Hu, YC}, title = {A Joint Resource Allocation, Security with Efficient Task Scheduling in Cloud Computing Using Hybrid Machine Learning Techniques.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {3}, pages = {}, pmid = {35161987}, issn = {1424-8220}, support = {MOST 110-2622-E-197-009//Ministry of Science and Technology/ ; }, mesh = {Algorithms ; *Cloud Computing ; *Computer Security ; Machine Learning ; Resource Allocation ; }, abstract = {The rapid growth of cloud computing environment with many clients ranging from personal users to big corporate or business houses has become a challenge for cloud organizations to handle the massive volume of data and various resources in the cloud. Inefficient management of resources can degrade the performance of cloud computing. Therefore, resources must be evenly allocated to different stakeholders without compromising the organization's profit as well as users' satisfaction. A customer's request cannot be withheld indefinitely just because the fundamental resources are not free on the board. In this paper, a combined resource allocation security with efficient task scheduling in cloud computing using a hybrid machine learning (RATS-HM) technique is proposed to overcome those problems. The proposed RATS-HM techniques are given as follows: First, an improved cat swarm optimization algorithm-based short scheduler for task scheduling (ICS-TS) minimizes the make-span time and maximizes throughput. 
Second, a group optimization-based deep neural network (GO-DNN) for efficient resource allocation using different design constraints includes bandwidth and resource load. Third, a lightweight authentication scheme, i.e., NSUPREME is proposed for data encryption to provide security to data storage. Finally, the proposed RATS-HM technique is simulated with a different simulation setup, and the results are compared with state-of-art techniques to prove the effectiveness. The results regarding resource utilization, energy consumption, response time, etc., show that the proposed technique is superior to the existing one.}, } @article {pmid35161968, year = {2022}, author = {Fé, I and Matos, R and Dantas, J and Melo, C and Nguyen, TA and Min, D and Choi, E and Silva, FA and Maciel, PRM}, title = {Performance-Cost Trade-Off in Auto-Scaling Mechanisms for Cloud Computing.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {3}, pages = {}, pmid = {35161968}, issn = {1424-8220}, support = {2020R1A6A1A03046811//Basic Science Research Program through the National Research Foundation of Korea(NRF) funded by the Ministry of Education/ ; 2021R1A2C209494311//the National Foundation of Korea (NRF) grant funded by the Korea government (Ministry of Science and ICT (MIST))/ ; 309335/2017-5//Brazilian National Council for Scientific and Technological Development - CNPq/ ; N0002428//'The Competency Development Program for Industry Specialist' of the Korean Ministry of Trade, Industry and Energy (MOTIE), operated by Korea Institute for Advancement of Technology (KIAT)/ ; IITP-2020-2016-0-00465//the MSIT(Ministry of Science, ICT), Korea, under the ITRC(Information Technology Research Center) support program supervised by the IITP(Institute for Information & communications Technology Planning & Evaluation)/ ; }, mesh = {*Algorithms ; *Cloud Computing ; Workload ; }, abstract = {Cloud computing has been widely adopted over the years by practitioners and companies with a 
variety of requirements. With a strong economic appeal, cloud computing makes possible the idea of computing as a utility, in which computing resources can be consumed and paid for with the same convenience as electricity. One of the main characteristics of cloud as a service is elasticity supported by auto-scaling capabilities. The auto-scaling cloud mechanism allows adjusting resources to meet multiple demands dynamically. The elasticity service is best represented in critical web trading and transaction systems that must satisfy a certain service level agreement (SLA), such as maximum response time limits for different types of inbound requests. Nevertheless, existing cloud infrastructures maintained by different cloud enterprises often offer different cloud service costs for equivalent SLAs upon several factors. The factors might be contract types, VM types, auto-scaling configuration parameters, and incoming workload demand. Identifying a combination of parameters that results in SLA compliance directly in the system is often sophisticated, while the manual analysis is prone to errors due to the huge number of possibilities. This paper proposes the modeling of auto-scaling mechanisms in a typical cloud infrastructure using a stochastic Petri net (SPN) and the employment of a well-established adaptive search metaheuristic (GRASP) to discover critical trade-offs between performance and cost in cloud services. The proposed SPN models enable cloud designers to estimate the metrics of cloud services in accordance with each required SLA such as the best configuration, cost, system response time, and throughput. The auto-scaling SPN model was extensively validated with 95% confidence against a real test-bed scenario with 18,000 samples. A case-study of cloud services was used to investigate the viability of this method and to evaluate the adoptability of the proposed auto-scaling model in practice. 
On the other hand, the proposed optimization algorithm enables the identification of economic system configuration and parameterization to satisfy required SLA and budget constraints. The adoption of the metaheuristic GRASP approach and the modeling of auto-scaling mechanisms in this work can help search for the optimized-quality solution and operational management for cloud services in practice.}, } @article {pmid35161951, year = {2022}, author = {Jaber, MM and Alameri, T and Ali, MH and Alsyouf, A and Al-Bsheish, M and Aldhmadi, BK and Ali, SY and Abd, SK and Ali, SM and Albaker, W and Jarrar, M}, title = {Remotely Monitoring COVID-19 Patient Health Condition Using Metaheuristics Convolute Networks from IoT-Based Wearable Device Health Data.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {3}, pages = {}, pmid = {35161951}, issn = {1424-8220}, mesh = {*COVID-19 ; Delivery of Health Care ; Humans ; Monitoring, Physiologic ; SARS-CoV-2 ; *Wearable Electronic Devices ; }, abstract = {Today, COVID-19-patient health monitoring and management are major public health challenges for technologies. This research monitored COVID-19 patients by using the Internet of Things. IoT-based collected real-time GPS helps alert the patient automatically to reduce risk factors. Wearable IoT devices are attached to the human body, interconnected with edge nodes, to investigate data for making health-condition decisions. This system uses the wearable IoT sensor, cloud, and web layers to explore the patient's health condition remotely. Every layer has specific functionality in the COVID-19 symptoms' monitoring process. The first layer collects the patient health information, which is transferred to the second layer that stores that data in the cloud. The network examines health data and alerts the patients, thus helping users take immediate actions. Finally, the web layer notifies family members to take appropriate steps. 
This optimized deep-learning model allows for the management and monitoring for further analysis.}, } @article {pmid35161853, year = {2022}, author = {Adee, R and Mouratidis, H}, title = {A Dynamic Four-Step Data Security Model for Data in Cloud Computing Based on Cryptography and Steganography.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {3}, pages = {}, pmid = {35161853}, issn = {1424-8220}, abstract = {Cloud computing is a rapidly expanding field. It allows users to access computer system resources as needed, particularly data storage and computational power, without managing them directly. This paper aims to create a data security model based on cryptography and steganography for data in cloud computing that seeks to reduce existing security and privacy concerns, such as data loss, data manipulation, and data theft. To identify the problem and determine its core cause, we studied various literature on existing cloud computing security models. This study utilizes design science research methodology. The design science research approach includes problem identification, requirements elicitation, artifact design and development, demonstration, and assessment. Design thinking and the Python programming language are used to build the artifact, and discussion about its working is represented using histograms, tables, and algorithms. This paper's output is a four-step data security model based on Rivest-Shamir-Adleman, Advanced Encryption Standard, and identity-based encryption algorithms alongside Least Significant Bit steganography. The four steps are data protection and security through encryption algorithms, steganography, data backup and recovery, and data sharing. 
This proposed approach ensures more cloud data redundancy, flexibility, efficiency, and security by protecting data confidentiality, privacy, and integrity from attackers.}, } @article {pmid35161775, year = {2022}, author = {Popović, I and Radovanovic, I and Vajs, I and Drajic, D and Gligorić, N}, title = {Building Low-Cost Sensing Infrastructure for Air Quality Monitoring in Urban Areas Based on Fog Computing.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {3}, pages = {}, pmid = {35161775}, issn = {1424-8220}, support = {451-03-68/2020-14/200223//Ministry of Education, Science and Technological Development of the Republic of Serbia/ ; }, mesh = {*Air Pollution ; *Cloud Computing ; }, abstract = {Because the number of air quality measurement stations governed by a public authority is limited, many methodologies have been developed in order to integrate low-cost sensors and to improve the spatial density of air quality measurements. However, at the large-scale level, the integration of a huge number of sensors brings many challenges. The volume, velocity and processing requirements regarding the management of the sensor life cycle and the operation of system services overcome the capabilities of the centralized cloud model. In this paper, we present the methodology and the architectural framework for building large-scale sensing infrastructure for air quality monitoring applicable in urban scenarios. The proposed tiered architectural solution based on the adopted fog computing model is capable of handling the processing requirements of a large-scale application, while at the same time sustaining real-time performance. Furthermore, the proposed methodology introduces the collection of methods for the management of edge-tier node operation through different phases of the node life cycle, including the methods for node commission, provision, fault detection and recovery. 
The related sensor-side processing is encapsulated in the form of microservices that reside on the different tiers of system architecture. The operation of system microservices and their collaboration was verified through the presented experimental case study.}, } @article {pmid35161745, year = {2022}, author = {Lăcătușu, F and Ionita, AD and Lăcătușu, M and Olteanu, A}, title = {Performance Evaluation of Information Gathering from Edge Devices in a Complex of Smart Buildings.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {3}, pages = {}, pmid = {35161745}, issn = {1424-8220}, mesh = {*Cloud Computing ; *Computers ; Monitoring, Physiologic ; }, abstract = {The use of monitoring systems based on cloud computing has become common for smart buildings. However, the dilemma of centralization versus decentralization, in terms of gathering information and making the right decisions based on it, remains. Performance, dependent on the system design, does matter for emergency detection, where response time and loading behavior become very important. We studied several design options based on edge computing and containers for a smart building monitoring system that sends alerts to the responsible personnel when necessary. The study evaluated performance, including a qualitative analysis and load testing, for our experimental settings. From 700+ edge nodes, we obtained response times that were 30% lower for the public cloud versus the local solution. For up to 100 edge nodes, the values were better for the latter, and in between, they were rather similar. 
Based on an interpretation of the results, we developed recommendations for five real-world configurations, and we present the design choices adopted in our development for a complex of smart buildings.}, } @article {pmid35161741, year = {2022}, author = {Kasnesis, P and Doulgerakis, V and Uzunidis, D and Kogias, DG and Funcia, SI and González, MB and Giannousis, C and Patrikakis, CZ}, title = {Deep Learning Empowered Wearable-Based Behavior Recognition for Search and Rescue Dogs.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {3}, pages = {}, pmid = {35161741}, issn = {1424-8220}, support = {833507//European Commission/ ; }, mesh = {Animals ; *Deep Learning ; Dogs ; Neural Networks, Computer ; *Wearable Electronic Devices ; Working Dogs ; }, abstract = {Search and Rescue (SaR) dogs are important assets in the hands of first responders, as they have the ability to locate the victim even in cases where the vision and or the sound is limited, due to their inherent talents in olfactory and auditory senses. In this work, we propose a deep-learning-assisted implementation incorporating a wearable device, a base station, a mobile application, and a cloud-based infrastructure that can first monitor in real-time the activity, the audio signals, and the location of a SaR dog, and second, recognize and alert the rescuing team whenever the SaR dog spots a victim. For this purpose, we employed deep Convolutional Neural Networks (CNN) both for the activity recognition and the sound classification, which are trained using data from inertial sensors, such as 3-axial accelerometer and gyroscope and from the wearable's microphone, respectively. 
The developed deep learning models were deployed on the wearable device, while the overall proposed implementation was validated in two discrete search and rescue scenarios, managing to successfully spot the victim (i.e., obtained F1-score more than 99%) and inform the rescue team in real-time for both scenarios.}, } @article {pmid35161675, year = {2022}, author = {Ometov, A and Molua, OL and Komarov, M and Nurmi, J}, title = {A Survey of Security in Cloud, Edge, and Fog Computing.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {3}, pages = {}, pmid = {35161675}, issn = {1424-8220}, mesh = {*Cloud Computing ; Computer Security ; *Ecosystem ; Privacy ; Surveys and Questionnaires ; }, abstract = {The field of information security and privacy is currently attracting a lot of research interest. Simultaneously, different computing paradigms from Cloud computing to Edge computing are already forming a unique ecosystem with different architectures, storage, and processing capabilities. The heterogeneity of this ecosystem comes with certain limitations, particularly security and privacy challenges. This systematic literature review aims to identify similarities, differences, main attacks, and countermeasures in the various paradigms mentioned. The main determining outcome points out the essential security and privacy threats. The presented results also outline important similarities and differences in Cloud, Edge, and Fog computing paradigms. Finally, the work identified that the heterogeneity of such an ecosystem does have issues and poses a great setback in the deployment of security and privacy mechanisms to counter security attacks and privacy leakages. 
Different deployment techniques were found in the review studies as ways to mitigate and enhance security and privacy shortcomings.}, } @article {pmid35161665, year = {2022}, author = {Nabi, S and Ahmad, M and Ibrahim, M and Hamam, H}, title = {AdPSO: Adaptive PSO-Based Task Scheduling Approach for Cloud Computing.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {3}, pages = {}, pmid = {35161665}, issn = {1424-8220}, mesh = {*Algorithms ; *Cloud Computing ; Heuristics ; Industry ; }, abstract = {Cloud computing has emerged as the most favorable computing platform for researchers and industry. The load balanced task scheduling has emerged as an important and challenging research problem in the Cloud computing. Swarm intelligence-based meta-heuristic algorithms are considered more suitable for Cloud scheduling and load balancing. The optimization procedure of swarm intelligence-based meta-heuristics consists of two major components that are the local and global search. These algorithms find the best position through the local and global search. To achieve an optimized mapping strategy for tasks to the resources, a balance between local and global search plays an effective role. The inertia weight is an important control attribute to effectively adjust the local and global search process. There are many inertia weight strategies; however, the existing approaches still require fine-tuning to achieve optimum scheduling. The selection of a suitable inertia weight strategy is also an important factor. This paper contributed an adaptive Particle Swarm Optimisation (PSO) based task scheduling approach that reduces the task execution time, and increases throughput and Average Resource Utilization Ratio (ARUR). Moreover, an adaptive inertia weight strategy namely Linearly Descending and Adaptive Inertia Weight (LDAIW) is introduced. 
The proposed scheduling approach provides a better balance between local and global search leading to an optimized task scheduling. The performance of the proposed approach has been evaluated and compared against five renown PSO based inertia weight strategies concerning makespan and throughput. The experiments are then extended and compared the proposed approach against the other four renowned meta-heuristic scheduling approaches. Analysis of the simulated experimentation reveals that the proposed approach attained up to 10%, 12% and 60% improvement for makespan, throughput and ARUR respectively.}, } @article {pmid35161645, year = {2022}, author = {Pincheira, M and Antonini, M and Vecchio, M}, title = {Integrating the IoT and Blockchain Technology for the Next Generation of Mining Inspection Systems.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {3}, pages = {}, pmid = {35161645}, issn = {1424-8220}, mesh = {*Blockchain ; Cloud Computing ; Technology ; }, abstract = {Inspection of mining assets is a crucial part of the maintenance process and is of interest to several stakeholders (e.g., OEMs, owners, users, and inspectors). Inspections require an inspector to verify several characteristics of the assets onsite, typically using legacy and poorly digitized procedures. Thus, many research opportunities arise from the adoption of digital technologies to make these procedures more efficient, reliable, and straightforward. In addition to cloud computing, the ubiquitous presence of modern mobile devices, new measurement tools with embedded connectivity capabilities, and blockchain technologies could greatly improve trust and transparency between the stakeholders interested in the inspection. However, there has been little discussion on integrating these technologies into the mining domain. This paper presents and evaluates an end-to-end system to conduct inspections using mobile devices that directly interact with constrained IoT sensor devices. 
Furthermore, our proposal provides a method to integrate constrained IoT devices as smart measuring tools that directly interact with a blockchain system, guaranteeing data integrity and increasing the trustworthiness of the data. Finally, we highlight the benefits of our proposed architecture by evaluating a real case study in a mining inspection scenario.}, } @article {pmid35161623, year = {2022}, author = {Qafzezi, E and Bylykbashi, K and Ampririt, P and Ikeda, M and Matsuo, K and Barolli, L}, title = {An Intelligent Approach for Cloud-Fog-Edge Computing SDN-VANETs Based on Fuzzy Logic: Effect of Different Parameters on Coordination and Management of Resources.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {3}, pages = {}, pmid = {35161623}, issn = {1424-8220}, abstract = {The integration of cloud-fog-edge computing in Software-Defined Vehicular Ad hoc Networks (SDN-VANETs) brings a new paradigm that provides the needed resources for supporting a myriad of emerging applications. While an abundance of resources may offer many benefits, it also causes management problems. In this work, we propose an intelligent approach to flexibly and efficiently manage resources in these networks. The proposed approach makes use of an integrated fuzzy logic system that determines the most appropriate resources that vehicles should use when set under various circumstances. These circumstances cover the quality of the network created between the vehicles, its size and longevity, the number of available resources, and the requirements of applications. We evaluated the proposed approach by computer simulations. 
The results demonstrate the feasibility of the proposed approach in coordinating and managing the available SDN-VANETs resources.}, } @article {pmid35161596, year = {2022}, author = {Yousif, A and Alqhtani, SM and Bashir, MB and Ali, A and Hamza, R and Hassan, A and Tawfeeg, TM}, title = {Greedy Firefly Algorithm for Optimizing Job Scheduling in IoT Grid Computing.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {3}, pages = {}, pmid = {35161596}, issn = {1424-8220}, support = {NU/IFC/ENT/01/013//The deputyship for research and innovation, Ministry of Education in Saudi Arabia/ ; }, abstract = {The Internet of Things (IoT) is defined as interconnected digital and mechanical devices with intelligent and interactive data transmission features over a defined network. The ability of the IoT to collect, analyze and mine data into information and knowledge motivates the integration of IoT with grid and cloud computing. New job scheduling techniques are crucial for the effective integration and management of IoT with grid computing as they provide optimal computational solutions. The computational grid is a modern technology that enables distributed computing to take advantage of a organization's resources in order to handle complex computational problems. However, the scheduling process is considered an NP-hard problem due to the heterogeneity of resources and management systems in the IoT grid. This paper proposed a Greedy Firefly Algorithm (GFA) for jobs scheduling in the grid environment. In the proposed greedy firefly algorithm, a greedy method is utilized as a local search mechanism to enhance the rate of convergence and efficiency of schedules produced by the standard firefly algorithm. Several experiments were conducted using the GridSim toolkit to evaluate the proposed greedy firefly algorithm's performance. 
The study measured several sizes of real grid computing workload traces, starting with lightweight traces with only 500 jobs, then typical with 3000 to 7000 jobs, and finally heavy load containing 8000 to 10,000 jobs. The experiment results revealed that the greedy firefly algorithm could significantly reduce the makespan and execution times of the IoT grid scheduling process as compared to other evaluated scheduling methods. Furthermore, the proposed greedy firefly algorithm converges on large search space faster, making it suitable for large-scale IoT grid environments.}, } @article {pmid35161586, year = {2022}, author = {Tassetti, AN and Galdelli, A and Pulcinella, J and Mancini, A and Bolognini, L}, title = {Addressing Gaps in Small-Scale Fisheries: A Low-Cost Tracking System.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {3}, pages = {}, pmid = {35161586}, issn = {1424-8220}, support = {1025515//Interreg V-A Italy-Croatia CBC Programme 2014-2020, Strategic calls for proposals, Project ARGOS - ShARed GOvernance of Sustainable fisheries and aquaculture activities as leverage to protect marine resources in the Adriatic Sea/ ; }, mesh = {Artificial Intelligence ; *Conservation of Natural Resources ; Data Collection ; *Fisheries ; Policy ; }, abstract = {During the last decade vessel-position-recording devices, such as the Vessel Monitoring System and the Automatic Identification System, have increasingly given accurate spatial and quantitative information of industrial fisheries. On the other hand, small-scale fisheries (vessels below 12 m) remain untracked and largely unregulated even though they play an important socio-economic and cultural role in European waters and coastal communities and account for most of the total EU fishing fleet. 
The typically low-technological capacity of these small-scale fishing boats-for which space and power onboard are often limited-as well as their reduced operative range encourage the development of efficient, low-cost, and low-burden tracking solutions. In this context, we designed a cost-effective and scalable prototypic architecture to gather and process positional data from small-scale vessels, making use of a LoRaWAN/cellular network. Data collected by our first installation are presented, as well as its preliminary processing. The emergence of such a low-cost and open-source technology coupled to artificial intelligence could open new opportunities for equipping small-scale vessels, collecting their trajectory data, and estimating their fishing effort (information which has historically not been present). It enables a new monitoring strategy that could effectively include small-scale fleets and support the design of new policies oriented to inform coastal resource and fisheries management.}, } @article {pmid35161458, year = {2022}, author = {Berta, R and Bellotti, F and De Gloria, A and Lazzaroni, L}, title = {Assessing Versatility of a Generic End-to-End Platform for IoT Ecosystem Applications.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {3}, pages = {}, pmid = {35161458}, issn = {1424-8220}, mesh = {*Ecosystem ; }, abstract = {Availability of efficient development tools for data-rich IoT applications is becoming ever more important. Such tools should support cross-platform deployment and seamless and effective applicability in a variety of domains. In this view, we assessed the versatility of an edge-to-cloud system featuring Measurify, a framework for managing smart things. The framework exposes to developers a set of measurement-oriented resources that can be used in different contexts. 
The tool has been assessed in the development of end-to-end IoT applications in six Electronic and Information Technologies Engineering BSc theses that have highlighted the potential of such a system, both from a didactic and a professional point of view. The main design abstractions of the system (i.e., generic sensor configuration, simple language with chainable operations for processing data on the edge, seamless WiFi/GSM communication) allowed developers to be productive and focus on the application requirements and the high-level design choices needed to define the edge system (microcontroller and its sensors), avoiding the large set-up times necessary to start a solution from scratch. The experience also highlighted some usability issues that will be addressed in an upcoming release of the system.}, } @article {pmid35143670, year = {2022}, author = {Grealey, J and Lannelongue, L and Saw, WY and Marten, J and Méric, G and Ruiz-Carmona, S and Inouye, M}, title = {The Carbon Footprint of Bioinformatics.}, journal = {Molecular biology and evolution}, volume = {39}, number = {3}, pages = {}, pmid = {35143670}, issn = {1537-1719}, support = {MR/S502443/1/MRC_/Medical Research Council/United Kingdom ; BRC-1215-20014/DH_/Department of Health/United Kingdom ; MR/L003120/1/MRC_/Medical Research Council/United Kingdom ; RG/18/13/33946/BHF_/British Heart Foundation/United Kingdom ; /WT_/Wellcome Trust/United Kingdom ; /CSO_/Chief Scientist Office/United Kingdom ; RG/13/13/30194/BHF_/British Heart Foundation/United Kingdom ; }, mesh = {Algorithms ; *Carbon Footprint ; *Computational Biology ; Genome-Wide Association Study ; Software ; }, abstract = {Bioinformatic research relies on large-scale computational infrastructures which have a nonzero carbon footprint but so far, no study has quantified the environmental costs of bioinformatic tools and commonly run analyses. 
In this work, we estimate the carbon footprint of bioinformatics (in kilograms of CO2 equivalent units, kgCO2e) using the freely available Green Algorithms calculator (www.green-algorithms.org, last accessed 2022). We assessed 1) bioinformatic approaches in genome-wide association studies (GWAS), RNA sequencing, genome assembly, metagenomics, phylogenetics, and molecular simulations, as well as 2) computation strategies, such as parallelization, CPU (central processing unit) versus GPU (graphics processing unit), cloud versus local computing infrastructure, and geography. In particular, we found that biobank-scale GWAS emitted substantial kgCO2e and simple software upgrades could make it greener, for example, upgrading from BOLT-LMM v1 to v2.3 reduced carbon footprint by 73%. Moreover, switching from the average data center to a more efficient one can reduce carbon footprint by approximately 34%. Memory over-allocation can also be a substantial contributor to an algorithm's greenhouse gas emissions. The use of faster processors or greater parallelization reduces running time but can lead to greater carbon footprint. Finally, we provide guidance on how researchers can reduce power consumption and minimize kgCO2e. 
Overall, this work elucidates the carbon footprint of common analyses in bioinformatics and provides solutions which empower a move toward greener research.}, } @article {pmid35140775, year = {2022}, author = {Wang, R and Chen, X}, title = {Research on Agricultural Product Traceability Technology (Economic Value) Based on Information Supervision and Cloud Computing.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {4687639}, pmid = {35140775}, issn = {1687-5273}, mesh = {*Blockchain ; *Cloud Computing ; Information Dissemination ; Reproducibility of Results ; Technology ; }, abstract = {Traditional agricultural product traceability system adopts centralized storage, and the traceability process is solidified, which results in the low reliability of traceability results and the poor flexibility of the system. Aiming to solve this problem, blockchain technology is applied to supply chain traceability, and a supply chain traceability system based on sidechain technology is proposed. Goods management, information sharing, and product traceability in supply chain are realized through Ethereum smart contract. The sidechain technology is adopted to expand Ethereum so that it can meet the needs of practical applications. The experiment results show that the proposed system has a transaction function and information sharing function. 
Compared with similar trading systems, the proposed system has more advantages in throughput and security.}, } @article {pmid35136707, year = {2023}, author = {Mahajan, HB and Rashid, AS and Junnarkar, AA and Uke, N and Deshpande, SD and Futane, PR and Alkhayyat, A and Alhayani, B}, title = {Integration of Healthcare 4.0 and blockchain into secure cloud-based electronic health records systems.}, journal = {Applied nanoscience}, volume = {13}, number = {3}, pages = {2329-2342}, pmid = {35136707}, issn = {2190-5509}, abstract = {Since the last decade, cloud-based electronic health records (EHRs) have gained significant attention to enable remote patient monitoring. The recent development of Healthcare 4.0 using the Internet of Things (IoT) components and cloud computing to access medical operations remotely has gained the researcher's attention from a smart city perspective. Healthcare 4.0 mainly consisted of periodic medical data sensing, aggregation, data transmission, data sharing, and data storage. The sensitive and personal data of patients lead to several challenges while protecting it from hackers. Therefore storing, accessing, and sharing the patient medical information on the cloud needs security attention that data should not be compromised by the authorized user's components of E-healthcare systems. To achieve secure medical data storage, sharing, and accessing in cloud service provider, several cryptography algorithms are designed so far. However, such conventional solutions failed to achieve the trade-off between the requirements of EHR security solutions such as computational efficiency, service side verification, user side verifications, without the trusted third party, and strong security. Blockchain-based security solutions gained significant attention in the recent past due to the ability to provide strong security for data storage and sharing with the minimum computation efforts. 
Blockchain initially focused on bitcoin technology among the researchers. Utilizing the blockchain for secure healthcare records management has been of recent interest. This paper presents the systematic study of modern blockchain-based solutions for securing medical data with or without cloud computing. We implement and evaluate the different methods using blockchain in this paper. According to the research studies, the research gaps, challenges, and future roadmap are the outcomes of this paper that boost emerging Healthcare 4.0 technology.}, } @article {pmid35136669, year = {2022}, author = {Raulerson, CK and Villa, EC and Mathews, JA and Wakeland, B and Xu, Y and Gagan, J and Cantarel, BL}, title = {SCHOOL: Software for Clinical Health in Oncology for Omics Laboratories.}, journal = {Journal of pathology informatics}, volume = {13}, number = {}, pages = {1}, pmid = {35136669}, issn = {2229-5089}, abstract = {Bioinformatics analysis is a key element in the development of in-house next-generation sequencing assays for tumor genetic profiling that can include both tumor DNA and RNA with comparisons to matched-normal DNA in select cases. Bioinformatics analysis encompasses a computationally heavy component that requires a high-performance computing component and an assay-dependent quality assessment, aggregation, and data cleaning component. Although there are free, open-source solutions and fee-for-use commercial services for the computationally heavy component, these solutions and services can lack the options commonly utilized in increasingly complex genomic assays. Additionally, the cost to purchase commercial solutions or implement and maintain open-source solutions can be out of reach for many small clinical laboratories. 
Here, we present Software for Clinical Health in Oncology for Omics Laboratories (SCHOOL), a collection of genomics analysis workflows that (i) can be easily installed on any platform; (ii) run on the cloud with a user-friendly interface; and (iii) include the detection of single nucleotide variants, insertions/deletions, copy number variants (CNVs), and translocations from RNA and DNA sequencing. These workflows contain elements for customization based on target panel and assay design, including somatic mutational analysis with a matched-normal, microsatellite stability analysis, and CNV analysis with a single nucleotide polymorphism backbone. All of the features of SCHOOL have been designed to run on any computer system, where software dependencies have been containerized. SCHOOL has been built into apps with workflows that can be run on a cloud platform such as DNANexus using their point-and-click graphical interface, which could be automated for high-throughput laboratories.}, } @article {pmid35136576, year = {2021}, author = {Albanese, D and Donati, C}, title = {Large-scale quality assessment of prokaryotic genomes with metashot/prok-quality.}, journal = {F1000Research}, volume = {10}, number = {}, pages = {822}, pmid = {35136576}, issn = {2046-1402}, mesh = {Archaea/genetics ; *Metagenome ; *Metagenomics/methods ; Prokaryotic Cells ; Reproducibility of Results ; }, abstract = {Metagenomic sequencing allows large-scale identification and genomic characterization. Binning is the process of recovering genomes from complex mixtures of sequence fragments (metagenome contigs) of unknown bacteria and archaeal species. Assessing the quality of genomes recovered from metagenomes requires the use of complex pipelines involving many independent steps, often difficult to reproduce and maintain. 
A comprehensive, automated and easy-to-use computational workflow for the quality assessment of draft prokaryotic genomes, based on container technology, would greatly improve reproducibility and reusability of published results. We present metashot/prok-quality, a container-enabled Nextflow pipeline for quality assessment and genome dereplication. The metashot/prok-quality tool produces genome quality reports that are compliant with the Minimum Information about a Metagenome-Assembled Genome (MIMAG) standard, and can run out-of-the-box on any platform that supports Nextflow, Docker or Singularity, including computing clusters or batch infrastructures in the cloud. metashot/prok-quality is part of the metashot collection of analysis pipelines. Workflow and documentation are available under GPL3 licence on GitHub.}, } @article {pmid35135213, year = {2022}, author = {Habibi, H and Rasoolzadegan, A and Mashmool, A and Band, SS and Chronopoulos, AT and Mosavi, A}, title = {SaaSRec+: a new context-aware recommendation method for SaaS services.}, journal = {Mathematical biosciences and engineering : MBE}, volume = {19}, number = {2}, pages = {1471-1495}, doi = {10.3934/mbe.2022068}, pmid = {35135213}, issn = {1551-0018}, mesh = {*Cloud Computing ; Cluster Analysis ; }, abstract = {Cloud computing is an attractive model that provides users with a variety of services. Thus, the number of cloud services on the market is growing rapidly. Therefore, choosing the proper cloud service is an important challenge. Another major challenge is the availability of diverse cloud services with similar performance, which makes it difficult for users to choose the cloud service that suits their needs. Therefore, the existing service selection approaches are not able to solve the problem, and cloud service recommendation has become an essential and important need. In this paper, we present a new way for context-aware cloud service recommendation. 
Our proposed method seeks to solve the weakness in user clustering, which itself is due to reasons such as 1) lack of full use of contextual information such as cloud service placement, and 2) inaccurate method of determining the similarity of two vectors. The evaluation conducted by the WSDream dataset indicates a reduction in the cloud service recommendation process error rate. The volume of data used in the evaluation of this paper is 5 times that of the basic method. Also, according to the T-test, the service recommendation performance in the proposed method is significant.}, } @article {pmid35131433, year = {2022}, author = {Schirner, M and Domide, L and Perdikis, D and Triebkorn, P and Stefanovski, L and Pai, R and Prodan, P and Valean, B and Palmer, J and Langford, C and Blickensdörfer, A and van der Vlag, M and Diaz-Pier, S and Peyser, A and Klijn, W and Pleiter, D and Nahm, A and Schmid, O and Woodman, M and Zehl, L and Fousek, J and Petkoski, S and Kusch, L and Hashemi, M and Marinazzo, D and Mangin, JF and Flöel, A and Akintoye, S and Stahl, BC and Cepic, M and Johnson, E and Deco, G and McIntosh, AR and Hilgetag, CC and Morgan, M and Schuller, B and Upton, A and McMurtrie, C and Dickscheid, T and Bjaalie, JG and Amunts, K and Mersmann, J and Jirsa, V and Ritter, P}, title = {Brain simulation as a cloud service: The Virtual Brain on EBRAINS.}, journal = {NeuroImage}, volume = {251}, number = {}, pages = {118973}, doi = {10.1016/j.neuroimage.2022.118973}, pmid = {35131433}, issn = {1095-9572}, mesh = {Animals ; Bayes Theorem ; *Brain/diagnostic imaging ; *Cloud Computing ; Computer Simulation ; Humans ; Magnetic Resonance Imaging/methods ; Mice ; Software ; }, abstract = {The Virtual Brain (TVB) is now available as open-source services on the cloud research platform EBRAINS (ebrains.eu). 
It offers software for constructing, simulating and analysing brain network models including the TVB simulator; magnetic resonance imaging (MRI) processing pipelines to extract structural and functional brain networks; combined simulation of large-scale brain networks with small-scale spiking networks; automatic conversion of user-specified model equations into fast simulation code; simulation-ready brain models of patients and healthy volunteers; Bayesian parameter optimization in epilepsy patient models; data and software for mouse brain simulation; and extensive educational material. TVB cloud services facilitate reproducible online collaboration and discovery of data assets, models, and software embedded in scalable and secure workflows, a precondition for research on large cohort data sets, better generalizability, and clinical translation.}, } @article {pmid35129073, year = {2024}, author = {Bhardwaj, A and Kumar, M and Alshehri, M and Keshta, I and Abugabah, A and Sharma, SK}, title = {Smart water management framework for irrigation in agriculture.}, journal = {Environmental technology}, volume = {45}, number = {12}, pages = {2320-2334}, doi = {10.1080/09593330.2022.2039783}, pmid = {35129073}, issn = {1479-487X}, mesh = {*Drinking Water ; Agriculture ; Farms ; Climate ; Water Supply ; }, abstract = {Global demand and pressure on natural resources is increasing, which is greater on the availability of pure and safe drinking water. The use of new-age technologies including Smart sensors, embedded devices, and Cloud computing can help deliver efficient and safe management for provisioning drinking water for consumers and irrigation for agriculture. The management actions combined with real-time data gathering, monitoring, and alerting with proactive actions, prevent issues from occurring. This research presents a secure and smart research framework to enhance the existing irrigation system. 
This involves a low-budget irrigation model that can provide automated control and requirements as per the season and climate by using smart device sensors and Cloud communications. The authors presented four unique algorithms and water management processing rules. This also includes alerting scenarios for device and component failures and water leakage by automatically switching to alternative mode and sending alert messages about the faults to resolve the operational failures. The objective of this research is to identify new-age technologies for providing efficient and effective farming methods and investigate Smart IoT-based water management. The highlights of this research are to investigate IoT water management systems using algorithms for irrigation farming, for which this research presents a secure and smart research framework. This involves a low-budget irrigation model that provides automated control and requirements as per the season and climate by using smart device sensors and Cloud communications. Alerts for device and component failures and water leakage are also in-built for switching to alternative mode to resolve the operational failures.}, } @article {pmid35126905, year = {2022}, author = {Mohana, J and Yakkala, B and Vimalnath, S and Benson Mansingh, PM and Yuvaraj, N and Srihari, K and Sasikala, G and Mahalakshmi, V and Yasir Abdullah, R and Sundramurthy, VP}, title = {Application of Internet of Things on the Healthcare Field Using Convolutional Neural Network Processing.}, journal = {Journal of healthcare engineering}, volume = {2022}, number = {}, pages = {1892123}, pmid = {35126905}, issn = {2040-2309}, mesh = {Algorithms ; Cloud Computing ; Delivery of Health Care ; Humans ; *Internet of Things ; Neural Networks, Computer ; }, abstract = {Population at risk can benefit greatly from remote health monitoring because it allows for early detection and treatment. 
Because of recent advances in Internet-of-Things (IoT) paradigms, such monitoring systems are now available everywhere. Due to the essential nature of the patients being monitored, these systems demand a high level of quality in aspects such as availability and accuracy. In health applications, where a lot of data are accessible, deep learning algorithms have the potential to perform well. In this paper, we develop a deep learning architecture called the convolutional neural network (CNN), which we examine in this study to see if it can be implemented. The study uses the IoT system with a centralised cloud server, where it is considered as an ideal input data acquisition module. The study uses cloud computing resources by distributing CNN operations to the servers with outsourced fitness functions to be performed at the edge. The results of the simulation show that the proposed method achieves a higher rate of classifying the input instances from the data acquisition tools than other methods. From the results, it is seen that the proposed CNN achieves an average accurate rate of 99.6% on training datasets and 86.3% on testing datasets.}, } @article {pmid35126501, year = {2022}, author = {Khan, R and Srivastava, AK and Gupta, M and Kumari, P and Kumar, S}, title = {Medicolite-Machine Learning-Based Patient Care Model.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {8109147}, pmid = {35126501}, issn = {1687-5273}, mesh = {*Cloud Computing ; Computer Security ; Delivery of Health Care ; Humans ; *Machine Learning ; Patient Care ; }, abstract = {This paper discusses the machine learning effect on healthcare and the development of an application named "Medicolite" in which various modules have been developed for convenience with health-related problems like issues with diet. It also provides online doctor appointments from home and medication through the phone. 
A healthcare system is "Smart" when it can decide on its own and can prescribe patients life-saving drugs. Machine learning helps in capturing data that are large and contain sensitive information about the patients, so data security is one of the important aspects of this system. It is a health system that uses trending technologies and mobile internet to connect people and healthcare institutions to make them aware of their health condition by intelligently responding to their questions. It perceives information through machine learning and processes this information using cloud computing. With the new technologies, the system decreases the manual intervention in healthcare. Every single piece of information has been saved in the system and the user can access it any time. Furthermore, users can take appointments at any time without standing in a queue. In this paper, the authors proposed a CNN-based classifier. This CNN-based classifier is faster than SVM-based classifier. When these two classifiers are compared based on training and testing sessions, it has been found that the CNN has taken less time (30 seconds) compared to SVM (58 seconds).}, } @article {pmid35126487, year = {2022}, author = {Bukhari, MM and Ghazal, TM and Abbas, S and Khan, MA and Farooq, U and Wahbah, H and Ahmad, M and Adnan, KM}, title = {An Intelligent Proposed Model for Task Offloading in Fog-Cloud Collaboration Using Logistics Regression.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {3606068}, pmid = {35126487}, issn = {1687-5273}, mesh = {*Algorithms ; *Cloud Computing ; Computer Simulation ; Logistic Models ; Reproducibility of Results ; }, abstract = {Smart applications and intelligent systems are being developed that are self-reliant, adaptive, and knowledge-based in nature. Emergency and disaster management, aerospace, healthcare, IoT, and mobile applications, among them, revolutionize the world of computing. 
Applications with a large number of growing devices have rendered the current design of the centralized cloud impractical. Despite the use of 5G technology, delay-sensitive applications and cloud cannot go parallel due to exceeding threshold values of certain parameters like latency, bandwidth, response time, etc. Middleware proves to be a better solution to cope with these issues while satisfying the high requirements task offloading standards. Fog computing is recommended middleware in this research article in view of the fact that it provides the services to the edge of the network; delay-sensitive applications can be entertained effectively. On the contrary, fog nodes contain a limited set of resources that may not process all tasks, especially of computation-intensive applications. Additionally, fog is not the replacement of the cloud, rather supplement to the cloud, both behave like counterparts and offer their services correspondingly to comply with the task needs but fog computing has relatively closer proximity to the devices compared to the cloud. The problem arises when a decision needs to be taken about what is to be offloaded: data, computation, or application, and more specifically where to offload: either fog or cloud and how much to offload. Fog-cloud collaboration is stochastic in terms of task-related attributes like task size, duration, arrival rate, and required resources. Dynamic task offloading becomes crucial in order to utilize the resources at fog and cloud to improve QoS. Since this formation of task offloading policy is a bit complex in nature, this problem is addressed in the research article and proposes an intelligent task offloading model. 
Simulation results demonstrate the authenticity of the proposed logistic regression model acquiring 86% accuracy compared to other algorithms and confidence in the predictive task offloading policy by making sure process consistency and reliability.}, } @article {pmid35125670, year = {2022}, author = {Bacanin, N and Zivkovic, M and Bezdan, T and Venkatachalam, K and Abouhawwash, M}, title = {Modified firefly algorithm for workflow scheduling in cloud-edge environment.}, journal = {Neural computing & applications}, volume = {34}, number = {11}, pages = {9043-9068}, pmid = {35125670}, issn = {0941-0643}, abstract = {Edge computing is a novel technology, which is closely related to the concept of Internet of Things. This technology brings computing resources closer to the location where they are consumed by end-users-to the edge of the cloud. In this way, response time is shortened and lower network bandwidth is utilized. Workflow scheduling must be addressed to accomplish these goals. In this paper, we propose an enhanced firefly algorithm adapted for tackling workflow scheduling challenges in a cloud-edge environment. Our proposed approach overcomes observed deficiencies of original firefly metaheuristics by incorporating genetic operators and quasi-reflection-based learning procedure. First, we have validated the proposed improved algorithm on 10 modern standard benchmark instances and compared its performance with original and other improved state-of-the-art metaheuristics. Secondly, we have performed simulations for a workflow scheduling problem with two objectives-cost and makespan. We performed comparative analysis with other state-of-the-art approaches that were tested under the same experimental conditions. Algorithm proposed in this paper exhibits significant enhancements over the original firefly algorithm and other outstanding metaheuristics in terms of convergence speed and results' quality. 
Based on the output of conducted simulations, the proposed improved firefly algorithm obtains prominent results and managed to establish improvement in solving workflow scheduling in cloud-edge by reducing makespan and cost compared to other approaches.}, } @article {pmid35122132, year = {2022}, author = {Abbas, A and O'Byrne, C and Fu, DJ and Moraes, G and Balaskas, K and Struyven, R and Beqiri, S and Wagner, SK and Korot, E and Keane, PA}, title = {Evaluating an automated machine learning model that predicts visual acuity outcomes in patients with neovascular age-related macular degeneration.}, journal = {Graefe's archive for clinical and experimental ophthalmology = Albrecht von Graefes Archiv fur klinische und experimentelle Ophthalmologie}, volume = {260}, number = {8}, pages = {2461-2473}, pmid = {35122132}, issn = {1435-702X}, support = {MR/T000953/1/MRC_/Medical Research Council/United Kingdom ; MR/T019050/1/MRC_/Medical Research Council/United Kingdom ; R190028A//moorfields eye charity career development award/ ; MR/T019050/1//uk research & innovation future leaders fellowship/ ; }, mesh = {Angiogenesis Inhibitors/therapeutic use ; Humans ; Intravitreal Injections ; Machine Learning ; *Macular Degeneration/drug therapy ; Ranibizumab/therapeutic use ; Retrospective Studies ; Treatment Outcome ; Vascular Endothelial Growth Factor A ; Visual Acuity ; *Wet Macular Degeneration/diagnosis/drug therapy ; }, abstract = {PURPOSE: Neovascular age-related macular degeneration (nAMD) is a major global cause of blindness. Whilst anti-vascular endothelial growth factor (anti-VEGF) treatment is effective, response varies considerably between individuals. Thus, patients face substantial uncertainty regarding their future ability to perform daily tasks. 
In this study, we evaluate the performance of an automated machine learning (AutoML) model which predicts visual acuity (VA) outcomes in patients receiving treatment for nAMD, in comparison to a manually coded model built using the same dataset. Furthermore, we evaluate model performance across ethnic groups and analyse how the models reach their predictions.

METHODS: Binary classification models were trained to predict whether patients' VA would be 'Above' or 'Below' a score of 70 one year after initiating treatment, measured using the Early Treatment Diabetic Retinopathy Study (ETDRS) chart. The AutoML model was built using the Google Cloud Platform, whilst the bespoke model was trained using an XGBoost framework. Models were compared and analysed using the What-if Tool (WIT), a novel model-agnostic interpretability tool.

RESULTS: Our study included 1631 eyes from patients attending Moorfields Eye Hospital. The AutoML model (area under the curve [AUC], 0.849) achieved a highly similar performance to the XGBoost model (AUC, 0.847). Using the WIT, we found that the models over-predicted negative outcomes in Asian patients and performed worse in those with an ethnic category of Other. Baseline VA, age and ethnicity were the most important determinants of model predictions. Partial dependence plot analysis revealed a sigmoidal relationship between baseline VA and the probability of an outcome of 'Above'.

CONCLUSION: We have described and validated an AutoML-WIT pipeline which enables clinicians with minimal coding skills to match the performance of a state-of-the-art algorithm and obtain explainable predictions.}, } @article {pmid35120010, year = {2023}, author = {Wang, Z and Guo, D and Tu, Z and Huang, Y and Zhou, Y and Wang, J and Feng, L and Lin, D and You, Y and Agback, T and Orekhov, V and Qu, X}, title = {A Sparse Model-Inspired Deep Thresholding Network for Exponential Signal Reconstruction-Application in Fast Biological Spectroscopy.}, journal = {IEEE transactions on neural networks and learning systems}, volume = {34}, number = {10}, pages = {7578-7592}, doi = {10.1109/TNNLS.2022.3144580}, pmid = {35120010}, issn = {2162-2388}, mesh = {*Neural Networks, Computer ; *Algorithms ; Tomography, X-Ray Computed/methods ; Spectrum Analysis ; Signal Processing, Computer-Assisted ; Image Processing, Computer-Assisted/methods ; }, abstract = {The nonuniform sampling (NUS) is a powerful approach to enable fast acquisition but requires sophisticated reconstruction algorithms. Faithful reconstruction from partially sampled exponentials is highly expected in general signal processing and many applications. Deep learning (DL) has shown astonishing potential in this field, but many existing problems, such as lack of robustness and explainability, greatly limit its applications. In this work, by combining the merits of the sparse model-based optimization method and data-driven DL, we propose a DL architecture for spectra reconstruction from undersampled data, called MoDern. It follows the iterative reconstruction in solving a sparse model to build the neural network, and we elaborately design a learnable soft-thresholding to adaptively eliminate the spectrum artifacts introduced by undersampling. Extensive results on both synthetic and biological data show that MoDern enables more robust, high-fidelity, and ultrafast reconstruction than the state-of-the-art methods. 
Remarkably, MoDern has a small number of network parameters and is trained on solely synthetic data while generalizing well to biological data in various scenarios. Furthermore, we extend it to an open-access and easy-to-use cloud computing platform (XCloud-MoDern), contributing a promising strategy for further development of biological applications.}, } @article {pmid35119861, year = {2022}, author = {Rai, BK and Sresht, V and Yang, Q and Unwalla, R and Tu, M and Mathiowetz, AM and Bakken, GA}, title = {TorsionNet: A Deep Neural Network to Rapidly Predict Small-Molecule Torsional Energy Profiles with the Accuracy of Quantum Mechanics.}, journal = {Journal of chemical information and modeling}, volume = {62}, number = {4}, pages = {785-800}, doi = {10.1021/acs.jcim.1c01346}, pmid = {35119861}, issn = {1549-960X}, mesh = {Ligands ; Molecular Dynamics Simulation ; *Neural Networks, Computer ; *Quantum Theory ; Thermodynamics ; }, abstract = {Fast and accurate assessment of small-molecule dihedral energetics is crucial for molecular design and optimization in medicinal chemistry. Yet, accurate prediction of torsion energy profiles remains challenging as the current molecular mechanics (MM) methods are limited by insufficient coverage of drug-like chemical space and accurate quantum mechanical (QM) methods are too expensive. To address this limitation, we introduce TorsionNet, a deep neural network (DNN) model specifically developed to predict small-molecule torsion energy profiles with QM-level accuracy. We applied active learning to identify nearly 50k fragments (with elements H, C, N, O, F, S, and Cl) that maximized the coverage of our corporate compound library and leveraged massively parallel cloud computing resources for density functional theory (DFT) torsion scans of these fragments, generating a training data set of 1.2 million DFT energies. 
After training TorsionNet on this data set, we obtain a model that can rapidly predict the torsion energy profile of typical drug-like fragments with DFT-level accuracy. Importantly, our method also provides an uncertainty estimate for the predicted profiles without any additional calculations. In this report, we show that TorsionNet can accurately identify the preferred dihedral geometries observed in crystal structures. Our TorsionNet-based analysis of a diverse set of protein-ligand complexes with measured binding affinity shows a strong association between high ligand strain and low potency. We also present practical applications of TorsionNet that demonstrate how consideration of DNN-based strain energy leads to substantial improvement in existing lead discovery and design workflows. TorsionNet500, a benchmark data set comprising 500 chemically diverse fragments with DFT torsion profiles (12k MM- and DFT-optimized geometries and energies), has been created and is made publicly available.}, } @article {pmid35118441, year = {2021}, author = {Cassidy, B and Reeves, ND and Pappachan, JM and Gillespie, D and O'Shea, C and Rajbhandari, S and Maiya, AG and Frank, E and Boulton, AJ and Armstrong, DG and Najafi, B and Wu, J and Kochhar, RS and Yap, MH}, title = {The DFUC 2020 Dataset: Analysis Towards Diabetic Foot Ulcer Detection.}, journal = {TouchREVIEWS in endocrinology}, volume = {17}, number = {1}, pages = {5-11}, pmid = {35118441}, issn = {2752-5457}, support = {R01 DK124789/DK/NIDDK NIH HHS/United States ; }, abstract = {Every 20 seconds a limb is amputated somewhere in the world due to diabetes. This is a global health problem that requires a global solution. 
The International Conference on Medical Image Computing and Computer Assisted Intervention challenge, which concerns the automated detection of diabetic foot ulcers (DFUs) using machine learning techniques, will accelerate the development of innovative healthcare technology to address this unmet medical need. In an effort to improve patient care and reduce the strain on healthcare systems, recent research has focused on the creation of cloud-based detection algorithms. These can be consumed as a service by a mobile app that patients (or a carer, partner or family member) could use themselves at home to monitor their condition and to detect the appearance of a DFU. Collaborative work between Manchester Metropolitan University, Lancashire Teaching Hospitals and the Manchester University NHS Foundation Trust has created a repository of 4,000 DFU images for the purpose of supporting research toward more advanced methods of DFU detection. This paper presents a dataset description and analysis, assessment methods, benchmark algorithms and initial evaluation results. It facilitates the challenge by providing useful insights into state-of-the-art and ongoing research.}, } @article {pmid35116076, year = {2022}, author = {Li-Yun, Z and Cheng-Ke, W and Qiang, Z}, title = {The Construction of Folk Sports Featured Towns Based on Intelligent Building Technology Based on the Internet of Things.}, journal = {Applied bionics and biomechanics}, volume = {2022}, number = {}, pages = {4541533}, pmid = {35116076}, issn = {1176-2322}, abstract = {With the emergence of the Internet of Things, technology and Internet thinking have entered traditional communities, and combined with traditional technologies, many new and better management methods and solutions have been born. Among them, the concept of intelligent buildings is also known to people. 
Based on big data technology, cloud computing technology, and Internet of Things technology, smart buildings provide smart and convenient devices and services for smart device users. The Internet of Things technology is entering our lives at an unimaginable speed. It has been applied in many fields. Smart home, smart transportation, smart medical, smart agriculture, and smart grid are widely used in the Internet of Things technology. The application of Internet of Things technology to the construction of folk sports characteristic towns is of great significance. The construction of folk sports characteristic towns and the protection of intangible cultural heritage have the same purpose and interoperability of elements as the development of traditional cities. From the perspective of protecting folk culture and intangible cultural heritage, it is effective to promote the development of small towns with folk custom characteristics. Based on the research on the construction of folk-custom sports towns, this paper proposes a series of data model analysis and analyzes the proportion of sports preferences in the survey of volunteers in the folk-custom sports towns. The final result of the research shows that the ball games sports personnel accounted for the largest proportion, with 156 people accounting for 48.15%. This shows that about half of the people like ball sports, which proves that ball sports should be the mainstay of folk sports towns, and other sports should be supplemented by other sports.}, } @article {pmid35111919, year = {2022}, author = {Mejahed, S and Elshrkawey, M}, title = {A multi-objective algorithm for virtual machine placement in cloud environments using a hybrid of particle swarm optimization and flower pollination optimization.}, journal = {PeerJ. 
Computer science}, volume = {8}, number = {}, pages = {e834}, pmid = {35111919}, issn = {2376-5992}, abstract = {The demand for virtual machine requests has increased recently due to the growing number of users and applications. Therefore, virtual machine placement (VMP) is now critical for the provision of efficient resource management in cloud data centers. The VMP process considers the placement of a set of virtual machines onto a set of physical machines, in accordance with a set of criteria. The optimal solution for multi-objective VMP can be determined by using a fitness function that combines the objectives. This paper proposes a novel model to enhance the performance of the VMP decision-making process. Placement decisions are made based on a fitness function that combines three criteria: placement time, power consumption, and resource wastage. The proposed model aims to satisfy minimum values for the three objectives for placement onto all available physical machines. To optimize the VMP solution, the proposed fitness function was implemented using three optimization algorithms: particle swarm optimization with Lévy flight (PSOLF), flower pollination optimization (FPO), and a proposed hybrid algorithm (HPSOLF-FPO). Each algorithm was tested experimentally. The results of the comparative study between the three algorithms show that the hybrid algorithm has the strongest performance. Moreover, the proposed algorithm was tested against the bin packing best fit strategy. 
The results show that the proposed algorithm outperforms the best fit strategy in total server utilization.}, } @article {pmid35107425, year = {2022}, author = {Ongadi, B and Lihana, R and Kiiru, J and Ngayo, M and Obiero, G}, title = {An Android-Based Mobile App (ARVPredictor) for the Detection of HIV Drug-Resistance Mutations and Treatment at the Point of Care: Development Study.}, journal = {JMIR formative research}, volume = {6}, number = {2}, pages = {e26891}, pmid = {35107425}, issn = {2561-326X}, abstract = {BACKGROUND: HIV/AIDS remains one of the major global human health challenges, especially in resource-limited environments. By 2017, over 77.3 million people were infected with the disease, and approximately 35.4 million individuals had already died from AIDS-related illnesses. Approximately 21.7 million people were accessing ART with significant clinical outcomes. However, numerous challenges are experienced in the delivery and accurate interpretation of data on patients with HIV data by various health care providers at different care levels. Mobile health (mHealth) technology is progressively making inroads into the health sector as well as medical research. Different mobile devices have become common in health care settings, leading to rapid growth in the development of downloadable software specifically designed to fulfill particular health-related purposes.

OBJECTIVE: We developed a mobile-based app called ARVPredictor and demonstrated that it can accurately define HIV-1 drug-resistance mutations in the HIV pol gene for use at the point of care.

METHODS: ARVPredictor was designed using Android Studio with Java as the programming language and is compatible with both Android and iOS. The app system is hosted on Nginx Server, and network calls are built on PHP's Laravel framework handled by the Retrofit Library. The DigitalOcean offers a high-performance and stable cloud computing platform for ARVPredictor. This mobile app is enlisted in the Google Play Store as an "ARVPredictor" and the source code is available under MIT permissive license at a GitHub repository. To test for agreement between the ARVPredictor and Stanford HIV Database in detecting HIV subtype and NNRT and NRTI mutations, a total of 100 known HIV sequences were evaluated.

RESULTS: The mobile-based app (ARVPredictor) takes in a set of sequences or known mutations (protease, reverse transcriptase and integrase). It then returns inferred levels of resistance to selected nucleoside, nonnucleoside protease, and integrase inhibitors for accurate HIV/AIDS management at the point of care. The ARVPredictor identified similar HIV subtypes in 98/100 sequences compared with the Stanford HIV Database (κ=0.98, indicating near perfect agreement). There were 89/100 major NNRTI and NRTI mutations identified by ARVPredictor, similar to the Stanford HIV Database (κ=0.89, indicating near perfect agreement). Eight mutations classified as major by the Stanford HIV Database were classified as others by ARVPredictor.

CONCLUSIONS: The ARVPredictor largely agrees with the Stanford HIV Database in identifying both major and minor proteases, reverse transcriptase, and integrase mutations. The app can be conveniently used robustly at the point of care by HIV/AIDS care providers to improve the management of HIV infection.}, } @article {pmid35106098, year = {2022}, author = {Xing, H and Zhu, L and Chen, B and Niu, J and Li, X and Feng, Y and Fang, W}, title = {Spatial and temporal changes analysis of air quality before and after the COVID-19 in Shandong Province, China.}, journal = {Earth science informatics}, volume = {15}, number = {2}, pages = {863-876}, pmid = {35106098}, issn = {1865-0473}, abstract = {Due to the COVID-19 pandemic outbreak, the home quarantine policy was implemented to control the spread of the pandemic, which may have a positive impact on the improvement of air quality in China. In this study, Google Earth Engine (GEE) cloud computing platform was used to obtain CO, NO2, SO2 and aerosol optical depth (AOD) data from December 2018-March 2019, December 2019-March 2020, and December 2020-March 2021 in Shandong Province. These data were used to study the spatial and temporal distribution of air quality changes in Shandong Province before and after the pandemic and to analyze the reasons for the changes. The results show that: (1) Compared with the same period, CO and NO2 showed a decreasing trend from December 2019 to March 2020, with an average total change of 4082.36 mol/m[2] and 167.25 mol/m[2], and an average total change rate of 4.80% and 38.11%, respectively. SO2 did not have a significant decrease. This is inextricably linked to the reduction of human travel production activities with the implementation of the home quarantine policy. 
(2) The spatial and temporal variation of AOD was similar to that of pollutants, but showed a significant increase in January 2020, with an average total amount increase of 1.69 × 10[7] up about 2.54% from December 2019 to March 2020. This is attributed to urban heating and the reduction of pollutants such as NOx. (3) Pollutants and AOD were significantly correlated with meteorological data (e.g., average temperature, average humidity, average wind speed, average precipitation, etc.). This study provides data support for atmospheric protection and air quality monitoring in Shandong Province, as well as theoretical basis and technical guidance for policy formulation and urban planning.}, } @article {pmid35096141, year = {2022}, author = {Shenghua, Z and Bader, H and Jue, C}, title = {A Dynamic Equilibrium Mechanism of Core Layer Interests in the Mobile Medical Platform Ecosystem.}, journal = {Applied bionics and biomechanics}, volume = {2022}, number = {}, pages = {8915055}, pmid = {35096141}, issn = {1176-2322}, abstract = {In recent years, with the development of the mobile Internet, big data, and cloud computing, the mobile medical platforms such as Ding Xiang Yuan aggregating platform ecological resources have played an irreplaceable role in improving efficiency, optimizing resource allocation, and even promoting the transformation and upgrading of the medical industry. Despite all this, most mobile medical platforms in China still face many problems, including the immature business model, the stagnation of the interaction of knowledge and information among platform members, and the weak platform competitiveness. 
Based on a review of the platform and commercial ecosystems, this paper adopts the evolutionary game method and simulation to analyze the evolutionary stability strategy of operators, partners, and users in the core layer of the platform during preflow and postflow periods of a mobile medical platform, hence, to construct a beneficial dynamic equilibrium model of a platform business ecosystem under the optimal decisions made by all parties involved in the platform: the goal in the early stage (preflow period) is to increase platform user flow. Hence, the knowledge/information sharing of platform users is needed to enhance platform's visibility. While in the late period (postflow period), when the platform user flow reaches a certain scale, platform's goal is to promote revenue, which relies mainly on the pricing strategy. It is critical to promote the stability of the platform and the dynamic balance of interests at the core layer in the pricing process. This paper applies the platform business ecosystem theory and the evolutionary game theory to mobile medical platform development, contributing theoretically and practically in the following: (1) providing a more solid theoretical support for the mobile medical platform research and enriching the theoretical framework of the platform business ecosystem; (2) proposing the dynamic equilibrium model based on the optimal decisions of the platform core layers, which help to reveal the inherent law of the evolution of the mobile medical platform; (3) providing policy suggestions and management implications in constructing an appropriate business ecosystem and achieving sustainable development in mobile medical platforms.}, } @article {pmid35096132, year = {2022}, author = {Mustafa, M and Alshare, M and Bhargava, D and Neware, R and Singh, B and Ngulube, P}, title = {Perceived Security Risk Based on Moderating Factors for Blockchain Technology Applications in Cloud Storage to Achieve Secure Healthcare Systems.}, journal = 
{Computational and mathematical methods in medicine}, volume = {2022}, number = {}, pages = {6112815}, pmid = {35096132}, issn = {1748-6718}, mesh = {Adult ; *Blockchain/standards/statistics & numerical data ; COVID-19/epidemiology ; Cloud Computing/standards/statistics & numerical data ; Computational Biology ; *Computer Security/standards/statistics & numerical data ; Computer Simulation ; *Delivery of Health Care/standards/statistics & numerical data ; *Electronic Health Records/standards/statistics & numerical data ; Female ; Humans ; Male ; Middle Aged ; Pandemics ; Privacy ; SARS-CoV-2 ; Surveys and Questionnaires ; Young Adult ; }, abstract = {Due to the high amount of electronic health records, hospitals have prioritized data protection. Because it uses parallel computing and is distributed, the security of the cloud cannot be guaranteed. Because of the large number of e-health records, hospitals have made data security a major concern. The cloud's security cannot be guaranteed because it uses parallel processing and is distributed. The blockchain (BC) has been deployed in the cloud to preserve and secure medical data because it is particularly prone to security breaches and attacks such as forgery, manipulation, and privacy leaks. An overview of blockchain (BC) technology in cloud storage to improve healthcare system security can be obtained by reading this paper. First, we will look at the benefits and drawbacks of using a basic cloud storage system. After that, a brief overview of blockchain cloud storage technology will be offered. Many researches have focused on using blockchain technology in healthcare systems as a possible solution to the security concerns in healthcare, resulting in tighter and more advanced security requirements being provided. This survey could lead to a blockchain-based solution for the protection of cloud-outsourced healthcare data. 
Evaluation and comparison of the simulation tests of the offered blockchain technology-focused studies can demonstrate integrity verification with cloud storage and medical data, data interchange with reduced computational complexity, security, and privacy protection. Because of blockchain and IT, business warfare has emerged, and governments in the Middle East have embraced it. Thus, this research focused on the qualities that influence customers' interest in and approval of blockchain technology in cloud storage for healthcare system security and the aspects that increase people's knowledge of blockchain. One way to better understand how people feel about learning how to use blockchain technology in healthcare is through the United Theory of Acceptance and Use of Technology (UTAUT). A snowball sampling method was used to select respondents in an online poll to gather data about blockchain technology in Middle Eastern poor countries. A total of 443 randomly selected responses were tested using SPSS. Blockchain adoption has been shown to be influenced by anticipation, effort expectancy, social influence (SI), facilitation factors, personal innovativeness (PInn), and a perception of security risk (PSR). 
Blockchain adoption and acceptance were found to be influenced by anticipation, effort expectancy, social influence (SI), facilitating conditions, personal innovativeness (PInn), and perceived security risk (PSR) during the COVID-19 pandemic, as well as providing an overview of current trends in the field and issues pertaining to significance and compatibility.}, } @article {pmid35095192, year = {2022}, author = {Elawady, M and Sarhan, A and Alshewimy, MAM}, title = {Toward a mixed reality domain model for time-Sensitive applications using IoE infrastructure and edge computing (MRIoEF).}, journal = {The Journal of supercomputing}, volume = {78}, number = {8}, pages = {10656-10689}, pmid = {35095192}, issn = {0920-8542}, abstract = {Mixed reality (MR) is one of the technologies with many challenges in the design and implementation phases, especially the problems associated with time-sensitive applications. The main objective of this paper is to introduce a conceptual model for MR application that gives MR application a new layer of interactivity by using Internet of things/Internet of everything models, which provide an improved quality of experience for end-users. The model supports the cloud and fog compute layers to give more functionalities that need more processing resources and reduce the latency problems for time-sensitive applications. Validation of the proposed model is performed via demonstrating a prototype of the model applied to a real-time case study and discussing how to enable standard technologies of the various components in the model. 
Moreover, it shows the applicability of the model, the ease of defining the roles, and the coherence of data or processes found in the most common applications.}, } @article {pmid35087583, year = {2022}, author = {M Abd El-Aziz, R and Alanazi, R and R Shahin, O and Elhadad, A and Abozeid, A and I Taloba, A and Alshalabi, R}, title = {An Effective Data Science Technique for IoT-Assisted Healthcare Monitoring System with a Rapid Adoption of Cloud Computing.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {7425846}, pmid = {35087583}, issn = {1687-5273}, mesh = {*Cloud Computing ; Data Science ; Delivery of Health Care ; Electrocardiography ; Humans ; *Internet of Things ; }, abstract = {Patients are required to be observed and treated continually in some emergency situations. However, due to time constraints, visiting the hospital to execute such tasks is challenging. This can be achieved using a remote healthcare monitoring system. The proposed system introduces an effective data science technique for IoT supported healthcare monitoring system with the rapid adoption of cloud computing that enhances the efficiency of data processing and the accessibility of data in the cloud. Many IoT sensors are employed, which collect real healthcare data. These data are retained in the cloud for the processing of data science. In the Healthcare Monitoring-Data Science Technique (HM-DST), initially, an altered data science technique is introduced. This algorithm is known as the Improved Pigeon Optimization (IPO) algorithm, which is employed for grouping the stored data in the cloud, which helps in improving the prediction rate. Next, the optimum feature selection technique for extraction and selection of features is illustrated. A Backtracking Search-Based Deep Neural Network (BS-DNN) is utilized for classifying human healthcare. 
The proposed system's performance is finally examined with various healthcare datasets of real time and the variations are observed with the available smart healthcare systems for monitoring.}, } @article {pmid35085445, year = {2022}, author = {Verdu, E and Nieto, YV and Saleem, N}, title = {Call for Special Issue Papers: Cloud Computing and Big Data for Cognitive IoT.}, journal = {Big data}, volume = {10}, number = {1}, pages = {83-84}, doi = {10.1089/big.2021.29048.cfp2}, pmid = {35085445}, issn = {2167-647X}, } @article {pmid35082445, year = {2022}, author = {Edgar, RC and Taylor, B and Lin, V and Altman, T and Barbera, P and Meleshko, D and Lohr, D and Novakovsky, G and Buchfink, B and Al-Shayeb, B and Banfield, JF and de la Peña, M and Korobeynikov, A and Chikhi, R and Babaian, A}, title = {Petabase-scale sequence alignment catalyses viral discovery.}, journal = {Nature}, volume = {602}, number = {7895}, pages = {142-147}, pmid = {35082445}, issn = {1476-4687}, mesh = {Animals ; Archives ; Bacteriophages/enzymology/genetics ; Biodiversity ; *Cloud Computing ; Coronavirus/classification/enzymology/genetics ; *Databases, Genetic ; Evolution, Molecular ; Hepatitis Delta Virus/enzymology/genetics ; Humans ; Models, Molecular ; RNA Viruses/classification/enzymology/*genetics/*isolation & purification ; RNA-Dependent RNA Polymerase/chemistry/genetics ; Sequence Alignment/*methods ; Software ; Virology/*methods ; Virome/*genetics ; }, abstract = {Public databases contain a planetary collection of nucleic acid sequences, but their systematic exploration has been inhibited by a lack of efficient methods for searching this corpus, which (at the time of writing) exceeds 20 petabases and is growing exponentially[1]. Here we developed a cloud computing infrastructure, Serratus, to enable ultra-high-throughput sequence alignment at the petabase scale. 
We searched 5.7 million biologically diverse samples (10.2 petabases) for the hallmark gene RNA-dependent RNA polymerase and identified well over 10[5] novel RNA viruses, thereby expanding the number of known species by roughly an order of magnitude. We characterized novel viruses related to coronaviruses, hepatitis delta virus and huge phages, respectively, and analysed their environmental reservoirs. To catalyse the ongoing revolution of viral discovery, we established a free and comprehensive database of these data and tools. Expanding the known sequence diversity of viruses can reveal the evolutionary origins of emerging pathogens and improve pathogen surveillance for the anticipation and mitigation of future pandemics.}, } @article {pmid35079199, year = {2022}, author = {Hassan, MR and Ismail, WN and Chowdhury, A and Hossain, S and Huda, S and Hassan, MM}, title = {A framework of genetic algorithm-based CNN on multi-access edge computing for automated detection of COVID-19.}, journal = {The Journal of supercomputing}, volume = {78}, number = {7}, pages = {10250-10274}, pmid = {35079199}, issn = {0920-8542}, abstract = {This paper designs and develops a computational intelligence-based framework using convolutional neural network (CNN) and genetic algorithm (GA) to detect COVID-19 cases. The framework utilizes a multi-access edge computing technology such that end-user can access available resources as well the CNN on the cloud. Early detection of COVID-19 can improve treatment and mitigate transmission. During peaks of infection, hospitals worldwide have suffered from heavy patient loads, bed shortages, inadequate testing kits and short-staffing problems. Due to the time-consuming nature of the standard RT-PCR test, the lack of expert radiologists, and evaluation issues relating to poor quality images, patients with severe conditions are sometimes unable to receive timely treatment. 
It is thus recommended to incorporate computational intelligence methodologies, which provides highly accurate detection in a matter of minutes, alongside traditional testing as an emergency measure. CNN has achieved extraordinary performance in numerous computational intelligence tasks. However, finding a systematic, automatic and optimal set of hyperparameters for building an efficient CNN for complex tasks remains challenging. Moreover, due to advancement of technology, data are collected at sparse location and hence accumulation of data from such a diverse sparse location poses a challenge. In this article, we propose a framework of computational intelligence-based algorithm that utilize the recent 5G mobile technology of multi-access edge computing along with a new CNN-model for automatic COVID-19 detection using raw chest X-ray images. This algorithm suggests that anyone having a 5G device (e.g., 5G mobile phone) should be able to use the CNN-based automatic COVID-19 detection tool. As part of the proposed automated model, the model introduces a novel CNN structure with the genetic algorithm (GA) for hyperparameter tuning. One such combination of GA and CNN is new in the application of COVID-19 detection/classification. 
The experimental results show that the developed framework could classify COVID-19 X-ray images with 98.48% accuracy which is higher than any of the performances achieved by other studies.}, } @article {pmid35079189, year = {2022}, author = {Nezami, M and Tuli, KR and Dutta, S}, title = {Shareholder wealth implications of software firms' transition to cloud computing: a marketing perspective.}, journal = {Journal of the Academy of Marketing Science}, volume = {50}, number = {3}, pages = {538-562}, pmid = {35079189}, issn = {0092-0703}, abstract = {Moving into cloud computing represents a major marketing shift because it replaces on-premises offerings requiring large, up-front payments with hosted computing resources made available on-demand on a pay-per-use pricing scheme. However, little is known about the effect of this shift on cloud vendors' financial performance. This study draws on a longitudinal data set of 435 publicly listed business-to-business (B2B) firms within the computer software and services industries to investigate, from the vendors' perspective, the shareholder wealth effect of transitioning to the cloud. Using a value relevance model, we find that an unanticipated increase in the cloud ratio (i.e., the share of a firm's revenues from cloud computing) has a positive and significant effect on excess stock returns; and it has a negative and significant effect on idiosyncratic risk. Yet these effects vary across market structures and firms. In particular, unanticipated increases in market maturity intensify the positive effect of moving into the cloud on excess stock returns. 
Further, unexpected increases in advertising intensity strengthen the negative effect of shifting to the cloud on idiosyncratic risk.}, } @article {pmid35076283, year = {2023}, author = {Badshah, A and Jalal, A and Farooq, U and Rehman, GU and Band, SS and Iwendi, C}, title = {Service Level Agreement Monitoring as a Service: An Independent Monitoring Service for Service Level Agreements in Clouds.}, journal = {Big data}, volume = {11}, number = {5}, pages = {339-354}, doi = {10.1089/big.2021.0274}, pmid = {35076283}, issn = {2167-647X}, mesh = {*Artificial Intelligence ; Computer Simulation ; *Cloud Computing ; Internet ; Commerce ; }, abstract = {The cloud network is rapidly growing due to a massive increase in interconnected devices and the emergence of different technologies such as the Internet of things, fog computing, and artificial intelligence. In response, cloud computing needs reliable dealings among the service providers, brokers, and consumers. The existing cloud monitoring frameworks such as Amazon Cloud Watch, Paraleap Azure Watch, and Rack Space Cloud Kick work under the control of service providers. They work fine; however, this may create dissatisfaction among customers over Service Level Agreement (SLA) violations. Customers' dissatisfaction may drastically reduce the businesses of service providers. To cope with the earlier mentioned issue and get in line with cloud philosophy, Monitoring as a Service (MaaS), completely independent in nature, is needed for observing and regulating the cloud businesses. However, the existing MaaS frameworks do not address the comprehensive SLA for customer satisfaction and penalties management. This article proposes a reliable framework for monitoring the provider's services by adopting third-party monitoring services with clearcut SLA and penalties management. Since this framework monitors SLA as a cloud monitoring service, it is named as SLA-MaaS. 
On violations, it penalizes those who are found in breach of terms and condition enlisted in SLA. Simulation results confirmed that the proposed framework adequately satisfies the customers (as well as service providers). This helps in developing a trustworthy relationship among cloud partners and increases customer attention and retention.}, } @article {pmid35070169, year = {2022}, author = {Pezoulas, VC and Goules, A and Kalatzis, F and Chatzis, L and Kourou, KD and Venetsanopoulou, A and Exarchos, TP and Gandolfo, S and Votis, K and Zampeli, E and Burmeister, J and May, T and Marcelino Pérez, M and Lishchuk, I and Chondrogiannis, T and Andronikou, V and Varvarigou, T and Filipovic, N and Tsiknakis, M and Baldini, C and Bombardieri, M and Bootsma, H and Bowman, SJ and Soyfoo, MS and Parisis, D and Delporte, C and Devauchelle-Pensec, V and Pers, JO and Dörner, T and Bartoloni, E and Gerli, R and Giacomelli, R and Jonsson, R and Ng, WF and Priori, R and Ramos-Casals, M and Sivils, K and Skopouli, F and Torsten, W and A G van Roon, J and Xavier, M and De Vita, S and Tzioufas, AG and Fotiadis, DI}, title = {Addressing the clinical unmet needs in primary Sjögren's Syndrome through the sharing, harmonization and federated analysis of 21 European cohorts.}, journal = {Computational and structural biotechnology journal}, volume = {20}, number = {}, pages = {471-484}, pmid = {35070169}, issn = {2001-0370}, abstract = {For many decades, the clinical unmet needs of primary Sjögren's Syndrome (pSS) have been left unresolved due to the rareness of the disease and the complexity of the underlying pathogenic mechanisms, including the pSS-associated lymphomagenesis process. Here, we present the HarmonicSS cloud-computing exemplar which offers beyond the state-of-the-art data analytics services to address the pSS clinical unmet needs, including the development of lymphoma classification models and the identification of biomarkers for lymphomagenesis. 
The users of the platform have been able to successfully interlink, curate, and harmonize 21 regional, national, and international European cohorts of 7,551 pSS patients with respect to the ethical and legal issues for data sharing. Federated AI algorithms were trained across the harmonized databases, with reduced execution time complexity, yielding robust lymphoma classification models with 85% accuracy, 81.25% sensitivity, 85.4% specificity along with 5 biomarkers for lymphoma development. To our knowledge, this is the first GDPR compliant platform that provides federated AI services to address the pSS clinical unmet needs.}, } @article {pmid35069719, year = {2022}, author = {Li, W}, title = {Big Data Precision Marketing Approach under IoT Cloud Platform Information Mining.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {4828108}, pmid = {35069719}, issn = {1687-5273}, mesh = {*Big Data ; *Cloud Computing ; Data Mining ; Humans ; Marketing ; Technology ; }, abstract = {In this article, an in-depth study and analysis of the precision marketing approach are carried out by building an IoT cloud platform and then using the technology of big data information mining. The cloud platform uses the MySQL database combined with the MongoDB database to store the cloud platform data to ensure the correct storage of data as well as to improve the access speed of data. The storage method of IoT temporal data is optimized, and the way of storing data in time slots is used to improve the efficiency of reading large amounts of data. For the scalability of the IoT data storage system, a MongoDB database clustering scheme is designed to ensure the scalability of data storage and disaster recovery capability. 
The relevant theories of big data marketing are reviewed and analyzed; secondly, based on the relevant theories, combined with the author's work experience and relevant information, a comprehensive analysis and research on the current situation of big data marketing are conducted, focusing on its macro-, micro-, and industry environment. The service model combines the types of user needs, encapsulates the resources obtained by the alliance through data mining for service products, and publishes and delivers them in the form of data products. From the perspective of the development of the telecommunications industry, in terms of technology, the telecommunications industry has seen the development trend of mobile replacing fixed networks and triple play. The development of emerging technologies represented by the Internet of Things and cloud computing has also led to technological changes in the telecommunications industry. Operators are facing new development opportunities and challenges. It also divides the service mode into self-service and consulting service mode according to the different degrees of users' cognition and understanding of the service, as well as proposes standardized data mining service guarantee from two aspects: after-sales service and operation supervision. A customized data mining service is a kind of data mining service for users' personalized needs. And the intelligent data mining service guarantee is proposed from two aspects of multicase experience integration and group intelligence. 
In the empirical research part, the big data alliance in Big Data Industry Alliance, which provides data mining service as the main business, is selected as the research object, and the data mining service model of the big data alliance proposed in this article is applied to the actual alliance to verify the scientific and rationality of the data mining service model and improve the data mining service model management system.}, } @article {pmid35064372, year = {2022}, author = {Egger, J and Wild, D and Weber, M and Bedoya, CAR and Karner, F and Prutsch, A and Schmied, M and Dionysio, C and Krobath, D and Jin, Y and Gsaxner, C and Li, J and Pepe, A}, title = {Studierfenster: an Open Science Cloud-Based Medical Imaging Analysis Platform.}, journal = {Journal of digital imaging}, volume = {35}, number = {2}, pages = {340-355}, pmid = {35064372}, issn = {1618-727X}, mesh = {*Cloud Computing ; Humans ; *Image Processing, Computer-Assisted ; Magnetic Resonance Imaging ; Neural Networks, Computer ; Tomography, X-Ray Computed ; }, abstract = {Imaging modalities such as computed tomography (CT) and magnetic resonance imaging (MRI) are widely used in diagnostics, clinical studies, and treatment planning. Automatic algorithms for image analysis have thus become an invaluable tool in medicine. Examples of this are two- and three-dimensional visualizations, image segmentation, and the registration of all anatomical structure and pathology types. In this context, we introduce Studierfenster (www.studierfenster.at): a free, non-commercial open science client-server framework for (bio-)medical image analysis. Studierfenster offers a wide range of capabilities, including the visualization of medical data (CT, MRI, etc.) in two-dimensional (2D) and three-dimensional (3D) space in common web browsers, such as Google Chrome, Mozilla Firefox, Safari, or Microsoft Edge. 
Other functionalities are the calculation of medical metrics (dice score and Hausdorff distance), manual slice-by-slice outlining of structures in medical images, manual placing of (anatomical) landmarks in medical imaging data, visualization of medical data in virtual reality (VR), and a facial reconstruction and registration of medical data for augmented reality (AR). More sophisticated features include the automatic cranial implant design with a convolutional neural network (CNN), the inpainting of aortic dissections with a generative adversarial network, and a CNN for automatic aortic landmark detection in CT angiography images. A user study with medical and non-medical experts in medical image analysis was performed, to evaluate the usability and the manual functionalities of Studierfenster. When participants were asked about their overall impression of Studierfenster in an ISO standard (ISO-Norm) questionnaire, a mean of 6.3 out of 7.0 possible points were achieved. The evaluation also provided insights into the results achievable with Studierfenster in practice, by comparing these with two ground truth segmentations performed by a physician of the Medical University of Graz in Austria. In this contribution, we presented an online environment for (bio-)medical image analysis. In doing so, we established a client-server-based architecture, which is able to process medical data, especially 3D volumes. Our online environment is not limited to medical applications for humans. Rather, its underlying concept could be interesting for researchers from other fields, in applying the already existing functionalities or future additional implementations of further image processing applications. An example could be the processing of medical acquisitions like CT or MRI from animals [Clinical Pharmacology & Therapeutics, 84(4):448-456, 68], which get more and more common, as veterinary clinics and centers get more and more equipped with such imaging devices. 
Furthermore, applications in entirely non-medical research in which images/volumes need to be processed are also thinkable, such as those in optical measuring techniques, astronomy, or archaeology.}, } @article {pmid35062619, year = {2022}, author = {Avgeris, M and Spatharakis, D and Dechouniotis, D and Leivadeas, A and Karyotis, V and Papavassiliou, S}, title = {ENERDGE: Distributed Energy-Aware Resource Allocation at the Edge.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {2}, pages = {}, pmid = {35062619}, issn = {1424-8220}, support = {CHIST-ERA-18-SDCDN-003//CHIST-ERA-2018-DRUID-NET project/ ; }, abstract = {Mobile applications are progressively becoming more sophisticated and complex, increasing their computational requirements. Traditional offloading approaches that use exclusively the Cloud infrastructure are now deemed unsuitable due to the inherent associated delay. Edge Computing can address most of the Cloud limitations at the cost of limited available resources. This bottleneck necessitates an efficient allocation of offloaded tasks from the mobile devices to the Edge. In this paper, we consider a task offloading setting with applications of different characteristics and requirements, and propose an optimal resource allocation framework leveraging the amalgamation of the edge resources. To balance the trade-off between retaining low total energy consumption, respecting end-to-end delay requirements and load balancing at the Edge, we additionally introduce a Markov Random Field based mechanism for the distribution of the excess workload. The proposed approach investigates a realistic scenario, including different categories of mobile applications, edge devices with different computational capabilities, and dynamic wireless conditions modeled by the dynamic behavior and mobility of the users. The framework is complemented with a prediction mechanism that facilitates the orchestration of the physical resources. 
The efficiency of the proposed scheme is evaluated via modeling and simulation and is shown to outperform a well-known task offloading solution, as well as a more recent one.}, } @article {pmid35062563, year = {2022}, author = {Pardeshi, MS and Sheu, RK and Yuan, SM}, title = {Hash-Chain Fog/Edge: A Mode-Based Hash-Chain for Secured Mutual Authentication Protocol Using Zero-Knowledge Proofs in Fog/Edge.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {2}, pages = {}, pmid = {35062563}, issn = {1424-8220}, abstract = {Authentication is essential for the prevention of various types of attacks in fog/edge computing. Therefore, a novel mode-based hash chain for secure mutual authentication is necessary to address the Internet of Things (IoT) devices' vulnerability, as there have been several years of growing concerns regarding their security. Therefore, a novel model is designed that is stronger and effective against any kind of unauthorized attack, as IoT devices' vulnerability is on the rise due to the mass production of IoT devices (embedded processors, camera, sensors, etc.), which ignore the basic security requirements (passwords, secure communication), making them vulnerable and easily accessible. Furthermore, crackable passwords indicate that the security measures taken are insufficient. As per the recent studies, several applications regarding its requirements are the IoT distributed denial of service attack (IDDOS), micro-cloud, secure university, Secure Industry 4.0, secure government, secure country, etc. The problem statement is formulated as the "design and implementation of dynamically interconnecting fog servers and edge devices using the mode-based hash chain for secure mutual authentication protocol", which is stated to be an NP-complete problem. 
The hash-chain fog/edge implementation using timestamps, mode-based hash chaining, the zero-knowledge proof property, a distributed database/blockchain, and cryptography techniques can be utilized to establish the connection of smart devices in large numbers securely. The hash-chain fog/edge uses blockchain for identity management only, which is used to store the public keys in distributed ledger form, and all these keys are immutable. In addition, it has no overhead and is highly secure as it performs fewer calculations and requires minimum infrastructure. Therefore, we designed the hash-chain fog/edge (HCFE) protocol, which provides a novel mutual authentication scheme for effective session key agreement (using ZKP properties) with secure protocol communications. The experiment outcomes proved that the hash-chain fog/edge is more efficient at interconnecting various devices and competed favorably in the benchmark comparison.}, } @article {pmid35062426, year = {2022}, author = {Krivic, P and Kusek, M and Cavrak, I and Skocir, P}, title = {Dynamic Scheduling of Contextually Categorised Internet of Things Services in Fog Computing Environment.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {2}, pages = {}, pmid = {35062426}, issn = {1424-8220}, mesh = {Algorithms ; Cloud Computing ; *Internet of Things ; Reproducibility of Results ; }, abstract = {Fog computing emerged as a concept that responds to the requirements of upcoming solutions requiring optimizations primarily in the context of the following QoS parameters: latency, throughput, reliability, security, and network traffic reduction. The rapid development of local computing devices and container-based virtualization enabled the application of fog computing within the IoT environment. However, it is necessary to utilize algorithm-based service scheduling that considers the targeted QoS parameters to optimize the service performance and reach the potential of the fog computing concept. 
In this paper, we first describe our categorization of IoT services that affects the execution of our scheduling algorithm. Secondly, we propose our scheduling algorithm that considers the context of processing devices, user context, and service context to determine the optimal schedule for the execution of service components across the distributed fog-to-cloud environment. The conducted simulations confirmed the performance of the proposed algorithm and showcased its major contribution-dynamic scheduling, i.e., the responsiveness to the volatile QoS parameters due to changeable network conditions. Thus, we successfully demonstrated that our dynamic scheduling algorithm enhances the efficiency of service performance based on the targeted QoS criteria of the specific service scenario.}, } @article {pmid35062410, year = {2022}, author = {Abreha, HG and Hayajneh, M and Serhani, MA}, title = {Federated Learning in Edge Computing: A Systematic Survey.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {2}, pages = {}, pmid = {35062410}, issn = {1424-8220}, support = {31R227//Zayed Center for Health Sciences/ ; }, mesh = {*Cloud Computing ; Forecasting ; Humans ; *Privacy ; }, abstract = {Edge Computing (EC) is a new architecture that extends Cloud Computing (CC) services closer to data sources. EC combined with Deep Learning (DL) is a promising technology and is widely used in several applications. However, in conventional DL architectures with EC enabled, data producers must frequently send and share data with third parties, edge or cloud servers, to train their models. This architecture is often impractical due to the high bandwidth requirements, legalization, and privacy vulnerabilities. The Federated Learning (FL) concept has recently emerged as a promising solution for mitigating the problems of unwanted bandwidth loss, data privacy, and legalization. 
FL can co-train models across distributed clients, such as mobile phones, automobiles, hospitals, and more, through a centralized server, while maintaining data localization. FL can therefore be viewed as a stimulating factor in the EC paradigm as it enables collaborative learning and model optimization. Although the existing surveys have taken into account applications of FL in EC environments, there has not been any systematic survey discussing FL implementation and challenges in the EC paradigm. This paper aims to provide a systematic survey of the literature on the implementation of FL in EC environments with a taxonomy to identify advanced solutions and other open problems. In this survey, we review the fundamentals of EC and FL, then we review the existing related works in FL in EC. Furthermore, we describe the protocols, architecture, framework, and hardware requirements for FL implementation in the EC environment. Moreover, we discuss the applications, challenges, and related existing solutions in the edge FL. Finally, we detail two relevant case studies of applying FL in EC, and we identify open issues and potential directions for future research. We believe this survey will help researchers better understand the connection between FL and EC enabling technologies and concepts.}, } @article {pmid35062202, year = {2022}, author = {Crisan-Vida, M and Golea, I and Bogdan, R and Stoicu-Tivadar, L}, title = {Application Using Standard Communication Between Medical Facilities.}, journal = {Studies in health technology and informatics}, volume = {289}, number = {}, pages = {498-499}, doi = {10.3233/SHTI210969}, pmid = {35062202}, issn = {1879-8365}, mesh = {Communication ; Delivery of Health Care ; *Electronic Health Records ; *Health Level Seven ; Humans ; Software ; }, abstract = {The web-based application described in this paper will support the patient to receive the treatment quicker and the physician to generate the prescription easier. 
The patient will have real-time information if their treatment/prescription is available in the pharmacy. Using a cloud solution will have all the information always available and without delays, the only requirement is the Internet connectivity. Using standardized communication such as HL7 FHIR, the information exchanged is more easily understood by different medical units, and in the future other medical units can access the patient treatment/prescription and have a medical history, in this way the patient will receive better quality in treatment and health care.}, } @article {pmid35062191, year = {2022}, author = {Gallos, P and Menychtas, A and Panagopoulos, C and Bimpas, M and Maglogiannis, I}, title = {Quantifying Citizens' Well-Being in Areas with Natural Based Solutions Using Mobile Computing.}, journal = {Studies in health technology and informatics}, volume = {289}, number = {}, pages = {465-468}, doi = {10.3233/SHTI210958}, pmid = {35062191}, issn = {1879-8365}, mesh = {Cities ; Healthy Lifestyle ; Hot Temperature ; *Mobile Applications ; *Telemedicine ; }, abstract = {Urban planners, architects and civil engineers are integrating Nature-Based Solutions (NBS) to address contemporary environmental, social, health and economic challenges. Many studies claim that NBS are poised to improve citizens' well-being in urban areas. NBS can also benefit Public Health, as they can contribute to optimising environmental parameters (such as urban heat island effects, floods, etc.), as well as to the reduction of diseases, as for example cardiovascular ones and the overall mortality rate. In addition, the usage of mobile health (mHealth) solutions has been broadly applied to support citizens' well-being as they can offer monitoring of their physical and physiological status and promote a healthier lifestyle. 
The aim of this paper is to present the specifications, the design and the development of a mobile app for monitoring citizens' well-being in areas where NBS have been applied. The users' physical activity and vital signs are recorded by wearable devices and the users' locations are recorded by the proposed mobile application. All collected data are transferred to the cloud platform where data management mechanisms aggregate data from different sources for combined analysis. The mobile application is currently available for Android and iOS devices and it is compatible with most smart devices and wearables. The "euPOLIS by BioAssist" application can be used as a health and other data collection tool to investigate citizen's well-being improvement in areas with NBS.}, } @article {pmid35061658, year = {2022}, author = {Schnase, JL and Carroll, ML}, title = {Automatic variable selection in ecological niche modeling: A case study using Cassin's Sparrow (Peucaea cassinii).}, journal = {PloS one}, volume = {17}, number = {1}, pages = {e0257502}, pmid = {35061658}, issn = {1932-6203}, mesh = {*Ecosystem ; }, abstract = {MERRA/Max provides a feature selection approach to dimensionality reduction that enables direct use of global climate model outputs in ecological niche modeling. The system accomplishes this reduction through a Monte Carlo optimization in which many independent MaxEnt runs, operating on a species occurrence file and a small set of randomly selected variables in a large collection of variables, converge on an estimate of the top contributing predictors in the larger collection. These top predictors can be viewed as potential candidates in the variable selection step of the ecological niche modeling process. MERRA/Max's Monte Carlo algorithm operates on files stored in the underlying filesystem, making it scalable to large data sets. 
Its software components can run as parallel processes in a high-performance cloud computing environment to yield near real-time performance. In tests using Cassin's Sparrow (Peucaea cassinii) as the target species, MERRA/Max selected a set of predictors from Worldclim's Bioclim collection of 19 environmental variables that have been shown to be important determinants of the species' bioclimatic niche. It also selected biologically and ecologically plausible predictors from a more diverse set of 86 environmental variables derived from NASA's Modern-Era Retrospective Analysis for Research and Applications Version 2 (MERRA-2) reanalysis, an output product of the Goddard Earth Observing System Version 5 (GEOS-5) modeling system. We believe these results point to a technological approach that could expand the use global climate model outputs in ecological niche modeling, foster exploratory experimentation with otherwise difficult-to-use climate data sets, streamline the modeling process, and, eventually, enable automated bioclimatic modeling as a practical, readily accessible, low-cost, commercial cloud service.}, } @article {pmid35060754, year = {2022}, author = {Krampis, K}, title = {Democratizing bioinformatics through easily accessible software platforms for non-experts in the field.}, journal = {BioTechniques}, volume = {72}, number = {2}, pages = {36-38}, pmid = {35060754}, issn = {1940-9818}, support = {U54 CA221704/CA/NCI NIH HHS/United States ; U54 CA221705/CA/NCI NIH HHS/United States ; }, mesh = {*Computational Biology ; Genomics ; *Software ; }, } @article {pmid35058976, year = {2022}, author = {Zhang, J and Li, T and Jiang, Q and Ma, J}, title = {Enabling efficient traceable and revocable time-based data sharing in smart city.}, journal = {EURASIP journal on wireless communications and networking}, volume = {2022}, number = {1}, pages = {3}, pmid = {35058976}, issn = {1687-1472}, abstract = {With the assistance of emerging techniques, such as cloud 
computing, fog computing and Internet of Things (IoT), smart city is developing rapidly into a novel and well-accepted service pattern these days. The trend also facilitates numerous relevant applications, e.g., smart health care, smart office, smart campus, etc., and drives the urgent demand for data sharing. However, this brings many concerns on data security as there is more private and sensitive information contained in the data of smart city applications. It may incur disastrous consequences if the shared data are illegally accessed, which necessitates an efficient data access control scheme for data sharing in smart city applications with resource-poor user terminals. To this end, we propose an efficient traceable and revocable time-based CP-ABE (TR-TABE) scheme which can achieve time-based and fine-grained data access control over large attribute universe for data sharing in large-scale smart city applications. To trace and punish the malicious users that intentionally leak their keys to pursue illicit profits, we design an efficient user tracing and revocation mechanism with forward and backward security. For efficiency improvement, we integrate outsourced decryption and verify the correctness of its result. 
The proposed scheme is proved secure with formal security proof and is demonstrated to be practical for data sharing in smart city applications with extensive performance evaluation.}, } @article {pmid35052084, year = {2021}, author = {Balicki, J}, title = {Many-Objective Quantum-Inspired Particle Swarm Optimization Algorithm for Placement of Virtual Machines in Smart Computing Cloud.}, journal = {Entropy (Basel, Switzerland)}, volume = {24}, number = {1}, pages = {}, pmid = {35052084}, issn = {1099-4300}, support = {0b241de88b60bf04//Multidisciplinary Digital Publishing Institute (Switzerland)/ ; 2555c92a616617fe//Multidisciplinary Digital Publishing Institute (Switzerland)/ ; 468b476fc32fb800//Multidisciplinary Digital Publishing Institute (Switzerland)/ ; 7266125be481efdf//Multidisciplinary Digital Publishing Institute (Switzerland)/ ; bdf736e2e2ff48a7//Multidisciplinary Digital Publishing Institute (Switzerland)/ ; ad3a4560dcf5b889//Multidisciplinary Digital Publishing Institute (Switzerland)/ ; bbc3447ea3f36d12//Multidisciplinary Digital Publishing Institute (Switzerland)/ ; 3ff49a7a7655c658//Multidisciplinary Digital Publishing Institute (Switzerland)/ ; c2cd01ef141cd4c1//Multidisciplinary Digital Publishing Institute (Switzerland)/ ; d29efa1a53c86b9f//Multidisciplinary Digital Publishing Institute (Switzerland)/ ; 7fc62d47fb31935b//Multidisciplinary Digital Publishing Institute (Switzerland)/ ; a59d04f6891c30b7//Multidisciplinary Digital Publishing Institute (Switzerland)/ ; fab425d0ee521689//Multidisciplinary Digital Publishing Institute (Switzerland)/ ; eece9460d518b025//Multidisciplinary Digital Publishing Institute (Switzerland)/ ; cedad61ffdeda3d3//Multidisciplinary Digital Publishing Institute (Switzerland)/ ; 6706376c39a87747//Multidisciplinary Digital Publishing Institute (Switzerland)/ ; 571a18daaa84fb1e//Multidisciplinary Digital Publishing Institute (Switzerland)/ ; 967271458f9e94eb//Multidisciplinary Digital Publishing Institute (Switzerland)/ ; 
30601201c3d85c5b//Multidisciplinary Digital Publishing Institute (Switzerland)/ ; }, abstract = {Particle swarm optimization algorithm (PSO) is an effective metaheuristic that can determine Pareto-optimal solutions. We propose an extended PSO by introducing quantum gates in order to ensure the diversity of particle populations that are looking for efficient alternatives. The quality of solutions was verified in the issue of assignment of resources in the computing cloud to improve the live migration of virtual machines. We consider the multi-criteria optimization problem of deep learning-based models embedded into virtual machines. Computing clouds with deep learning agents can support several areas of education, smart city or economy. Because deep learning agents require lots of computer resources, seven criteria are studied such as electric power of hosts, reliability of cloud, CPU workload of the bottleneck host, communication capacity of the critical node, a free RAM capacity of the most loaded memory, a free disc memory capacity of the most busy storage, and overall computer costs. Quantum gates modify an accepted position for the current location of a particle. To verify the above concept, various simulations have been carried out on the laboratory cloud based on the OpenStack platform. 
Numerical experiments have confirmed that multi-objective quantum-inspired particle swarm optimization algorithm provides better solutions than the other metaheuristics.}, } @article {pmid35047635, year = {2022}, author = {Kasinathan, G and Jayakumar, S}, title = {Cloud-Based Lung Tumor Detection and Stage Classification Using Deep Learning Techniques.}, journal = {BioMed research international}, volume = {2022}, number = {}, pages = {4185835}, pmid = {35047635}, issn = {2314-6141}, mesh = {*Cloud Computing ; *Databases, Factual ; *Deep Learning ; Humans ; Lung Neoplasms/*diagnostic imaging ; Neoplasm Staging ; *Positron Emission Tomography Computed Tomography ; *Radiographic Image Interpretation, Computer-Assisted ; }, abstract = {Artificial intelligence (AI), Internet of Things (IoT), and the cloud computing have recently become widely used in the healthcare sector, which aid in better decision-making for a radiologist. PET imaging or positron emission tomography is one of the most reliable approaches for a radiologist to diagnosing many cancers, including lung tumor. In this work, we proposed stage classification of lung tumor which is a more challenging task in computer-aided diagnosis. As a result, a modified computer-aided diagnosis is being considered as a way to reduce the heavy workloads and second opinion to radiologists. In this paper, we present a strategy for classifying and validating different stages of lung tumor progression, as well as a deep neural model and data collection using cloud system for categorizing phases of pulmonary illness. The proposed system presents a Cloud-based Lung Tumor Detector and Stage Classifier (Cloud-LTDSC) as a hybrid technique for PET/CT images. The proposed Cloud-LTDSC initially developed the active contour model as lung tumor segmentation, and multilayer convolutional neural network (M-CNN) for classifying different stages of lung cancer has been modelled and validated with standard benchmark images. 
The performance of the presented technique is evaluated using a benchmark image LIDC-IDRI dataset of 50 low doses and also utilized the lung CT DICOM images. Compared with existing techniques in the literature, our proposed method achieved good result for the performance metrics accuracy, recall, and precision evaluated. Under numerous aspects, our proposed approach produces superior outcomes on all of the applied dataset images. Furthermore, the experimental result achieves an average lung tumor stage classification accuracy of 97%-99.1% and an average of 98.6% which is significantly higher than the other existing techniques.}, } @article {pmid35047153, year = {2022}, author = {Syed, SA and Sheela Sobana Rani, K and Mohammad, GB and Anil Kumar, G and Chennam, KK and Jaikumar, R and Natarajan, Y and Srihari, K and Barakkath Nisha, U and Sundramurthy, VP}, title = {Design of Resources Allocation in 6G Cybertwin Technology Using the Fuzzy Neuro Model in Healthcare Systems.}, journal = {Journal of healthcare engineering}, volume = {2022}, number = {}, pages = {5691203}, pmid = {35047153}, issn = {2040-2309}, mesh = {*Cloud Computing ; Computer Simulation ; *Delivery of Health Care ; Humans ; Resource Allocation ; Technology ; }, abstract = {In 6G edge communication networks, the machine learning models play a major role in enabling intelligent decision-making in case of optimal resource allocation in case of the healthcare system. However, it causes a bottleneck, in the form of sophisticated memory calculations, between the hidden layers and the cost of communication between the edge devices/edge nodes and the cloud centres, while transmitting the data from the healthcare management system to the cloud centre via edge nodes. In order to reduce these hurdles, it is important to share workloads to further eliminate the problems related to complicated memory calculations and transmission costs. 
The effort aims mainly to reduce storage costs and cloud computing associated with neural networks as the complexity of the computations increases with increasing numbers of hidden layers. This study modifies federated teaching to function with distributed assignment resource settings as a distributed deep learning model. It improves the capacity to learn from the data and assigns an ideal workload depending on the limited available resources, slow network connection, and more edge devices. Current network status can be sent to the cloud centre by the edge devices and edge nodes autonomously using cybertwin, meaning that local data are often updated to calculate global data. The simulation shows how effective resource management and allocation is better than standard approaches. It is seen from the results that the proposed method achieves higher resource utilization and success rate than existing methods. Index Terms are fuzzy, healthcare, bioinformatics, 6G wireless communication, cybertwin, machine learning, neural network, and edge.}, } @article {pmid35047027, year = {2022}, author = {Raju, KB and Dara, S and Vidyarthi, A and Gupta, VM and Khan, B}, title = {Smart Heart Disease Prediction System with IoT and Fog Computing Sectors Enabled by Cascaded Deep Learning Model.}, journal = {Computational intelligence and neuroscience}, volume = {2022}, number = {}, pages = {1070697}, pmid = {35047027}, issn = {1687-5273}, mesh = {Cloud Computing ; *Deep Learning ; *Heart Diseases ; Humans ; *Internet of Things ; Neural Networks, Computer ; }, abstract = {Chronic illnesses like chronic respiratory disease, cancer, heart disease, and diabetes are threats to humans around the world. Among them, heart disease with disparate features or symptoms complicates diagnosis. Because of the emergence of smart wearable gadgets, fog computing and "Internet of Things" (IoT) solutions have become necessary for diagnosis. 
The proposed model integrates Edge-Fog-Cloud computing for the accurate and fast delivery of outcomes. The hardware components collect data from different patients. The heart feature extraction from signals is done to get significant features. Furthermore, the feature extraction of other attributes is also gathered. All these features are gathered and subjected to the diagnostic system using an Optimized Cascaded Convolution Neural Network (CCNN). Here, the hyperparameters of CCNN are optimized by the Galactic Swarm Optimization (GSO). Through the performance analysis, the precision of the suggested GSO-CCNN is 3.7%, 3.7%, 3.6%, 7.6%, 67.9%, 48.4%, 33%, 10.9%, and 7.6% more advanced than PSO-CCNN, GWO-CCNN, WOA-CCNN, DHOA-CCNN, DNN, RNN, LSTM, CNN, and CCNN, respectively. Thus, the comparative analysis of the suggested system ensures its efficiency over the conventional models.}, } @article {pmid35043169, year = {2022}, author = {Xie, M and Yang, L and Chen, G and Wang, Y and Xie, Z and Wang, H}, title = {RiboChat: a chat-style web interface for analysis and annotation of ribosome profiling data.}, journal = {Briefings in bioinformatics}, volume = {23}, number = {2}, pages = {}, doi = {10.1093/bib/bbab559}, pmid = {35043169}, issn = {1477-4054}, mesh = {Computational Biology/methods ; *Protein Biosynthesis ; RNA, Messenger/metabolism ; *Ribosomes/genetics/metabolism ; Software ; }, abstract = {The increasing volume of ribosome profiling (Ribo-seq) data, computational complexity of its data processing and operational handicap of related analytical procedures present a daunting set of informatics challenges. These impose a substantial barrier to researchers particularly with no or limited bioinformatics expertise in analyzing and decoding translation information from Ribo-seq data, thus driving the need for a new research paradigm for data computation and information extraction. 
In this knowledge base, we herein present a novel interactive web platform, RiboChat (https://db.cngb.org/ribobench/chat.html), for direct analyzing and annotating Ribo-seq data in the form of a chat conversation. It consists of a user-friendly web interface and a backend cloud-computing service. When typing a data analysis question into the chat window, the object-text detection module will be run to recognize relevant keywords from the input text. Based on the features identified in the input, individual analytics modules are then scored to find the perfect-matching candidate. The corresponding analytics module will be further executed after checking the completion status of the uploading of datasets and configured parameters. Overall, RiboChat represents an important step forward in the emerging direction of next-generation data analytics and will enable the broad research community to conveniently decipher translation information embedded within Ribo-seq data.}, } @article {pmid35037207, year = {2022}, author = {Wang, L and Lu, Z and Van Buren, P and Ware, D}, title = {SciApps: An Automated Platform for Processing and Distribution of Plant Genomics Data.}, journal = {Methods in molecular biology (Clifton, N.J.)}, volume = {2443}, number = {}, pages = {197-209}, pmid = {35037207}, issn = {1940-6029}, mesh = {Computational Biology ; Genome, Plant ; *Genomics/methods ; Information Storage and Retrieval ; *Software ; Workflow ; }, abstract = {SciApps is an open-source, web-based platform for processing, storing, visualizing, and distributing genomic data and analysis results. Built upon the Tapis (formerly Agave) platform, SciApps brings users TB-scale of data storage via CyVerse Data Store and over one million CPUs via the Extreme Science and Engineering Discovery Environment (XSEDE) resources at Texas Advanced Computing Center (TACC). 
SciApps provides users ways to chain individual jobs into automated and reproducible workflows in a distributed cloud and provides a management system for data, associated metadata, individual analysis jobs, and multi-step workflows. This chapter provides examples of how to (1) submitting, managing, constructing workflows, (2) using public workflows for Bulked Segregant Analysis (BSA), (3) constructing a Data Analysis Center (DAC), and Data Coordination Center (DCC) for the plant ENCODE project.}, } @article {pmid35037200, year = {2022}, author = {Williams, J}, title = {CyVerse for Reproducible Research: RNA-Seq Analysis.}, journal = {Methods in molecular biology (Clifton, N.J.)}, volume = {2443}, number = {}, pages = {57-79}, pmid = {35037200}, issn = {1940-6029}, mesh = {*Big Data ; Cloud Computing ; Data Analysis ; RNA-Seq ; Reproducibility of Results ; *Software ; }, abstract = {Posing complex research questions poses complex reproducibility challenges. Datasets may need to be managed over long periods of time. Reliable and secure repositories are needed for data storage. Sharing big data requires advance planning and becomes complex when collaborators are spread across institutions and countries. Many complex analyses require the larger compute resources only provided by cloud and high-performance computing infrastructure. Finally at publication, funder and publisher requirements must be met for data availability and accessibility and computational reproducibility. For all of these reasons, cloud-based cyberinfrastructures are an important component for satisfying the needs of data-intensive research. Learning how to incorporate these technologies into your research skill set will allow you to work with data analysis challenges that are often beyond the resources of individual research institutions. One of the advantages of CyVerse is that there are many solutions for high-powered analyses that do not require knowledge of command line (i.e., Linux) computing. 
In this chapter we will highlight CyVerse capabilities by analyzing RNA-Seq data. The lessons learned will translate to doing RNA-Seq in other computing environments and will focus on how CyVerse infrastructure supports reproducibility goals (e.g., metadata management, containers), team science (e.g., data sharing features), and flexible computing environments (e.g., interactive computing, scaling).}, } @article {pmid35036553, year = {2022}, author = {Ogwel, B and Odhiambo-Otieno, G and Otieno, G and Abila, J and Omore, R}, title = {Leveraging cloud computing for improved health service delivery: Findings from public health facilities in Kisumu County, Western Kenya-2019.}, journal = {Learning health systems}, volume = {6}, number = {1}, pages = {e10276}, pmid = {35036553}, issn = {2379-6146}, abstract = {INTRODUCTION: Healthcare delivery systems across the world have been shown to fall short of the ideals of being cost-effective and meeting pre-established standards of quality but the problem is more pronounced in Africa. Cloud computing emerges as a platform healthcare institutions could leverage to address these shortfalls. The aim of this study was to establish the extent of cloud computing adoption and its influence on health service delivery by public health facilities in Kisumu County.

METHODS: The study employed a cross-sectional study design in one-time data collection among facility in-charges and health records officers from 57 public health facilities. The target population was 114 healthcare personnel and the sample size (n = 88) was computed using the Yamane formula and drawn using stratified random sampling. Poisson regression was used to determine the influence of cloud computing adoption on the number of realized benefits to health service delivery.

RESULTS: Among 80 respondents, Cloud computing had been adopted by 42 (53%) while Software-as-a-Service, Platform-as-a-Service and Infrastructure-as-a-Service implementations were at 100%, 0% and 5% among adopters, respectively. Overall, those who had adopted cloud computing realized a significantly higher number of benefits to health service delivery compared to those who had not (Incident-rate ratio (IRR) =1.93, 95% confidence interval (95% CI) [1.36-2.72]). A significantly higher number of benefits was realized by those who had implemented Infrastructure-as-a-Service alongside Software-as-a-Service (IRR = 2.22, 95% CI [1.15-4.29]) and those who had implemented Software-as-a-Service only (IRR = 1.89, 95% CI [1.33-2.70]) compared to non-adopters. We observed similar results in the stratified analysis looking at economic, operational, and functional benefits to health service delivery.

CONCLUSION: Cloud computing resulted in improved health service delivery with these benefits still being realized irrespective of the service implementation model deployed. The findings buttress the need for healthcare institutions to adopt cloud computing and integrate it in their operations in order to improve health service delivery.}, } @article {pmid35036538, year = {2021}, author = {Li, Y and Li, T and Shen, P and Hao, L and Liu, W and Wang, S and Song, Y and Bao, L}, title = {Sim-DRS: a similarity-based dynamic resource scheduling algorithm for microservice-based web systems.}, journal = {PeerJ. Computer science}, volume = {7}, number = {}, pages = {e824}, pmid = {35036538}, issn = {2376-5992}, abstract = {Microservice-based Web Systems (MWS), which provide a fundamental infrastructure for constructing large-scale cloud-based Web applications, are designed as a set of independent, small and modular microservices implementing individual tasks and communicating with messages. This microservice-based architecture offers great application scalability, but meanwhile incurs complex and reactive autoscaling actions that are performed dynamically and periodically based on current workloads. However, this problem has thus far remained largely unexplored. In this paper, we formulate a problem of Dynamic Resource Scheduling for Microservice-based Web Systems (DRS-MWS) and propose a similarity-based heuristic scheduling algorithm that aims to quickly find viable scheduling schemes by utilizing solutions to similar problems. 
The performance superiority of the proposed scheduling solution in comparison with three state-of-the-art algorithms is illustrated by experimental results generated through a well-known microservice benchmark on disparate computing nodes in public clouds.}, } @article {pmid35036334, year = {2022}, author = {Hussain, SA and Bassam, NA and Zayegh, A and Ghawi, SA}, title = {Prediction and evaluation of healthy and unhealthy status of COVID-19 patients using wearable device prototype data.}, journal = {MethodsX}, volume = {9}, number = {}, pages = {101618}, pmid = {35036334}, issn = {2215-0161}, abstract = {COVID-19 pandemic seriousness is making the whole world suffer due to inefficient medication and vaccines. The article prediction analysis is carried out with the dataset downloaded from the Application peripheral interface (API) designed explicitly for COVID-19 quarantined patients. The measured data is collected from a wearable device used for quarantined healthy and unhealthy patients. The wearable device provides data of temperature, heart rate, SPO2, blood saturation, and blood pressure timely for alerting the medical authorities and providing a better diagnosis and treatment. The dataset contains 1085 patients with eight features representing 490 COVID-19 infected and 595 standard cases. The work considers different parameters, namely heart rate, temperature, SpO2, bpm parameters, and health status. Furthermore, the real-time data collected can predict the health status of patients as infected and non-infected from measured parameters. The collected dataset uses a random forest classifier with linear and polynomial regression to train and validate COVID-19 patient data. The google colab is an Integral development environment inbuilt with python and Jupyter notebook with scikit-learn version 0.22.1 virtually tested on cloud coding tools. The dataset is trained and tested in 80% and 20% ratio for accuracy evaluation and avoid overfitting in the model. 
This analysis could help medical authorities and governmental agencies of every country respond timely and reduce the contamination of the disease.•The measured data provide a comprehensive mapping of disease symptoms to predict the health status. They can restrict the virus transmission and take necessary steps to control, mitigate and manage the disease.•Benefits in scientific research with Artificial Intelligence (AI) to tackle the hurdles in analyzing disease diagnosis.•The diagnosis results of disease symptoms can identify the severity of the patient to monitor and manage the difficulties for the outbreak caused.}, } @article {pmid35035864, year = {2022}, author = {He, P and Zhang, B and Shen, S}, title = {Effects of Out-of-Hospital Continuous Nursing on Postoperative Breast Cancer Patients by Medical Big Data.}, journal = {Journal of healthcare engineering}, volume = {2022}, number = {}, pages = {9506915}, pmid = {35035864}, issn = {2040-2309}, mesh = {Big Data ; *Breast Neoplasms/surgery ; Female ; Hospitals ; Humans ; Patient Compliance ; *Quality of Life ; }, abstract = {This study aimed to explore the application value of the intelligent medical communication system based on the Apriori algorithm and cloud follow-up platform in out-of-hospital continuous nursing of breast cancer patients. In this study, the Apriori algorithm is optimized by Amazon Web Services (AWS) and graphics processing unit (GPU) to improve its data mining speed. At the same time, a cloud follow-up platform-based intelligent mobile medical communication system is established, which includes the log-in, my workstation, patient records, follow-up center, satisfaction management, propaganda and education center, SMS platform, and appointment management module. The subjects are divided into the control group (routine telephone follow-up, 163) and the intervention group (continuous nursing intervention, 216) according to different nursing methods. 
The cloud follow-up platform-based intelligent medical communication system is used to analyze patients' compliance, quality of life before and after nursing, function limitation of affected limb, and nursing satisfaction under different nursing methods. The running time of Apriori algorithm is proportional to the data amount and inversely proportional to the number of nodes in the cluster. Compared with the control group, there are statistical differences in the proportion of complete compliance data, the proportion of poor compliance data, and the proportion of total compliance in the intervention group (P < 0.05). After the intervention, the scores of the quality of life in the two groups are statistically different from those before treatment (P < 0.05), and the scores of the quality of life in the intervention group were higher than those in the control group (P < 0.05). The proportion of patients with limited and severely limited functional activity of the affected limb in the intervention group is significantly lower than that in the control group (P < 0.05). 
The satisfaction rate of postoperative nursing in the intervention group is significantly higher than that in the control group (P < 0.001), and the proportion of basically satisfied and dissatisfied patients in the control group was higher than that in the intervention group (P < 0.05).}, } @article {pmid35035843, year = {2022}, author = {Tang, J}, title = {Discussion on Health Service System of Mobile Medical Institutions Based on Internet of Things and Cloud Computing.}, journal = {Journal of healthcare engineering}, volume = {2022}, number = {}, pages = {5235349}, pmid = {35035843}, issn = {2040-2309}, mesh = {*Cloud Computing ; Delivery of Health Care ; Health Services ; Humans ; Internet ; *Internet of Things ; }, abstract = {Because modern human beings pay more and more attention to physical health, and there are many problems in the traditional medical service system, human beings have a higher and higher voice for the new medical model. At present, there are many researches on the application of modern science and technology to put forward solutions to medical development, but they generally pay attention to some details and ignore the construction of the whole medical service system. In order to solve the problems of low efficiency of the traditional medical model, difficult communication between doctors and patients, unreasonable allocation of medical resources, and so on, this article proposes establishing a perfect medical and health service system. First, the correlation functions are used, such as cosine correlation, to calculate the correlation of various medical products, and then the correlation measurement methods of cloud computing and the Internet of Things are used to realize the network connection of smart medical equipment, efficiently store, calculate and analyze health data, and realize online outpatient services, health file management, data analysis, and other functions. 
Then, the energy consumption formula of the wireless transceiver was used to reduce the resource loss in the operation of the system. Then, we use the questionnaire to understand the current situation of mobile medical and put forward improvement suggestions. This article also scores the performance of the system. The experimental results show that the performance rating of traditional medical institutions is B, while the model rating of mobile medical institutions is A, and the efficiency is optimized by 4.42%.}, } @article {pmid35035817, year = {2022}, author = {Li, W and Zhang, Y and Wang, J and Li, Q and Zhao, D and Tang, B and Wang, S and Shao, H}, title = {MicroRNA-489 Promotes the Apoptosis of Cardiac Muscle Cells in Myocardial Ischemia-Reperfusion Based on Smart Healthcare.}, journal = {Journal of healthcare engineering}, volume = {2022}, number = {}, pages = {2538769}, pmid = {35035817}, issn = {2040-2309}, mesh = {Animals ; Apoptosis/physiology ; Humans ; *MicroRNAs/genetics/metabolism ; *Myocardial Infarction ; *Myocardial Ischemia ; *Myocardial Reperfusion Injury/genetics/metabolism ; Myocytes, Cardiac/metabolism ; Phosphatidylinositol 3-Kinases/metabolism ; Proto-Oncogene Proteins c-akt/metabolism ; Reperfusion ; Signal Transduction ; }, abstract = {With the development of information technology, the concept of smart healthcare has gradually come to the fore. Smart healthcare uses a new generation of information technologies, such as the Internet of Things (IoT), big data, cloud computing, and artificial intelligence, to transform the traditional medical system in an all-around way, making healthcare more efficient, more convenient, and more personalized. miRNAs can regulate the proliferation, differentiation, and apoptosis of human cells. Relevant studies have also shown that miRNAs may play a key role in the occurrence and development of myocardial ischemia-reperfusion injury (MIRI). This study aims to explore the effects of miR-489 in MIRI. 
In this study, miR-489 expression in a myocardial ischemia-reperfusion animal model and H9C2 cells induced by H/R was detected by qRT-PCR. The release of lactate dehydrogenase (LDH) and the activity of creatine kinase (CK) was detected after miR-489 knockdown in H9C2 cells induced by H/R. The apoptosis of H9C2 cells and animal models were determined by ELISA. The relationship between miR-489 and SPIN1 was verified by a double fluorescence reporter enzyme assay. The expression of the PI3K/AKT pathway-related proteins was detected by Western blot. Experimental results showed that miR-489 was highly expressed in cardiac muscle cells of the animal model and in H9C2 cells induced by H/R of the myocardial infarction group, which was positively associated with the apoptosis of cardiac muscle cells with ischemia-reperfusion. miR-489 knockdown can reduce the apoptosis of cardiac muscle cells caused by ischemia-reperfusion. In downstream targeting studies, it was found that miR-489 promotes the apoptosis of cardiac muscle cells after ischemia-reperfusion by targeting the inhibition of the SPIN1-mediated PI3K/AKT pathway. In conclusion, high expression of miR-489 is associated with increased apoptosis of cardiac muscle cells after ischemia-reperfusion, which can promote the apoptosis after ischemia-reperfusion by targeting the inhibition of the SPIN1-mediated PI3K/AKT pathway. 
Therefore, miR-489 can be one of the potential therapeutic targets for reducing the apoptosis of cardiac muscle cells after ischemia-reperfusion.}, } @article {pmid35033986, year = {2022}, author = {Jadhao, S and Davison, CL and Roulis, EV and Schoeman, EM and Divate, M and Haring, M and Williams, C and Shankar, AJ and Lee, S and Pecheniuk, NM and Irving, DO and Hyland, CA and Flower, RL and Nagaraj, SH}, title = {RBCeq: A robust and scalable algorithm for accurate genetic blood typing.}, journal = {EBioMedicine}, volume = {76}, number = {}, pages = {103759}, pmid = {35033986}, issn = {2352-3964}, mesh = {Algorithms ; Australia ; *Blood Group Antigens/genetics ; *Blood Grouping and Crossmatching ; Genotype ; Humans ; Reproducibility of Results ; }, abstract = {BACKGROUND: While blood transfusion is an essential cornerstone of hematological care, patients requiring repetitive transfusion remain at persistent risk of alloimmunization due to the diversity of human blood group polymorphisms. Despite the promise, user friendly methods to accurately identify blood types from next-generation sequencing data are currently lacking. To address this unmet need, we have developed RBCeq, a novel genetic blood typing algorithm to accurately identify 36 blood group systems.

METHODS: RBCeq can predict complex blood groups such as RH, and ABO that require identification of small indels and copy number variants. RBCeq also reports clinically significant, rare, and novel variants with potential clinical relevance that may lead to the identification of novel blood group alleles.

FINDINGS: The RBCeq algorithm demonstrated 99.07% concordance when validated on 402 samples which included 29 antigens with serology and 9 antigens with SNP-array validation in 14 blood group systems and 59 antigens validation on manual predicted phenotype from variant call files. We have also developed a user-friendly web server that generates detailed blood typing reports with advanced visualization (https://www.rbceq.org/).

INTERPRETATION: RBCeq will assist blood banks and immunohematology laboratories by overcoming existing methodological limitations like scalability, reproducibility, and accuracy when genotyping and phenotyping in multi-ethnic populations. This Amazon Web Services (AWS) cloud based platform has the potential to reduce pre-transfusion testing time and to increase sample processing throughput, ultimately improving quality of patient care.

FUNDING: This work was supported in part by Advance Queensland Research Fellowship, MRFF Genomics Health Futures Mission (76,757), and the Australian Red Cross LifeBlood. The Australian governments fund the Australian Red Cross Lifeblood for the provision of blood, blood products and services to the Australian community.}, } @article {pmid35031650, year = {2022}, author = {Li, J and Wang, J and Yang, L and Ye, H}, title = {Spatiotemporal change analysis of long time series inland water in Sri Lanka based on remote sensing cloud computing.}, journal = {Scientific reports}, volume = {12}, number = {1}, pages = {766}, pmid = {35031650}, issn = {2045-2322}, support = {No. 2021KRM079//Innovation Capability Support Program of Shaanxi/ ; No. 2021WHZ0090//Technology Innovation Center for Land Engineering and Human Settlements, Shaanxi Land Engineering Construction Group Co.,Ltd and Xi'an Jiaotong University/ ; No. XDA2003030201//Strategic Priority Research Program of Chinese Academy of Sciences/ ; No. 41771388//National Natural Science Foundation of China/ ; }, abstract = {Sri Lanka is an important hub connecting Asia-Africa-Europe maritime routes. It receives abundant but uneven spatiotemporal distribution of rainfall and has evident seasonal water shortages. Monitoring water area changes in inland lakes and reservoirs plays an important role in guiding the development and utilisation of water resources. In this study, a rapid surface water extraction model based on the Google Earth Engine remote sensing cloud computing platform was constructed. By evaluating the optimal spectral water index method, the spatiotemporal variations of reservoirs and inland lakes in Sri Lanka were analysed. The results showed that Automated Water Extraction Index (AWEIsh) could accurately identify the water boundary with an overall accuracy of 99.14%, which was suitable for surface water extraction in Sri Lanka. 
The area of the Maduru Oya Reservoir showed an overall increasing trend based on small fluctuations from 1988 to 2018, and the monthly area of the reservoir fluctuated significantly in 2017. Thus, water resource management in the dry zone should focus more on seasonal regulation and control. From 1995 to 2015, the number and area of lakes and reservoirs in Sri Lanka increased to different degrees, mainly concentrated in arid provinces including Northern, North Central, and Western Provinces. Overall, the amount of surface water resources have increased.}, } @article {pmid35028522, year = {2021}, author = {Li, Q and Jiang, L and Qiao, K and Hu, Y and Chen, B and Zhang, X and Ding, Y and Yang, Z and Li, C}, title = {INCloud: integrated neuroimaging cloud for data collection, management, analysis and clinical translations.}, journal = {General psychiatry}, volume = {34}, number = {6}, pages = {e100651}, pmid = {35028522}, issn = {2517-729X}, abstract = {BACKGROUND: Neuroimaging techniques provide rich and accurate measures of brain structure and function, and have become one of the most popular methods in mental health and neuroscience research. Rapidly growing neuroimaging research generates massive amounts of data, bringing new challenges in data collection, large-scale data management, efficient computing requirements and data mining and analyses.

AIMS: To tackle the challenges and promote the application of neuroimaging technology in clinical practice, we developed an integrated neuroimaging cloud (INCloud). INCloud provides a full-stack solution for the entire process of large-scale neuroimaging data collection, management, analysis and clinical applications.

METHODS: INCloud consists of data acquisition systems, a data warehouse, automatic multimodal image quality check and processing systems, a brain feature library, a high-performance computing cluster and computer-aided diagnosis systems (CADS) for mental disorders. A unique design of INCloud is the brain feature library that converts the unit of data management from image to image features such as hippocampal volume. Connecting the CADS to the scientific database, INCloud allows the accumulation of scientific data to continuously improve the accuracy of objective diagnosis of mental disorders.

RESULTS: Users can manage and analyze neuroimaging data on INCloud, without the need to download them to the local device. INCloud users can query, manage, analyze and share image features based on customized criteria. Several examples of 'mega-analyses' based on the brain feature library are shown.

CONCLUSIONS: Compared with traditional neuroimaging acquisition and analysis workflow, INCloud features safe and convenient data management and sharing, reduced technical requirements for researchers, high-efficiency computing and data mining, and straightforward translations to clinical service. The design and implementation of the system are also applicable to imaging research platforms in other fields.}, } @article {pmid35027906, year = {2021}, author = {Han, H and Gu, X}, title = {Linkage Between Inclusive Digital Finance and High-Tech Enterprise Innovation Performance: Role of Debt and Equity Financing.}, journal = {Frontiers in psychology}, volume = {12}, number = {}, pages = {814408}, pmid = {35027906}, issn = {1664-1078}, abstract = {This study investigates the relationship between digital financial inclusion, external financing, and the innovation performance of high-tech enterprises in China. The choice of corporate financing methods is an important part of organizational behavioral psychology, and different financing models will have a certain effect on organizational performance, especially in the digital economy environment. Therefore, based on resource dependence theory and financing constraint theory, the present study utilizes the panel data collected from the China Stock Market & Accounting Research (CSMAR) database from 2011 to 2020 of 112 companies in the Yangtze River Delta region and the "The Peking University Digital Financial Inclusion Index of China (PKU-DFIIC)" released by the Peking University Digital Finance Research Center and Ant Financial Group. The results show that the Digital Financial Inclusion Index (DFIIC) has a significant positive correlation with the innovation performance of high-tech enterprises. The higher the level of debt financing, the stronger the role of digital financial inclusion in promoting innovation performance. 
Investigating the DFIIC in terms of coverage breadth and usage depth, we find that usage depth does not significantly encourage innovation performance. The effect of the interaction between coverage breadth and external financing is consistent with the results for the DFIIC. The study suggests that equity financing promotes the usage depth of the DFIIC in state-owned enterprises. In contrast, debt financing promotes the coverage breadth of non-state-owned enterprises. Finally, we propose relevant policy recommendations based on the research results. It includes in-depth popularization of inclusive finance in the daily operations of enterprises at the technical level, refinement of external financing policy incentives for enterprises based on the characteristics of ownership, and strengthening the research of technologies such as big data, artificial intelligence (AI), and cloud computing. The paper presents a range of theoretical and practical implications for practitioners and academics relevant to high-tech enterprises.}, } @article {pmid35022699, year = {2022}, author = {Decap, D and de Schaetzen van Brienen, L and Larmuseau, M and Costanza, P and Herzeel, C and Wuyts, R and Marchal, K and Fostier, J}, title = {Halvade somatic: Somatic variant calling with Apache Spark.}, journal = {GigaScience}, volume = {11}, number = {1}, pages = {}, pmid = {35022699}, issn = {2047-217X}, mesh = {*High-Throughput Nucleotide Sequencing/methods ; Polymorphism, Single Nucleotide ; Sequence Analysis, DNA/methods ; *Software ; Exome Sequencing ; Whole Genome Sequencing ; }, abstract = {BACKGROUND: The accurate detection of somatic variants from sequencing data is of key importance for cancer treatment and research. Somatic variant calling requires a high sequencing depth of the tumor sample, especially when the detection of low-frequency variants is also desired. In turn, this leads to large volumes of raw sequencing data to process and hence, large computational requirements. 
For example, calling the somatic variants according to the GATK best practices guidelines requires days of computing time for a typical whole-genome sequencing sample.

FINDINGS: We introduce Halvade Somatic, a framework for somatic variant calling from DNA sequencing data that takes advantage of multi-node and/or multi-core compute platforms to reduce runtime. It relies on Apache Spark to provide scalable I/O and to create and manage data streams that are processed on different CPU cores in parallel. Halvade Somatic contains all required steps to process the tumor and matched normal sample according to the GATK best practices recommendations: read alignment (BWA), sorting of reads, preprocessing steps such as marking duplicate reads and base quality score recalibration (GATK), and, finally, calling the somatic variants (Mutect2). Our approach reduces the runtime on a single 36-core node to 19.5 h compared to a runtime of 84.5 h for the original pipeline, a speedup of 4.3 times. Runtime can be further decreased by scaling to multiple nodes, e.g., we observe a runtime of 1.36 h using 16 nodes, an additional speedup of 14.4 times. Halvade Somatic supports variant calling from both whole-genome sequencing and whole-exome sequencing data and also supports Strelka2 as an alternative or complementary variant calling tool. We provide a Docker image to facilitate single-node deployment. Halvade Somatic can be executed on a variety of compute platforms, including Amazon EC2 and Google Cloud.

CONCLUSIONS: To our knowledge, Halvade Somatic is the first somatic variant calling pipeline that leverages Big Data processing platforms and provides reliable, scalable performance. Source code is freely available.}, } @article {pmid35022620, year = {2022}, author = {Feldman, D and Funk, L and Le, A and Carlson, RJ and Leiken, MD and Tsai, F and Soong, B and Singh, A and Blainey, PC}, title = {Pooled genetic perturbation screens with image-based phenotypes.}, journal = {Nature protocols}, volume = {17}, number = {2}, pages = {476-512}, pmid = {35022620}, issn = {1750-2799}, support = {P50 HG006193/HG/NHGRI NIH HHS/United States ; R01 HG009283/HG/NHGRI NIH HHS/United States ; RM1 HG006193/HG/NHGRI NIH HHS/United States ; }, mesh = {*RNA, Guide, CRISPR-Cas Systems ; }, abstract = {Discovery of the genetic components underpinning fundamental and disease-related processes is being rapidly accelerated by combining efficient, programmable genetic engineering with phenotypic readouts of high spatial, temporal and/or molecular resolution. Microscopy is a fundamental tool for studying cell biology, but its lack of high-throughput sequence readouts hinders integration in large-scale genetic screens. Optical pooled screens using in situ sequencing provide massively scalable integration of barcoded lentiviral libraries (e.g., CRISPR perturbation libraries) with high-content imaging assays, including dynamic processes in live cells. The protocol uses standard lentiviral vectors and molecular biology, providing single-cell resolution of phenotype and engineered genotype, scalability to millions of cells and accurate sequence reads sufficient to distinguish >10[6] perturbations. In situ amplification takes ~2 d, while sequencing can be performed in ~1.5 h per cycle. 
The image analysis pipeline provided enables fully parallel automated sequencing analysis using a cloud or cluster computing environment.}, } @article {pmid35016766, year = {2022}, author = {Maniyar, CB and Kumar, A and Mishra, DR}, title = {Continuous and Synoptic Assessment of Indian Inland Waters for Harmful Algae Blooms.}, journal = {Harmful algae}, volume = {111}, number = {}, pages = {102160}, doi = {10.1016/j.hal.2021.102160}, pmid = {35016766}, issn = {1878-1470}, mesh = {*Cyanobacteria ; *Harmful Algal Bloom ; India ; Lakes/microbiology ; Water Quality ; }, abstract = {Cyanobacterial Harmful Algal Blooms (CyanoHABs) are progressively becoming a major water quality, socioeconomic, and health hazard worldwide. In India, there are frequent episodes of severe CyanoHABs, which are left untreated due to a lack of awareness and monitoring infrastructure, affecting the economy of the country gravely. In this study, for the first time, we present a country-wide analysis of CyanoHABs in India by developing a novel interactive cloud-based dashboard called "CyanoKhoj" in Google Earth Engine (GEE) which uses Sentinel-3 Ocean and Land Colour Instrument (OLCI) remotely sensed datasets. The main goal of this study was to showcase the utility of CyanoKhoj for rapid monitoring and discuss the widespread CyanoHABs problems across India. We demonstrate the utility of Cyanokhoj by including select case studies of lakes and reservoirs geographically spread across five states: Bargi and Gandhisagar Dams in Madhya Pradesh, Hirakud Reservoir in Odisha, Ukai Dam in Gujarat, Linganamakki Reservoir in Karnataka, and Pulicat Lake in Tamil Nadu. These sites were studied from September to November 2018 using CyanoKhoj, which is capable of near-real-time monitoring and country-wide assessment of CyanoHABs. 
We used CyanoKhoj to prepare spatiotemporal maps of Chlorophyll-a (Chl-a) content and Cyanobacterial Cell Density (CCD) to study the local spread of the CyanoHABs and their phenology in these waterbodies. A first-ever all-India CCD map is also presented for the year 2018, which highlights the spatial spread of CyanoHABs throughout the country (32 large waterbodies across India with severe bloom: CCD>2,500,000). Results indicate that CyanoHABs are most prevalent in nutrient-rich waterbodies prone to industrial and other nutrient-rich discharges. A clear temporal evolution of the blooms showed that they are dominant during the post-monsoon season (September-October) when the nutrient concentrations in the waterbodies are at their peak, and they begin to decline towards winter (November-December). CyanoKhoj is an open-source tool that can have a significant broader impact in mapping CyanoHABs not only throughout cyanobacteria data-scarce India, but on a global level using archived and future Sentinel-3A/B OLCI data.}, } @article {pmid35013645, year = {2022}, author = {Saxena, D and Singh, AK}, title = {OFP-TM: an online VM failure prediction and tolerance model towards high availability of cloud computing environments.}, journal = {The Journal of supercomputing}, volume = {78}, number = {6}, pages = {8003-8024}, pmid = {35013645}, issn = {0920-8542}, abstract = {The indispensable collaboration of cloud computing in every digital service has raised its resource usage exponentially. The ever-growing demand of cloud resources evades service availability leading to critical challenges such as cloud outages, SLA violation, and excessive power consumption. Previous approaches have addressed this problem by utilizing multiple cloud platforms or running multiple replicas of a Virtual Machine (VM) resulting into high operational cost. 
This paper has addressed this alarming problem from a different perspective by proposing a novel Online virtual machine Failure Prediction and Tolerance Model (OFP-TM) with high availability awareness embedded in physical machines as well as virtual machines. The failure-prone VMs are estimated in real-time based on their future resource usage by developing an ensemble approach-based resource predictor. These VMs are assigned to a failure tolerance unit comprising of a resource provision matrix and Selection Box (S-Box) mechanism which triggers the migration of failure-prone VMs and handle any outage beforehand while maintaining the desired level of availability for cloud users. The proposed model is evaluated and compared against existing related approaches by simulating cloud environment and executing several experiments using a real-world workload Google Cluster dataset. Consequently, it has been concluded that OFP-TM improves availability and scales down the number of live VM migrations up to 33.5% and 83.3%, respectively, over without OFP-TM.}, } @article {pmid35009941, year = {2022}, author = {Syed, SA and Rashid, M and Hussain, S and Azim, F and Zahid, H and Umer, A and Waheed, A and Zareei, M and Vargas-Rosales, C}, title = {QoS Aware and Fault Tolerance Based Software-Defined Vehicular Networks Using Cloud-Fog Computing.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {1}, pages = {}, pmid = {35009941}, issn = {1424-8220}, support = {MX009876//This project is supported by Tecnologico de Monterrey, School of Engineering and Sciences, Zapopan 45201, Mexico/ ; }, abstract = {Software-defined network (SDN) and vehicular ad-hoc network (VANET) combined provided a software-defined vehicular network (SDVN). To increase the quality of service (QoS) of vehicle communication and to make the overall process efficient, researchers are working on VANET communication systems. 
Current research work has made many strides, but due to the following limitations, it needs further investigation and research: Cloud computing is used for messages/tasks execution instead of fog computing, which increases response time. Furthermore, a fault tolerance mechanism is used to reduce the tasks/messages failure ratio. We proposed QoS aware and fault tolerance-based software-defined vehicular networks using Cloud-fog computing (QAFT-SDVN) to address the above issues. We provided heuristic algorithms to solve the above limitations. The proposed model gets vehicle messages through SDN nodes which are placed on fog nodes. SDN controllers receive messages from nearby SDN units and prioritize the messages in two different ways. One is the message nature way, while the other one is deadline and size way of messages prioritization. SDN controller categorized in safety and non-safety messages and forward to the destination. After sending messages to their destination, we check their acknowledgment; if the destination receives the messages, then no action is taken; otherwise, we use a fault tolerance mechanism. We send the messages again. The proposed model is implemented in CloudSim and iFogSim, and compared with the latest models. The results show that our proposed model decreased response time by 50% of the safety and non-safety messages by using fog nodes for the SDN controller. Furthermore, we reduced the execution time of the safety and non-safety messages by up to 4%. 
Similarly, compared with the latest model, we reduced the task failure ratio by 20%, 15%, 23.3%, and 22.5%.}, } @article {pmid35009820, year = {2021}, author = {Loke, CH and Adam, MS and Nordin, R and Abdullah, NF and Abu-Samah, A}, title = {Physical Distancing Device with Edge Computing for COVID-19 (PADDIE-C19).}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {1}, pages = {}, pmid = {35009820}, issn = {1424-8220}, support = {FA2386-20-1-4045//United States Air Force Office of Scientific Research/ ; }, mesh = {Artificial Intelligence ; *COVID-19 ; Humans ; Masks ; *Physical Distancing ; SARS-CoV-2 ; }, abstract = {The most effective methods of preventing COVID-19 infection include maintaining physical distancing and wearing a face mask while in close contact with people in public places. However, densely populated areas have a greater incidence of COVID-19 dissemination, which is caused by people who do not comply with standard operating procedures (SOPs). This paper presents a prototype called PADDIE-C19 (Physical Distancing Device with Edge Computing for COVID-19) to implement the physical distancing monitoring based on a low-cost edge computing device. The PADDIE-C19 provides real-time results and responses, as well as notifications and warnings to anyone who violates the 1-m physical distance rule. In addition, PADDIE-C19 includes temperature screening using an MLX90614 thermometer and ultrasonic sensors to restrict the number of people on specified premises. The Neural Network Processor (KPU) in Grove Artificial Intelligence Hardware Attached on Top (AI HAT), an edge computing unit, is used to accelerate the neural network model on person detection and achieve up to 18 frames per second (FPS). The results show that the accuracy of person detection with Grove AI HAT could achieve 74.65% and the average absolute error between measured and actual physical distance is 8.95 cm. 
Furthermore, the accuracy of the MLX90614 thermometer is guaranteed to have less than 0.5 °C value difference from the more common Fluke 59 thermometer. Experimental results also proved that when cloud computing is compared to edge computing, the Grove AI HAT achieves the average performance of 18 FPS for a person detector (kmodel) with an average 56 ms execution time in different networks, regardless of the network connection type or speed.}, } @article {pmid35009814, year = {2021}, author = {Ojo, MO and Viola, I and Baratta, M and Giordano, S}, title = {Practical Experiences of a Smart Livestock Location Monitoring System Leveraging GNSS, LoRaWAN and Cloud Services.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {1}, pages = {}, pmid = {35009814}, issn = {1424-8220}, mesh = {Animals ; Cloud Computing ; Farms ; *Internet of Things ; *Livestock ; Monitoring, Physiologic ; }, abstract = {Livestock farming is, in most cases in Europe, unsupervised, thus making it difficult to ensure adequate control of the position of the animals for the improvement of animal welfare. In addition, the geographical areas involved in livestock grazing usually have difficult access with harsh orography and lack of communications infrastructure, thus the need to provide a low-power livestock localization and monitoring system is of paramount importance, which is crucial not for a sustainable agriculture, but also for the protection of native breeds and meats thanks to their controlled supervision. In this context, this work presents an Internet of things (IoT)-based system integrating low-power wide area (LPWA) technology, cloud, and virtualization services to provide real-time livestock location monitoring. 
Taking into account the constraints coming from the environment in terms of energy supply and network connectivity, our proposed system is based on a wearable device equipped with inertial sensors, Global Positioning System (GPS) receiver, and LoRaWAN transceiver, which can provide a satisfactory compromise between performance, cost, and energy consumption. At first, this article provides the state-of-the-art localization techniques and technologies applied to smart livestock. Then, we proceed to provide the hardware and firmware co-design to achieve very low energy consumption, thus providing a significant positive impact to the battery life. The proposed platform has been evaluated in a pilot test in the northern part of Italy, evaluating different configurations in terms of sampling period, experimental duration, and number of devices. The results are analyzed and discussed for packet delivery ratio, energy consumption, localization accuracy, battery discharge measurement, and delay.}, } @article {pmid35009770, year = {2021}, author = {Forcén-Muñoz, M and Pavón-Pulido, N and López-Riquelme, JA and Temnani-Rajjaf, A and Berríos, P and Morais, R and Pérez-Pastor, A}, title = {Irriman Platform: Enhancing Farming Sustainability through Cloud Computing Techniques for Irrigation Management.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {1}, pages = {}, pmid = {35009770}, issn = {1424-8220}, support = {PCIN-2017-091//Ministry of Economy, Industry and Competitiveness of Spain, National Research Agency (AEI)/ ; PID2019-106226RB-C22//National Research Agency (AEI) Spain/ ; }, mesh = {Agricultural Irrigation ; *Agriculture ; Climate Change ; *Cloud Computing ; Crops, Agricultural ; Farms ; }, abstract = {Crop sustainability is essential for balancing economic development and environmental care, mainly in strong and very competitive regions in the agri-food sector, such as the Region of Murcia in Spain, considered to be the orchard of Europe, despite 
being a semi-arid area with an important scarcity of fresh water. In this region, farmers apply efficient techniques to minimize supplies and maximize quality and productivity; however, the effects of climate change and the degradation of significant natural environments, such as, the "Mar Menor", the most extent saltwater lagoon of Europe, threatened by resources overexploitation, lead to the search of even better irrigation management techniques to avoid certain effects which could damage the quaternary aquifer connected to such lagoon. This paper describes the Irriman Platform, a system based on Cloud Computing techniques, which includes low-cost wireless data loggers, capable of acquiring data from a wide range of agronomic sensors, and a novel software architecture for safely storing and processing such information, making crop monitoring and irrigation management easier. The proposed platform helps agronomists to optimize irrigation procedures through a usable web-based tool which allows them to elaborate irrigation plans and to evaluate their effectiveness over crops. The system has been deployed in a large number of representative crops, located along near 50,000 ha of the surface, during several phenological cycles. Results demonstrate that the system enables crop monitoring and irrigation optimization, and makes interaction between farmers and agronomists easier.}, } @article {pmid35009740, year = {2021}, author = {Angel, NA and Ravindran, D and Vincent, PMDR and Srinivasan, K and Hu, YC}, title = {Recent Advances in Evolving Computing Paradigms: Cloud, Edge, and Fog Technologies.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {1}, pages = {}, pmid = {35009740}, issn = {1424-8220}, support = {MOST 110-2622-E-197-009//Ministry of Science and Technology/ ; }, abstract = {Cloud computing has become integral lately due to the ever-expanding Internet-of-things (IoT) network. 
It still is and continues to be the best practice for implementing complex computational applications, emphasizing the massive processing of data. However, the cloud falls short due to the critical constraints of novel IoT applications generating vast data, which entails a swift response time with improved privacy. The newest drift is moving computational and storage resources to the edge of the network, involving a decentralized distributed architecture. The data processing and analytics perform at proximity to end-users, and overcome the bottleneck of cloud computing. The trend of deploying machine learning (ML) at the network edge to enhance computing applications and services has gained momentum lately, specifically to reduce latency and energy consumed while optimizing the security and management of resources. There is a need for rigorous research efforts oriented towards developing and implementing machine learning algorithms that deliver the best results in terms of speed, accuracy, storage, and security, with low power consumption. 
This extensive survey presented on the prominent computing paradigms in practice highlights the latest innovations resulting from the fusion between ML and the evolving computing paradigms and discusses the underlying open research challenges and future prospects.}, } @article {pmid35009652, year = {2021}, author = {Quezada-Gaibor, D and Torres-Sospedra, J and Nurmi, J and Koucheryavy, Y and Huerta, J}, title = {Cloud Platforms for Context-Adaptive Positioning and Localisation in GNSS-Denied Scenarios-A Systematic Review.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {1}, pages = {}, pmid = {35009652}, issn = {1424-8220}, support = {813278//H2020 Marie Skłodowska-Curie Actions/ ; 101023072//H2020 Marie Skłodowska-Curie Actions/ ; PTQ2018-009981//Ministerio de Ciencia, Innovación y Universidades/ ; }, abstract = {Cloud Computing and Cloud Platforms have become an essential resource for businesses, due to their advanced capabilities, performance, and functionalities. Data redundancy, scalability, and security, are among the key features offered by cloud platforms. Location-Based Services (LBS) often exploit cloud platforms to host positioning and localisation systems. This paper introduces a systematic review of current positioning platforms for GNSS-denied scenarios. We have undertaken a comprehensive analysis of each component of the positioning and localisation systems, including techniques, protocols, standards, and cloud services used in the state-of-the-art deployments. Furthermore, this paper identifies the limitations of existing solutions, outlining shortcomings in areas that are rarely subjected to scrutiny in existing reviews of indoor positioning, such as computing paradigms, privacy, and fault tolerance. We then examine contributions in the areas of efficient computation, interoperability, positioning, and localisation. 
Finally, we provide a brief discussion concerning the challenges for cloud platforms based on GNSS-denied scenarios.}, } @article {pmid35009649, year = {2021}, author = {Ali, A and Iqbal, MM and Jamil, H and Akbar, H and Muthanna, A and Ammi, M and Althobaiti, MM}, title = {Multilevel Central Trust Management Approach for Task Scheduling on IoT-Based Mobile Cloud Computing.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {1}, pages = {}, pmid = {35009649}, issn = {1424-8220}, mesh = {Algorithms ; *Cloud Computing ; Computers, Handheld ; *Trust ; }, abstract = {With the increasing number of mobile devices and IoT devices across a wide range of real-life applications, our mobile cloud computing devices will not cope with this growing number of audiences soon, which implies and demands the need to shift to fog computing. Task scheduling is one of the most demanding scopes after the trust computation inside the trustable nodes. The mobile devices and IoT devices transfer the resource-intensive tasks towards mobile cloud computing. Some tasks are resource-intensive and not trustable to allocate to the mobile cloud computing resources. This consequently gives rise to trust evaluation and data sync-up of devices joining and leaving the network. The resources are more intensive for cloud computing and mobile cloud computing. Time, energy, and resources are wasted due to the nontrustable nodes. This research article proposes a multilevel trust enhancement approach for efficient task scheduling in mobile cloud environments. We first calculate the trustable tasks needed to offload towards the mobile cloud computing. Then, an efficient and dynamic scheduler is added to enhance the task scheduling after trust computation using social and environmental trust computation techniques. 
To improve the time and energy efficiency of IoT and mobile devices using the proposed technique, the energy computation and time request computation are compared with the existing methods from literature, which identified improvements in the results. Our proposed approach is centralized to tackle constant SyncUPs of incoming devices' trust values with mobile cloud computing. With the benefits of mobile cloud computing, the centralized data distribution method is a positive approach.}, } @article {pmid35009609, year = {2021}, author = {Rocha-Jácome, C and Carvajal, RG and Chavero, FM and Guevara-Cabezas, E and Hidalgo Fort, E}, title = {Industry 4.0: A Proposal of Paradigm Organization Schemes from a Systematic Literature Review.}, journal = {Sensors (Basel, Switzerland)}, volume = {22}, number = {1}, pages = {}, pmid = {35009609}, issn = {1424-8220}, support = {PID2019-107258RB-C31//AEI/FEDER/ ; 802C2000003//Agencia IDEA/ ; }, mesh = {Bibliometrics ; Humans ; *Industry ; *Technology ; }, abstract = {Currently, the concept of Industry 4.0 is well known; however, it is extremely complex, as it is constantly evolving and innovating. It includes the participation of many disciplines and areas of knowledge as well as the integration of many technologies, both mature and emerging, but working in collaboration and relying on their study and implementation under the novel criteria of Cyber-Physical Systems. This study starts with an exhaustive search for updated scientific information of which a bibliometric analysis is carried out with results presented in different tables and graphs. Subsequently, based on the qualitative analysis of the references, we present two proposals for the schematic analysis of Industry 4.0 that will help academia and companies to support digital transformation studies. 
The results will allow us to perform a simple alternative analysis of Industry 4.0 to understand the functions and scope of the integrating technologies to achieve a better collaboration of each area of knowledge and each professional, considering the potential and limitations of each one, supporting the planning of an appropriate strategy, especially in the management of human resources, for the successful execution of the digital transformation of the industry.}, } @article {pmid35005616, year = {2021}, author = {Goudarzi, A and Moya-Galé, G}, title = {Automatic Speech Recognition in Noise for Parkinson's Disease: A Pilot Study.}, journal = {Frontiers in artificial intelligence}, volume = {4}, number = {}, pages = {809321}, pmid = {35005616}, issn = {2624-8212}, abstract = {The sophistication of artificial intelligence (AI) technologies has significantly advanced in the past decade. However, the observed unpredictability and variability of AI behavior in noisy signals is still underexplored and represents a challenge when trying to generalize AI behavior to real-life environments, especially for people with a speech disorder, who already experience reduced speech intelligibility. In the context of developing assistive technology for people with Parkinson's disease using automatic speech recognition (ASR), this pilot study reports on the performance of Google Cloud speech-to-text technology with dysarthric and healthy speech in the presence of multi-talker babble noise at different intensity levels. 
Despite sensitivities and shortcomings, it is possible to control the performance of these systems with current tools in order to measure speech intelligibility in real-life conditions.}, } @article {pmid35003323, year = {2021}, author = {Almusallam, N and Alabdulatif, A and Alarfaj, F}, title = {Analysis of Privacy-Preserving Edge Computing and Internet of Things Models in Healthcare Domain.}, journal = {Computational and mathematical methods in medicine}, volume = {2021}, number = {}, pages = {6834800}, pmid = {35003323}, issn = {1748-6718}, mesh = {Cloud Computing ; Computational Biology ; *Computer Security ; *Delivery of Health Care ; Electronic Health Records ; Humans ; *Internet of Things ; *Privacy ; }, abstract = {The healthcare sector is rapidly being transformed to one that operates in new computing environments. With researchers increasingly committed to finding and expanding healthcare solutions to include the Internet of Things (IoT) and edge computing, there is a need to monitor more closely than ever the data being collected, shared, processed, and stored. The advent of cloud, IoT, and edge computing paradigms poses huge risks towards the privacy of data, especially, in the healthcare environment. However, there is a lack of comprehensive research focused on seeking efficient and effective solutions that ensure data privacy in the healthcare domain. The data being collected and processed by healthcare applications is sensitive, and its manipulation by malicious actors can have catastrophic repercussions. This paper discusses the current landscape of privacy-preservation solutions in IoT and edge healthcare applications. It describes the common techniques adopted by researchers to integrate privacy in their healthcare solutions. Furthermore, the paper discusses the limitations of these solutions in terms of their technical complexity, effectiveness, and sustainability. 
The paper closes with a summary and discussion of the challenges of safeguarding privacy in IoT and edge healthcare solutions which need to be resolved for future applications.}, } @article {pmid35002704, year = {2021}, author = {Wang, S and Hou, Y and Li, X and Meng, X and Zhang, Y and Wang, X}, title = {Practical Implementation of Artificial Intelligence-Based Deep Learning and Cloud Computing on the Application of Traditional Medicine and Western Medicine in the Diagnosis and Treatment of Rheumatoid Arthritis.}, journal = {Frontiers in pharmacology}, volume = {12}, number = {}, pages = {765435}, pmid = {35002704}, issn = {1663-9812}, abstract = {Rheumatoid arthritis (RA), an autoimmune disease of unknown etiology, is a serious threat to the health of middle-aged and elderly people. Although western medicine, traditional medicine such as traditional Chinese medicine, Tibetan medicine and other ethnic medicine have shown certain advantages in the diagnosis and treatment of RA, there are still some practical shortcomings, such as delayed diagnosis, improper treatment scheme and unclear drug mechanism. At present, the applications of artificial intelligence (AI)-based deep learning and cloud computing has aroused wide attention in the medical and health field, especially in screening potential active ingredients, targets and action pathways of single drugs or prescriptions in traditional medicine and optimizing disease diagnosis and treatment models. Integrated information and analysis of RA patients based on AI and medical big data will unquestionably benefit more RA patients worldwide. In this review, we mainly elaborated the application status and prospect of AI-assisted deep learning and cloud computation-oriented western medicine and traditional medicine on the diagnosis and treatment of RA in different stages. 
It can be predicted that with the help of AI, more pharmacological mechanisms of effective ethnic drugs against RA will be elucidated and more accurate solutions will be provided for the treatment and diagnosis of RA in the future.}, } @article {pmid35002664, year = {2021}, author = {Bai, Y and Liu, Q and Wu, W and Feng, Y}, title = {cuSCNN: A Secure and Batch-Processing Framework for Privacy-Preserving Convolutional Neural Network Prediction on GPU.}, journal = {Frontiers in computational neuroscience}, volume = {15}, number = {}, pages = {799977}, pmid = {35002664}, issn = {1662-5188}, abstract = {The emerging topic of privacy-preserving deep learning as a service has attracted increasing attention in recent years, which focuses on building an efficient and practical neural network prediction framework to secure client and model-holder data privately on the cloud. In such a task, the time cost of performing the secure linear layers is expensive, where matrix multiplication is the atomic operation. Most existing mix-based solutions heavily emphasized employing BGV-based homomorphic encryption schemes to secure the linear layer on the CPU platform. However, they suffer an efficiency and energy loss when dealing with a larger-scale dataset, due to the complicated encoded methods and intractable ciphertext operations. To address it, we propose cuSCNN, a secure and efficient framework to perform the privacy prediction task of a convolutional neural network (CNN), which can flexibly perform on the GPU platform. Its main idea is 2-fold: (1) To avoid the trivial and complicated homomorphic matrix computations brought by BGV-based solutions, it adopts GSW-based homomorphic matrix encryption to efficiently enable the linear layers of CNN, which is a naive method to secure matrix computation operations. 
(2) To improve the computation efficiency on GPU, a hybrid optimization approach based on CUDA (Compute Unified Device Architecture) has been proposed to improve the parallelism level and memory access speed when performing the matrix multiplication on GPU. Extensive experiments are conducted on industrial datasets and have shown the superior performance of the proposed cuSCNN framework in terms of runtime and power consumption compared to the other frameworks.}, } @article {pmid35002476, year = {2022}, author = {Zhu, L and Wang, C and He, Z and Zhang, Y}, title = {A lightweight automatic sleep staging method for children using single-channel EEG based on edge artificial intelligence.}, journal = {World wide web}, volume = {25}, number = {5}, pages = {1883-1903}, pmid = {35002476}, issn = {1573-1413}, abstract = {With the development of telemedicine and edge computing, edge artificial intelligence (AI) will become a new development trend for smart medicine. On the other hand, nearly one-third of children suffer from sleep disorders. However, all existing sleep staging methods are for adults. Therefore, we adapted edge AI to develop a lightweight automatic sleep staging method for children using single-channel EEG. The trained sleep staging model will be deployed to edge smart devices so that the sleep staging can be implemented on edge devices which will greatly save network resources and improving the performance and privacy of sleep staging application. Then the results and hypnogram will be uploaded to the cloud server for further analysis by the physicians to get sleep disease diagnosis reports and treatment opinions. We utilized 1D convolutional neural networks (1D-CNN) and long short term memory (LSTM) to build our sleep staging model, named CSleepNet. We tested the model on our children's sleep (CS) dataset and sleep-EDFX dataset. 
For the CS dataset, we experimented with F4-M1 channel EEG using four different loss functions, and the logcosh performed best with overall accuracy of 83.06% and F1-score of 76.50%. We used Fpz-Cz and Pz-Oz channel EEG to train our model in Sleep-EDFX dataset, and achieved an accuracy of 86.41% without manual feature extraction. The experimental results show that our method has great potential. It not only plays an important role in sleep-related research, but also can be widely used in the classification of other time sequences physiological signals.}, } @article {pmid35002010, year = {2022}, author = {Peng, Y and Liu, E and Peng, S and Chen, Q and Li, D and Lian, D}, title = {Using artificial intelligence technology to fight COVID-19: a review.}, journal = {Artificial intelligence review}, volume = {55}, number = {6}, pages = {4941-4977}, pmid = {35002010}, issn = {0269-2821}, abstract = {In late December 2019, a new type of coronavirus was discovered, which was later named severe acute respiratory syndrome coronavirus 2(SARS-CoV-2). Since its discovery, the virus has spread globally, with 2,975,875 deaths as of 15 April 2021, and has had a huge impact on our health systems and economy. How to suppress the continued spread of new coronary pneumonia is the main task of many scientists and researchers. The introduction of artificial intelligence technology has provided a huge contribution to the suppression of the new coronavirus. This article discusses the main application of artificial intelligence technology in the suppression of coronavirus from three major aspects of identification, prediction, and development through a large amount of literature research, and puts forward the current main challenges and possible development directions. 
The results show that it is an effective measure to combine artificial intelligence technology with a variety of new technologies to predict and identify COVID-19 patients.}, } @article {pmid34999074, year = {2022}, author = {Elnashar, A and Zeng, H and Wu, B and Gebremicael, TG and Marie, K}, title = {Assessment of environmentally sensitive areas to desertification in the Blue Nile Basin driven by the MEDALUS-GEE framework.}, journal = {The Science of the total environment}, volume = {815}, number = {}, pages = {152925}, doi = {10.1016/j.scitotenv.2022.152925}, pmid = {34999074}, issn = {1879-1026}, mesh = {Climate ; *Conservation of Natural Resources ; *Soil ; }, abstract = {Assessing environmentally sensitive areas (ESA) to desertification and understanding their primary drivers are necessary for applying targeted management practices to combat land degradation at the basin scale. We have developed the MEditerranean Desertification And Land Use framework in the Google Earth Engine cloud platform (MEDALUS-GEE) to map and assess the ESA index at 300 m grids in the Blue Nile Basin (BNB). The ESA index was derived from elaborating 19 key indicators representing soil, climate, vegetation, and management through the geometric mean of their sensitivity scores. The results showed that 43.4%, 28.8%, and 70.4% of the entire BNB, Upper BNB, and Lower BNB, respectively, are highly susceptible to desertification, indicating appropriate land and water management measures should be urgently implemented. Our findings also showed that the main land degradation drivers are moderate to intensive cultivation across the BNB, high slope gradient and water erosion in the Upper BNB, and low soil organic matter and vegetation cover in the Lower BNB. 
The study presented an integrated monitoring and assessment framework for understanding desertification processes to help achieve land-related sustainable development goals.}, } @article {pmid34997109, year = {2022}, author = {Alrebdi, N and Alabdulatif, A and Iwendi, C and Lian, Z}, title = {SVBE: searchable and verifiable blockchain-based electronic medical records system.}, journal = {Scientific reports}, volume = {12}, number = {1}, pages = {266}, pmid = {34997109}, issn = {2045-2322}, support = {501100007414//Qassim University (QU)/ ; 501100007414//Qassim University (QU)/ ; }, abstract = {Central management of electronic medical systems faces a major challenge because it requires trust in a single entity that cannot effectively protect files from unauthorized access or attacks. This challenge makes it difficult to provide some services in central electronic medical systems, such as file search and verification, although they are needed. This gap motivated us to develop a system based on blockchain that has several characteristics: decentralization, security, anonymity, immutability, and tamper-proof. The proposed system provides several services: storage, verification, and search. The system consists of a smart contract that connects to a decentralized user application through which users can transact with the system. In addition, the system uses an interplanetary file system (IPFS) and cloud computing to store patients' data and files. 
Experimental results and system security analysis show that the system performs search and verification tasks securely and quickly through the network.}, } @article {pmid34996614, year = {2022}, author = {Li, L and Zhang, Y and Geng, Q}, title = {Mean-square bounded consensus of nonlinear multi-agent systems under deception attack.}, journal = {ISA transactions}, volume = {129}, number = {Pt A}, pages = {91-101}, doi = {10.1016/j.isatra.2021.12.020}, pmid = {34996614}, issn = {1879-2022}, abstract = {This paper researches mean-square bounded consensus for a nonlinear multi-agent system subjected to randomly occurring deception attack, process and measurement noises. Considering the measurement tampered by the attacker, an estimator is presented to obtain relative accurate state estimation, where the gain is acquired by a recursive algorithm. On this basis, a type of centralized controller is designed combined with cloud computing system. Moreover, from perspective of the defender, a detector is proposed at the side of agent to detect whether the current actuator input is attacked. Using linear matrix inequality, sufficient conditions are given for achieving mean-square bounded consensus and an upper boundary is derived. Finally, validity of the proposed method is illustrated via two simulation examples.}, } @article {pmid34989688, year = {2022}, author = {Cresswell, K and Domínguez Hernández, A and Williams, R and Sheikh, A}, title = {Key Challenges and Opportunities for Cloud Technology in Health Care: Semistructured Interview Study.}, journal = {JMIR human factors}, volume = {9}, number = {1}, pages = {e31246}, pmid = {34989688}, issn = {2292-9495}, abstract = {BACKGROUND: The use of cloud computing (involving storage and processing of data on the internet) in health care has increasingly been highlighted as having great potential in facilitating data-driven innovations. 
Although some provider organizations are reaping the benefits of using cloud providers to store and process their data, others are lagging behind.

OBJECTIVE: We aim to explore the existing challenges and barriers to the use of cloud computing in health care settings and investigate how perceived risks can be addressed.

METHODS: We conducted a qualitative case study of cloud computing in health care settings, interviewing a range of individuals with perspectives on supply, implementation, adoption, and integration of cloud technology. Data were collected through a series of in-depth semistructured interviews exploring current applications, implementation approaches, challenges encountered, and visions for the future. The interviews were transcribed and thematically analyzed using NVivo 12 (QSR International). We coded the data based on a sociotechnical coding framework developed in related work.

RESULTS: We interviewed 23 individuals between September 2020 and November 2020, including professionals working across major cloud providers, health care provider organizations, innovators, small and medium-sized software vendors, and academic institutions. The participants were united by a common vision of a cloud-enabled ecosystem of applications and by drivers surrounding data-driven innovation. The identified barriers to progress included the cost of data migration and skill gaps to implement cloud technologies within provider organizations, the cultural shift required to move to externally hosted services, a lack of user pull as many benefits were not visible to those providing frontline care, and a lack of interoperability standards and central regulations.

CONCLUSIONS: Implementations need to be viewed as a digitally enabled transformation of services, driven by skill development, organizational change management, and user engagement, to facilitate the implementation and exploitation of cloud-based infrastructures and to maximize returns on investment.}, } @article {pmid34989198, year = {2022}, author = {Fang, Q and Yan, S}, title = {MCX Cloud-a modern, scalable, high-performance and in-browser Monte Carlo simulation platform with cloud computing.}, journal = {Journal of biomedical optics}, volume = {27}, number = {8}, pages = {}, pmid = {34989198}, issn = {1560-2281}, support = {R01 EB026998/EB/NIBIB NIH HHS/United States ; R01 GM114365/GM/NIGMS NIH HHS/United States ; U24 NS124027/NS/NINDS NIH HHS/United States ; }, mesh = {*Cloud Computing ; Computer Simulation ; Computers ; Monte Carlo Method ; *Software ; }, abstract = {SIGNIFICANCE: Despite the ample progress made toward faster and more accurate Monte Carlo (MC) simulation tools over the past decade, the limited usability and accessibility of these advanced modeling tools remain key barriers to widespread use among the broad user community.

AIM: An open-source, high-performance, web-based MC simulator that builds upon modern cloud computing architectures is highly desirable to deliver state-of-the-art MC simulations and hardware acceleration to general users without the need for special hardware installation and optimization.

APPROACH: We have developed a configuration-free, in-browser 3D MC simulation platform-Monte Carlo eXtreme (MCX) Cloud-built upon an array of robust and modern technologies, including a Docker Swarm-based cloud-computing backend and a web-based graphical user interface (GUI) that supports in-browser 3D visualization, asynchronous data communication, and automatic data validation via JavaScript Object Notation (JSON) schemas.

RESULTS: The front-end of the MCX Cloud platform offers an intuitive simulation design, fast 3D data rendering, and convenient simulation sharing. The Docker Swarm container orchestration backend is highly scalable and can support high-demand GPU MC simulations using MCX over a dynamically expandable virtual cluster.

CONCLUSION: MCX Cloud makes fast, scalable, and feature-rich MC simulations readily available to all biophotonics researchers without overhead. It is fully open-source and can be freely accessed at http://mcx.space/cloud.}, } @article {pmid34987566, year = {2021}, author = {Alsuhibany, SA and Abdel-Khalek, S and Algarni, A and Fayomi, A and Gupta, D and Kumar, V and Mansour, RF}, title = {Ensemble of Deep Learning Based Clinical Decision Support System for Chronic Kidney Disease Diagnosis in Medical Internet of Things Environment.}, journal = {Computational intelligence and neuroscience}, volume = {2021}, number = {}, pages = {4931450}, pmid = {34987566}, issn = {1687-5273}, mesh = {*Decision Support Systems, Clinical ; *Deep Learning ; Humans ; *Internet of Things ; Neural Networks, Computer ; *Renal Insufficiency, Chronic/diagnosis ; }, abstract = {Recently, Internet of Things (IoT) and cloud computing environments become commonly employed in several healthcare applications by the integration of monitoring things such as sensors and medical gadgets for observing remote patients. For availing of improved healthcare services, the huge count of data generated by IoT gadgets from the medicinal field can be investigated in the CC environment rather than relying on limited processing and storage resources. At the same time, earlier identification of chronic kidney disease (CKD) becomes essential to reduce the mortality rate significantly. This study develops an ensemble of deep learning based clinical decision support systems (EDL-CDSS) for CKD diagnosis in the IoT environment. The goal of the EDL-CDSS technique is to detect and classify different stages of CKD using the medical data collected by IoT devices and benchmark repositories. In addition, the EDL-CDSS technique involves the design of Adaptive Synthetic (ADASYN) technique for outlier detection process. 
Moreover, an ensemble of three models, namely, deep belief network (DBN), kernel extreme learning machine (KELM), and convolutional neural network with gated recurrent unit (CNN-GRU), are performed. Finally, quasi-oppositional butterfly optimization algorithm (QOBOA) is used for the hyperparameter tuning of the DBN and CNN-GRU models. A wide range of simulations was carried out and the outcomes are studied in terms of distinct measures. A brief outcomes analysis highlighted the supremacy of the EDL-CDSS technique on existing approaches.}, } @article {pmid34983991, year = {2022}, author = {Perkel, JM}, title = {Terra takes the pain out of 'omics' computing in the cloud.}, journal = {Nature}, volume = {601}, number = {7891}, pages = {154--155}, doi = {10.1038/d41586-021-03822-7}, pmid = {34983991}, issn = {1476-4687}, mesh = {Aging/genetics ; Animals ; Biomedical Research ; *Cloud Computing/economics ; Datasets as Topic ; Dogs ; Genome/genetics ; Genomics/economics/*methods ; Humans ; Information Dissemination/*methods ; Internet ; Multicenter Studies as Topic/*methods ; National Human Genome Research Institute (U.S.) ; Pets/genetics ; *Software/economics ; United States ; Workflow ; }, } @article {pmid34978034, year = {2022}, author = {Ha, LT}, title = {Are digital business and digital public services a driver for better energy security? Evidence from a European sample.}, journal = {Environmental science and pollution research international}, volume = {29}, number = {18}, pages = {27232--27256}, pmid = {34978034}, issn = {1614-7499}, mesh = {*Commerce ; }, abstract = {This paper empirically analyses the impacts of the digital transformation process in the business and public sectors on energy security (ES). We employ 8 indicators to represent four aspects of energy security, including availability, acceptability, develop-ability, and sustainability. 
Digital businesses development is captured by e-Commerce (including e-Commerce sales, e-Commerce turnover, e-Commerce web sales) and e-Business (including customer relation management (CRM) usage and cloud usage). Digital public services development is reflected by business mobility and key enablers. Different econometric techniques are utilized in a database of 24 European Union countries from 2011 to 2019. Our estimation results demonstrate that digital businesses play a critical role in improving the acceptability and develop-ability of energy security, while digitalization in public services supports achieving energy sustainability goals. The use of modern digital technology such as big data, cloud computing is extremely important to ensure the security of the energy system, especially the availability of energy. For further discussion on the role of digital public services, we reveal a nonlinear association between digitalization in the public sector and energy intensity and energy consumption, suggesting the acceptability and develop-ability of energy security can be enhanced if the digital transformation process achieves a certain level.}, } @article {pmid34976326, year = {2021}, author = {Wang, B and Xu, L}, title = {Construction of the "Internet Plus" Community Smart Elderly Care Service Platform.}, journal = {Journal of healthcare engineering}, volume = {2021}, number = {}, pages = {4310648}, pmid = {34976326}, issn = {2040-2309}, mesh = {Aged ; *Aging ; Humans ; *Internet ; }, abstract = {With the rapid development of China's market economy and the increasing trend of population aging, the traditional community elderly care service model has exposed more and more problems, such as the imbalance between supply and demand, single service, and lack of flexibility. 
In response to these issues, this research attempts to explore the possible paths and practical challenges of applying the Internet, Internet of Things, mobile networks, big data, and cloud computing to community elderly care services. This research believes that the construction of the "Internet Plus" community smart elderly care services platform is a general trend. Innovating the traditional community elderly care service model is conducive to fully integrating elderly care resources and improving the quality of elderly care services.}, } @article {pmid34976046, year = {2021}, author = {Abd Elaziz, M and Abualigah, L and Ibrahim, RA and Attiya, I}, title = {IoT Workflow Scheduling Using Intelligent Arithmetic Optimization Algorithm in Fog Computing.}, journal = {Computational intelligence and neuroscience}, volume = {2021}, number = {}, pages = {9114113}, pmid = {34976046}, issn = {1687-5273}, mesh = {Algorithms ; Cloud Computing ; Humans ; *Internet of Things ; Workflow ; }, abstract = {Instead of the cloud, the Internet of things (IoT) activities are offloaded into fog computing to boost the quality of services (QoSs) needed by many applications. However, the availability of continuous computing resources on fog computing servers is one of the restrictions for IoT applications since transmitting the large amount of data generated using IoT devices would create network traffic and cause an increase in computational overhead. Therefore, task scheduling is the main problem that needs to be solved efficiently. This study proposes an energy-aware model using an enhanced arithmetic optimization algorithm (AOA) method called AOAM, which addresses fog computing's job scheduling problem to maximize users' QoSs by maximizing the makespan measure. In the proposed AOAM, we enhanced the conventional AOA searchability using the marine predators algorithm (MPA) search operators to address the diversity of the used solutions and local optimum problems. 
The proposed AOAM is validated using several parameters, including various clients, data centers, hosts, virtual machines, tasks, and standard evaluation measures, including the energy and makespan. The obtained results are compared with other state-of-the-art methods; it showed that AOAM is promising and solved task scheduling effectively compared with the other comparative methods.}, } @article {pmid34972098, year = {2021}, author = {Xiao, Y and Wang, K and Liu, W and Peng, K and Wan, F}, title = {Research on rapier loom fault system based on cloud-side collaboration.}, journal = {PloS one}, volume = {16}, number = {12}, pages = {e0260888}, pmid = {34972098}, issn = {1932-6203}, mesh = {*Algorithms ; Bayes Theorem ; *Cloud Computing ; Neural Networks, Computer ; *Textiles ; }, abstract = {The electrical control system of rapier weaving machines is susceptible to various disturbances during operation and is prone to failures. This will seriously affect the production and a fault diagnosis system is needed to reduce this effect. However, the existing popular fault diagnosis systems and methods need to be improved due to the limitations of rapier weaving machine process and electrical characteristics. Based on this, this paper presents an in-depth study of rapier loom fault diagnosis system and proposes a rapier loom fault diagnosis method combining edge expert system and cloud-based rough set and Bayesian network. By analyzing the process and fault characteristics of rapier loom, the electrical faults of rapier loom are classified into common faults and other faults according to the frequency of occurrence. An expert system is built in the field for edge computing based on knowledge fault diagnosis experience to diagnose common loom faults and reduce the computing pressure in the cloud. Collect loom fault data in the cloud, train loom fault diagnosis algorithms to diagnose other faults, and handle other faults diagnosed by the expert system. 
The effectiveness of loom fault diagnosis is verified by on-site operation and remote monitoring of the loom human-machine interaction system. Technical examples are provided for the research of loom fault diagnosis system.}, } @article {pmid34960506, year = {2021}, author = {Lee, S and Yoon, D and Yeo, S and Oh, S}, title = {Mitigating Cold Start Problem in Serverless Computing with Function Fusion.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {24}, pages = {}, pmid = {34960506}, issn = {1424-8220}, support = {UD190033ED//Agency for Defense Development/ ; }, mesh = {*Artificial Intelligence ; *Software ; Workflow ; }, abstract = {As Artificial Intelligence (AI) is becoming ubiquitous in many applications, serverless computing is also emerging as a building block for developing cloud-based AI services. Serverless computing has received much interest because of its simplicity, scalability, and resource efficiency. However, due to the trade-off with resource efficiency, serverless computing suffers from the cold start problem, that is, a latency between a request arrival and function execution. The cold start problem significantly influences the overall response time of workflow that consists of functions because the cold start may occur in every function within the workflow. Function fusion can be one of the solutions to mitigate the cold start latency of a workflow. If two functions are fused into a single function, the cold start of the second function is removed; however, if parallel functions are fused, the workflow response time can be increased because the parallel functions run sequentially even if the cold start latency is reduced. This study presents an approach to mitigate the cold start latency of a workflow using function fusion while considering a parallel run. 
First, we identify three latencies that affect response time, present a workflow response time model considering the latency, and efficiently find a fusion solution that can optimize the response time on the cold start. Our method shows a response time of 28-86% of the response time of the original workflow in five workflows.}, } @article {pmid34960483, year = {2021}, author = {Salih, S and Hamdan, M and Abdelmaboud, A and Abdelaziz, A and Abdelsalam, S and Althobaiti, MM and Cheikhrouhou, O and Hamam, H and Alotaibi, F}, title = {Prioritising Organisational Factors Impacting Cloud ERP Adoption and the Critical Issues Related to Security, Usability, and Vendors: A Systematic Literature Review.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {24}, pages = {}, pmid = {34960483}, issn = {1424-8220}, mesh = {*Cloud Computing ; *Commerce ; }, abstract = {Cloud ERP is a type of enterprise resource planning (ERP) system that runs on the vendor's cloud platform instead of an on-premises network, enabling companies to connect through the Internet. The goal of this study was to rank and prioritise the factors driving cloud ERP adoption by organisations and to identify the critical issues in terms of security, usability, and vendors that impact adoption of cloud ERP systems. The assessment of critical success factors (CSFs) in on-premises ERP adoption and implementation has been well documented; however, no previous research has been carried out on CSFs in cloud ERP adoption. Therefore, the contribution of this research is to provide research and practice with the identification and analysis of 16 CSFs through a systematic literature review, where 73 publications on cloud ERP adoption were assessed from a range of different conferences and journals, using inclusion and exclusion criteria. 
Drawing from the literature, we found security, usability, and vendors were the top three most widely cited critical issues for the adoption of cloud-based ERP; hence, the second contribution of this study was an integrative model constructed with 12 drivers based on the security, usability, and vendor characteristics that may have greater influence as the top critical issues in the adoption of cloud ERP systems. We also identified critical gaps in current research, such as the inconclusiveness of findings related to security critical issues, usability critical issues, and vendor critical issues, by highlighting the most important drivers influencing those issues in cloud ERP adoption and the lack of discussion on the nature of the criticality of those CSFs. This research will aid in the development of new strategies or the revision of existing strategies and polices aimed at effectively integrating cloud ERP into cloud computing infrastructure. It will also allow cloud ERP suppliers to determine organisations' and business owners' expectations and implement appropriate tactics. A better understanding of the CSFs will narrow the field of failure and assist practitioners and managers in increasing their chances of success.}, } @article {pmid34960455, year = {2021}, author = {Bucur, V and Miclea, LC}, title = {Multi-Cloud Resource Management Techniques for Cyber-Physical Systems.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {24}, pages = {}, pmid = {34960455}, issn = {1424-8220}, mesh = {*Autonomous Vehicles ; *Cloud Computing ; Humans ; Software ; }, abstract = {Information technology is based on data management between various sources. Software projects, as varied as simple applications or as complex as self-driving cars, are heavily reliant on the amounts, and types, of data ingested by one or more interconnected systems. Data is not only consumed but is transformed or mutated which requires copious amounts of computing resources. 
One of the most exciting areas of cyber-physical systems, autonomous vehicles, makes heavy use of deep learning and AI to mimic the highly complex actions of a human driver. Attempting to map human behavior (a large and abstract concept) requires large amounts of data, used by AIs to increase their knowledge and better attempt to solve complex problems. This paper outlines a full-fledged solution for managing resources in a multi-cloud environment. The purpose of this API is to accommodate ever-increasing resource requirements by leveraging the multi-cloud and using commercially available tools to scale resources and make systems more resilient while remaining as cloud agnostic as possible. To that effect, the work herein will consist of an architectural breakdown of the resource management API, a low-level description of the implementation and an experiment aimed at proving the feasibility, and applicability of the systems described.}, } @article {pmid34960384, year = {2021}, author = {Hameed, SS and Selamat, A and Abdul Latiff, L and Razak, SA and Krejcar, O and Fujita, H and Ahmad Sharif, MN and Omatu, S}, title = {A Hybrid Lightweight System for Early Attack Detection in the IoMT Fog.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {24}, pages = {}, pmid = {34960384}, issn = {1424-8220}, support = {FRGS/1/2018/ICT04/UTM/01/1//Ministry of Higher Education/ ; Vot 4L876//Ministry of Higher Education/ ; }, mesh = {Bayes Theorem ; Big Data ; Early Diagnosis ; *Internet of Things ; }, abstract = {Cyber-attack detection via on-gadget embedded models and cloud systems are widely used for the Internet of Medical Things (IoMT). The former has a limited computation ability, whereas the latter has a long detection time. Fog-based attack detection is alternatively used to overcome these problems. However, the current fog-based systems cannot handle the ever-increasing IoMT's big data. 
Moreover, they are not lightweight and are designed for network attack detection only. In this work, a hybrid (for host and network) lightweight system is proposed for early attack detection in the IoMT fog. In an adaptive online setting, six different incremental classifiers were implemented, namely a novel Weighted Hoeffding Tree Ensemble (WHTE), Incremental K-Nearest Neighbors (IKNN), Incremental Naïve Bayes (INB), Hoeffding Tree Majority Class (HTMC), Hoeffding Tree Naïve Bayes (HTNB), and Hoeffding Tree Naïve Bayes Adaptive (HTNBA). The system was benchmarked with seven heterogeneous sensors and a NetFlow data infected with nine types of recent attack. The results showed that the proposed system worked well on the lightweight fog devices with ~100% accuracy, a low detection time, and a low memory usage of less than 6 MiB. The single-criteria comparative analysis showed that the WHTE ensemble was more accurate and was less sensitive to the concept drift.}, } @article {pmid34960320, year = {2021}, author = {Alwakeel, AM}, title = {An Overview of Fog Computing and Edge Computing Security and Privacy Issues.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {24}, pages = {}, pmid = {34960320}, issn = {1424-8220}, mesh = {*Cloud Computing ; *Privacy ; }, abstract = {With the advancement of different technologies such as 5G networks and IoT the use of different cloud computing technologies became essential. Cloud computing allowed intensive data processing and warehousing solution. Two different new cloud technologies that inherit some of the traditional cloud computing paradigm are fog computing and edge computing that is aims to simplify some of the complexity of cloud computing and leverage the computing capabilities within the local network in order to preform computation tasks rather than carrying it to the cloud. This makes this technology fits with the properties of IoT systems. 
However, using such technology introduces several new security and privacy challenges that could be huge obstacle against implementing these technologies. In this paper, we survey some of the main security and privacy challenges that faces fog and edge computing illustrating how these security issues could affect the work and implementation of edge and fog computing. Moreover, we present several countermeasures to mitigate the effect of these security issues.}, } @article {pmid34956357, year = {2021}, author = {Xie, P and Ma, E and Xu, Z}, title = {Cloud Computing Image Recognition System Assists the Construction of the Internet of Things Model of Administrative Management Event Parameters.}, journal = {Computational intelligence and neuroscience}, volume = {2021}, number = {}, pages = {8630256}, pmid = {34956357}, issn = {1687-5273}, mesh = {Algorithms ; *Cloud Computing ; *Internet of Things ; Software ; }, abstract = {In order to successfully apply the Internet of Things and cloud computing to the administrative management of spatial structures and realize the systematization, digitization, and intelligence of administrative management, this article draws on research experience in related fields and considers the data characteristics and computing tasks of administrative management. The whole cycle of transmission, storage, postprocessing, and visualization is the main line of research, and a cloud computing-based spatial structure administrative management IoT system is constructed. First, by summarizing the application status of the Internet of Things, the general Internet of Things system is summarized into three levels, and combined with the specific work in the spatial structure administrative management, the overall framework of the spatial structure administrative management of the Internet of Things system is proposed, and the functional sublayers are carried out. 
Secondly, in response to the above problems, through the traditional image recognition system research and practical application investigation, in order to meet the user's requirements for the computing efficiency and recognition accuracy of the image recognition system, an image recognition system in the cloud computing environment is proposed. It proposes a fuzzy evaluation algorithm of health grade hierarchy analysis optimized for the index system and scoring system and a calculation method that uses time series to identify regular outliers. The optical image pixel-level fusion method and the infrared and visible image fusion method based on complementary information are proposed, and the image fusion software is developed. Finally, in order to enable the application layer to use cluster resources to efficiently and intelligently process massive monitoring data containing redundancy, heterogeneity, anomalies, and many other defects, according to the calculation process of each specific task of data preprocessing and postprocessing in the application layer, demonstrations are made one by one. After analysis, it is concluded that vertical storage of data blocks according to different sensor channels is the optimal strategy.}, } @article {pmid34951597, year = {2021}, author = {Yang, JS and Cuomo, RE and Purushothaman, V and Nali, M and Shah, N and Bardier, C and Obradovich, N and Mackey, T}, title = {Campus Smoking Policies and Smoking-Related Twitter Posts Originating From California Public Universities: Retrospective Study.}, journal = {JMIR formative research}, volume = {5}, number = {12}, pages = {e33331}, pmid = {34951597}, issn = {2561-326X}, abstract = {BACKGROUND: The number of colleges and universities with smoke- or tobacco-free campus policies has been increasing. 
The effects of campus smoking policies on overall sentiment, particularly among young adult populations, are more difficult to assess owing to the changing tobacco and e-cigarette product landscape and differential attitudes toward policy implementation and enforcement.

OBJECTIVE: The goal of the study was to retrospectively assess the campus climate toward tobacco use by comparing tweets from California universities with and those without smoke- or tobacco-free campus policies.

METHODS: Geolocated Twitter posts from 2015 were collected using the Twitter public application programming interface in combination with cloud computing services on Amazon Web Services. Posts were filtered for tobacco products and behavior-related keywords. A total of 42,877,339 posts were collected from 2015, with 2837 originating from a University of California or California State University system campus, and 758 of these manually verified as being about smoking. Chi-square tests were conducted to determine if there were significant differences in tweet user sentiments between campuses that were smoke- or tobacco-free (all University of California campuses and California State University, Fullerton) compared to those that were not. A separate content analysis of tweets included in chi-square tests was conducted to identify major themes by campus smoking policy status.

RESULTS: The percentage of positive sentiment tweets toward tobacco use was higher on campuses without a smoke- or tobacco-free campus policy than on campuses with a smoke- or tobacco-free campus policy (76.7% vs 66.4%, P=.03). Higher positive sentiment on campuses without a smoke- or tobacco-free campus policy may have been driven by general comments about one's own smoking behavior and comments about smoking as a general behavior. Positive sentiment tweets originating from campuses without a smoke- or tobacco-free policy had greater variation in tweet type, which may have also contributed to differences in sentiment among universities.

CONCLUSIONS: Our study introduces preliminary data suggesting that campus smoke- and tobacco-free policies are associated with a reduction in positive sentiment toward smoking. However, continued expressions and intentions to smoke and reports of one's own smoking among Twitter users suggest a need for more research to better understand the dynamics between implementation of smoke- and tobacco-free policies and resulting tobacco behavioral sentiment.}, } @article {pmid34948885, year = {2021}, author = {Garcés-Jiménez, A and Calderón-Gómez, H and Gómez-Pulido, JM and Gómez-Pulido, JA and Vargas-Lombardo, M and Castillo-Sequera, JL and Aguirre, MP and Sanz-Moreno, J and Polo-Luque, ML and Rodríguez-Puyol, D}, title = {Medical Prognosis of Infectious Diseases in Nursing Homes by Applying Machine Learning on Clinical Data Collected in Cloud Microservices.}, journal = {International journal of environmental research and public health}, volume = {18}, number = {24}, pages = {}, pmid = {34948885}, issn = {1660-4601}, mesh = {Aged ; *Biomedical Research ; Cloud Computing ; *Communicable Diseases/diagnosis/epidemiology ; Humans ; Machine Learning ; Nursing Homes ; }, abstract = {BACKGROUND: treating infectious diseases in elderly individuals is difficult; patient referral to emergency services often occurs, since the elderly tend to arrive at consultations with advanced, serious symptoms.

AIM: it was hypothesized that anticipating an infectious disease diagnosis by a few days could significantly improve a patient's well-being and reduce the burden on emergency health system services.

METHODS: vital signs from residents were taken daily and transferred to a database in the cloud. Classifiers were used to recognize patterns in the spatial domain process of the collected data. Doctors reported their diagnoses when any disease presented. A flexible microservice architecture provided access and functionality to the system.

RESULTS: combining two different domains, health and technology, is not easy, but the results are encouraging. The classifiers reported good results; the system has been well accepted by medical personnel and is proving to be cost-effective and a good solution to service disadvantaged areas. In this context, this research found the importance of certain clinical variables in the identification of infectious diseases.

CONCLUSIONS: this work explores how to apply mobile communications, cloud services, and machine learning technology, in order to provide efficient tools for medical staff in nursing homes. The scalable architecture can be extended to big data applications that may extract valuable knowledge patterns for medical research.}, } @article {pmid34941535, year = {2022}, author = {Qiu, J and Yan, X and Wang, W and Wei, W and Fang, K}, title = {Skeleton-Based Abnormal Behavior Detection Using Secure Partitioned Convolutional Neural Network Model.}, journal = {IEEE journal of biomedical and health informatics}, volume = {26}, number = {12}, pages = {5829--5840}, doi = {10.1109/JBHI.2021.3137334}, pmid = {34941535}, issn = {2168-2208}, mesh = {Humans ; *Neural Networks, Computer ; *Privacy ; Computer Security ; Skeleton ; }, abstract = {The abnormal behavior detection is the vital for evaluation of daily-life health status of the patient with cognitive impairment. Previous studies about abnormal behavior detection indicate that convolution neural network (CNN)-based computer vision owns the high robustness and accuracy for detection. However, executing CNN model on the cloud possible incurs a privacy disclosure problem during data transmission, and the high computation overhead makes difficult to execute the model on edge-end IoT devices with a well real-time performance. In this paper, we realize a skeleton-based abnormal behavior detection, and propose a secure partitioned CNN model (SP-CNN) to extract human skeleton keypoints and achieve safely collaborative computing by deploying different CNN model layers on the cloud and the IoT device. Because, the data outputted from the IoT device are processed by the several CNN layers instead of transmitting the sensitive video data, objectively it reduces the risk of privacy disclosure. Moreover, we also design an encryption method based on channel state information (CSI) to guarantee the sensitive data security. 
At last, we apply SP-CNN in abnormal behavior detection to evaluate its effectiveness. The experiment results illustrate that the efficiency of the abnormal behavior detection based on SP-CNN is at least 33.2% higher than the state-of-the-art methods, and its detection accuracy arrives to 97.54%.}, } @article {pmid34939144, year = {2021}, author = {Lu, ZX and Qian, P and Bi, D and Ye, ZW and He, X and Zhao, YH and Su, L and Li, SL and Zhu, ZL}, title = {Application of AI and IoT in Clinical Medicine: Summary and Challenges.}, journal = {Current medical science}, volume = {41}, number = {6}, pages = {1134--1150}, pmid = {34939144}, issn = {2523-899X}, mesh = {Algorithms ; Artificial Intelligence/*trends ; *Big Data ; Clinical Medicine/*trends ; Cloud Computing/*trends ; Humans ; Internet of Things/*trends ; Machine Learning ; }, abstract = {The application of artificial intelligence (AI) technology in the medical field has experienced a long history of development. In turn, some long-standing points and challenges in the medical field have also prompted diverse research teams to continue to explore AI in depth. With the development of advanced technologies such as the Internet of Things (IoT), cloud computing, big data, and 5G mobile networks, AI technology has been more widely adopted in the medical field. In addition, the in-depth integration of AI and IoT technology enables the gradual improvement of medical diagnosis and treatment capabilities so as to provide services to the public in a more effective way. 
In this work, we examine the technical basis of IoT, cloud computing, big data analysis and machine learning involved in clinical medicine, combined with concepts of specific algorithms such as activity recognition, behavior recognition, anomaly detection, assistant decision-making system, to describe the scenario-based applications of remote diagnosis and treatment collaboration, neonatal intensive care unit, cardiology intensive care unit, emergency first aid, venous thromboembolism, monitoring nursing, image-assisted diagnosis, etc. We also systematically summarize the application of AI and IoT in clinical medicine, analyze the main challenges thereof, and comment on the trends and future developments in this field.}, } @article {pmid34938329, year = {2021}, author = {Siam, AI and Almaiah, MA and Al-Zahrani, A and Elazm, AA and El Banby, GM and El-Shafai, W and El-Samie, FEA and El-Bahnasawy, NA}, title = {Secure Health Monitoring Communication Systems Based on IoT and Cloud Computing for Medical Emergency Applications.}, journal = {Computational intelligence and neuroscience}, volume = {2021}, number = {}, pages = {8016525}, pmid = {34938329}, issn = {1687-5273}, mesh = {*Cloud Computing ; Communication ; Humans ; *Internet of Things ; Oxygen Saturation ; Reproducibility of Results ; }, abstract = {Smart health surveillance technology has attracted wide attention between patients and professionals or specialists to provide early detection of critical abnormal situations without the need to be in direct contact with the patient. This paper presents a secure smart monitoring portable multivital signal system based on Internet-of-Things (IoT) technology. The implemented system is designed to measure the key health parameters: heart rate (HR), blood oxygen saturation (SpO2), and body temperature, simultaneously. The captured physiological signals are processed and encrypted using the Advanced Encryption Standard (AES) algorithm before sending them to the cloud. 
An ESP8266 integrated unit is used for processing, encryption, and providing connectivity to the cloud over Wi-Fi. On the other side, trusted medical organization servers receive and decrypt the measurements and display the values on the monitoring dashboard for the authorized specialists. The proposed system measurements are compared with a number of commercial medical devices. Results demonstrate that the measurements of the proposed system are within the 95% confidence interval. Moreover, Root Mean Squared Error (RMSE), Mean Absolute Error (MAE), and Mean Relative Error (MRE) for the proposed system are calculated as 1.44, 1.12, and 0.012, respectively, for HR, 1.13, 0.92, and 0.009, respectively, for SpO2, and 0.13, 0.11, and 0.003, respectively, for body temperature. These results demonstrate the high accuracy and reliability of the proposed system.}, } @article {pmid34934784, year = {2021}, author = {Abdul Hadi, M and Schmid, J and Trabesinger, S and Brillinger, M}, title = {High-frequency machine datasets captured via Edge Device from Spinner U5-630 milling machine.}, journal = {Data in brief}, volume = {39}, number = {}, pages = {107670}, pmid = {34934784}, issn = {2352-3409}, abstract = {The high-frequency (HF) machine data is retrieved from the Spinner U5-630 milling machine via an Edge Device. Unlike cloud computing, an Edge Device refers to distributed data processing of devices in proximity that generate data, which can thereby be used for analysis [1,2]. This data has a sampling rate of 2ms and hence, a frequency of 500Hz. The HF machine data is from various experiments performed. There are 2 experiments performed (parts 1 and 2). The experimented part 1 has 12 .json data files and part 2 has 11 .json files. In total, there are 23 files of HF machine data from 23 experiments. The HF machine data has vast potential for analysis as it contains all the information from the machine during the machining process. 
One part of the information was used in our case to calculate the energy consumption of the machine. Similarly, the data can be used for retrieving information of torque, commanded and actual speed, NC code, current, etc.}, } @article {pmid34919694, year = {2022}, author = {Kahn, MG and Mui, JY and Ames, MJ and Yamsani, AK and Pozdeyev, N and Rafaels, N and Brooks, IM}, title = {Migrating a research data warehouse to a public cloud: challenges and opportunities.}, journal = {Journal of the American Medical Informatics Association : JAMIA}, volume = {29}, number = {4}, pages = {592-600}, pmid = {34919694}, issn = {1527-974X}, support = {UL1 TR002535/TR/NCATS NIH HHS/United States ; }, mesh = {*Cloud Computing ; *Data Warehousing ; }, abstract = {OBJECTIVE: Clinical research data warehouses (RDWs) linked to genomic pipelines and open data archives are being created to support innovative, complex data-driven discoveries. The computing and storage needs of these research environments may quickly exceed the capacity of on-premises systems. New RDWs are migrating to cloud platforms for the scalability and flexibility needed to meet these challenges. We describe our experience in migrating a multi-institutional RDW to a public cloud.

MATERIALS AND METHODS: This study is descriptive. Primary materials included internal and public presentations before and after the transition, analysis documents, and actual billing records. Findings were aggregated into topical categories.

RESULTS: Eight categories of migration issues were identified. Unanticipated challenges included legacy system limitations; network, computing, and storage architectures that realize performance and cost benefits in the face of hyper-innovation, complex security reviews and approvals, and limited cloud consulting expertise.

DISCUSSION: Cloud architectures enable previously unavailable capabilities, but numerous pitfalls can impede realizing the full benefits of a cloud environment. Rapid changes in cloud capabilities can quickly obsolete existing architectures and associated institutional policies. Touchpoints with on-premise networks and systems can add unforeseen complexity. Governance, resource management, and cost oversight are critical to allow rapid innovation while minimizing wasted resources and unnecessary costs.

CONCLUSIONS: Migrating our RDW to the cloud has enabled capabilities and innovations that would not have been possible with an on-premises environment. Notwithstanding the challenges of managing cloud resources, the resulting RDW capabilities have been highly positive to our institution, research community, and partners.}, } @article {pmid34917585, year = {2021}, author = {Pandya, S and Sur, A and Solke, N}, title = {COVIDSAVIOR: A Novel Sensor-Fusion and Deep Learning Based Framework for Virus Outbreaks.}, journal = {Frontiers in public health}, volume = {9}, number = {}, pages = {797808}, pmid = {34917585}, issn = {2296-2565}, mesh = {Algorithms ; *COVID-19 ; *Deep Learning ; Disease Outbreaks/prevention & control ; Humans ; Masks ; }, abstract = {The presented deep learning and sensor-fusion based assistive technology (Smart Facemask and Thermal scanning kiosk) will protect the individual using auto face-mask detection and auto thermal scanning to detect the current body temperature. Furthermore, the presented system also facilitates a variety of notifications, such as an alarm, if an individual is not wearing a mask and detects thermal temperature beyond the standard body temperature threshold, such as 98.6°F (37°C). Design/methodology/approach-The presented deep Learning and sensor-fusion-based approach can also detect an individual in with or without mask situations and provide appropriate notification to the security personnel by raising the alarm. Moreover, the smart tunnel is also equipped with a thermal sensing unit embedded with a camera, which can detect the real-time body temperature of an individual concerning the prescribed body temperature limits as prescribed by WHO reports. Findings-The investigation results validate the performance evaluation of the presented smart face-mask and thermal scanning mechanism. 
The presented system can also detect an outsider entering the building with or without mask condition and be aware of the security control room by raising appropriate alarms. Furthermore, the presented smart epidemic tunnel is embedded with an intelligent algorithm that can perform real-time thermal scanning of an individual and store essential information in a cloud platform, such as Google firebase. Thus, the proposed system favors society by saving time and helps in lowering the spread of coronavirus.}, } @article {pmid34914924, year = {2022}, author = {Iregbu, K and Dramowski, A and Milton, R and Nsutebu, E and Howie, SRC and Chakraborty, M and Lavoie, PM and Costelloe, CE and Ghazal, P}, title = {Global health systems' data science approach for precision diagnosis of sepsis in early life.}, journal = {The Lancet. Infectious diseases}, volume = {22}, number = {5}, pages = {e143-e152}, doi = {10.1016/S1473-3099(21)00645-9}, pmid = {34914924}, issn = {1474-4457}, support = {G0701289/MRC_/Medical Research Council/United Kingdom ; K43 TW010682/TW/FIC NIH HHS/United States ; }, mesh = {Artificial Intelligence ; Child ; *Data Science ; Developing Countries ; Global Health ; Humans ; Infant, Newborn ; *Sepsis/diagnosis ; }, abstract = {Neonates and children in low-income and middle-income countries (LMICs) contribute to the highest number of sepsis-associated deaths globally. Interventions to prevent sepsis mortality are hampered by a lack of comprehensive epidemiological data and pathophysiological understanding of biological pathways. In this review, we discuss the challenges faced by LMICs in diagnosing sepsis in these age groups. We highlight a role for multi-omics and health care data to improve diagnostic accuracy of clinical algorithms, arguing that health-care systems urgently need precision medicine to avoid the pitfalls of missed diagnoses, misdiagnoses, and overdiagnoses, and associated antimicrobial resistance. 
We discuss ethical, regulatory, and systemic barriers related to the collection and use of big data in LMICs. Technologies such as cloud computing, artificial intelligence, and medical tricorders might help, but they require collaboration with local communities. Co-partnering (joint equal development of technology between producer and end-users) could facilitate integration of these technologies as part of future care-delivery systems, offering a chance to transform the global management and prevention of sepsis for neonates and children.}, } @article {pmid34914923, year = {2022}, author = {Keddy, KH and Saha, S and Kariuki, S and Kalule, JB and Qamar, FN and Haq, Z and Okeke, IN}, title = {Using big data and mobile health to manage diarrhoeal disease in children in low-income and middle-income countries: societal barriers and ethical implications.}, journal = {The Lancet. Infectious diseases}, volume = {22}, number = {5}, pages = {e130-e142}, doi = {10.1016/S1473-3099(21)00585-5}, pmid = {34914923}, issn = {1474-4457}, support = {MR/L00464X/1/MRC_/Medical Research Council/United Kingdom ; R01 AI099525/AI/NIAID NIH HHS/United States ; 215675/Z/19/Z/WT_/Wellcome Trust/United Kingdom ; }, mesh = {Big Data ; *Developing Countries ; Diarrhea/prevention & control/therapy ; Humans ; Poverty ; *Telemedicine ; }, abstract = {Diarrhoea is an important cause of morbidity and mortality in children from low-income and middle-income countries (LMICs), despite advances in the management of this condition. Understanding of the causes of diarrhoea in children in LMICs has advanced owing to large multinational studies and big data analytics computing the disease burden, identifying the important variables that have contributed to reducing this burden. 
The advent of the mobile phone has further enabled the management of childhood diarrhoea by providing both clinical support to health-care workers (such as diagnosis and management) and communicating preventive measures to carers (such as breastfeeding and vaccination reminders) in some settings. There are still challenges in addressing the burden of diarrhoeal diseases, such as incomplete patient information, underrepresented geographical areas, concerns about patient confidentiality, unequal partnerships between study investigators, and the reactive approach to outbreaks. A transparent approach to promote the inclusion of researchers in LMICs could address partnership imbalances. A big data umbrella encompassing cloud-based centralised databases to analyse interlinked human, animal, agricultural, social, and climate data would provide an informative solution to the development of appropriate management protocols in LMICs.}, } @article {pmid34912536, year = {2021}, author = {Lin, B and Huang, W}, title = {A Study of Cloud-Based Remote Clinical Care Technology.}, journal = {Journal of healthcare engineering}, volume = {2021}, number = {}, pages = {8024091}, pmid = {34912536}, issn = {2040-2309}, mesh = {*Cloud Computing ; Delivery of Health Care ; Humans ; *Software ; Surveys and Questionnaires ; Technology ; }, abstract = {This paper uses cloud computing to build and design remote clinical care technology, and the study refines the evaluation approach for the elements and builds an evaluation prototype for the strategy, uses service design theory to improve the design of the service part of the assistive system, summarizes the list of requirements based on system design and service design, and designs a service design prototype. Through design practice, the detailed design of the software interaction interface and the auxiliary product of the care assistance system based on the prototype are investigated. 
Based on the user perspective, the strategy of meeting user expectations and improving user information literacy is proposed; based on the social network perspective, the strategy of establishing a long-term mechanism for smart medical operation and improving the information interaction network environment is proposed; and based on the system service perspective, the strategy of optimizing the system function design and innovating the service model is proposed. Compared with the traditional written patient handover, the application of MNIS under cloud computing can significantly shorten the handover time of surgical patients, improve the standardized rate of surgical safety verification execution and the qualified rate of nursing documents, while the rate of standardized application of prophylactic antibiotics is also significantly higher than that of the control group. The questionnaire survey of nursing staff in the operating room showed that clinical nursing staff was generally satisfied with the clinical application of MNIS under cloud computing, with an average satisfaction score of 64.5 ± 11.3, and an average score of 3.58 ± 0.54 for each item. Among them, pre-application training of MNIS, departmental support for MNIS, and its ease of verification in surgical patients were the three main factors favoring the clinical application of MNIS in the operating room with cloud computing, while barriers to wireless network connectivity, inconvenient PDA input, and small screen size were the three main drawbacks affecting its application. 
The determined clinical evaluation index system of MNIS in the operating room is innovative, which not only includes clinical care indicators but also covers general hardware and software indicators, which can effectively reflect the practical application capability of mobile terminal clinical and user experience feelings, and the evaluation index system is comprehensive.}, } @article {pmid34910757, year = {2021}, author = {Byrne, M and O'Malley, L and Glenny, AM and Pretty, I and Tickle, M}, title = {Assessing the reliability of automatic sentiment analysis tools on rating the sentiment of reviews of NHS dental practices in England.}, journal = {PloS one}, volume = {16}, number = {12}, pages = {e0259797}, pmid = {34910757}, issn = {1932-6203}, mesh = {*Artificial Intelligence ; Automation ; Dental Care/*standards ; Humans ; Internet ; National Health Programs ; United Kingdom ; }, abstract = {BACKGROUND: Online reviews may act as a rich source of data to assess the quality of dental practices. Assessing the content and sentiment of reviews on a large scale is time consuming and expensive. Automation of the process of assigning sentiment to big data samples of reviews may allow for reviews to be used as Patient Reported Experience Measures for primary care dentistry.

AIM: To assess the reliability of three different online sentiment analysis tools (Amazon Comprehend DetectSentiment API (ACDAPI), Google and Monkeylearn) at assessing the sentiment of reviews of dental practices working on National Health Service contracts in the United Kingdom.

METHODS: A Python 3 script was used to mine 15800 reviews from 4803 unique dental practices on the NHS.uk websites between April 2018 - March 2019. A random sample of 270 reviews were rated by the three sentiment analysis tools. These reviews were rated by 3 blinded independent human reviewers and a pooled sentiment score was assigned. Kappa statistics and polychoric evaluation were used to assess the level of agreement. Disagreements between the automated and human reviewers were qualitatively assessed.

RESULTS: There was good agreement between the sentiment assigned to reviews by the human reviewers and ACDAPI (k = 0.660). The Google (k = 0.706) and Monkeylearn (k = 0.728) showed slightly better agreement at the expense of usability on a massive dataset. There were 33 disagreements in rating between ACDAPI and human reviewers, of which n = 16 were due to syntax errors, n = 10 were due to misappropriation of the strength of conflicting emotions and n = 7 were due to a lack of overtly emotive language in the text.

CONCLUSIONS: There is good agreement between the sentiment of an online review assigned by a group of humans and by cloud-based sentiment analysis. This may allow the use of automated sentiment analysis for quality assessment of dental service provision in the NHS.}, } @article {pmid34906327, year = {2021}, author = {Halder, A and Verma, A and Biswas, D and Srivastava, S}, title = {Recent advances in mass-spectrometry based proteomics software, tools and databases.}, journal = {Drug discovery today. Technologies}, volume = {39}, number = {}, pages = {69-79}, doi = {10.1016/j.ddtec.2021.06.007}, pmid = {34906327}, issn = {1740-6749}, mesh = {Algorithms ; Databases, Factual ; Databases, Protein ; Mass Spectrometry ; *Proteomics ; *Software ; }, abstract = {The field of proteomics immensely depends on data generation and data analysis which are thoroughly supported by software and databases. There has been a massive advancement in mass spectrometry-based proteomics over the last 10 years which has compelled the scientific community to upgrade or develop algorithms, tools, and repository databases in the field of proteomics. Several standalone software, and comprehensive databases have aided the establishment of integrated omics pipeline and meta-analysis workflow which has contributed to understand the disease pathobiology, biomarker discovery and predicting new therapeutic modalities. For shotgun proteomics where Data Dependent Acquisition is performed, several user-friendly software are developed that can analyse the pre-processed data to provide mechanistic insights of the disease. Likewise, in Data Independent Acquisition, pipelines are emerged which can accomplish the task from building the spectral library to identify the therapeutic targets. Furthermore, in the age of big data analysis the implications of machine learning and cloud computing are appending robustness, rapidness and in-depth proteomics data analysis. 
The current review talks about the recent advancement, and development of software, tools, and database in the field of mass-spectrometry based proteomics.}, } @article {pmid34906321, year = {2021}, author = {Frye, L and Bhat, S and Akinsanya, K and Abel, R}, title = {From computer-aided drug discovery to computer-driven drug discovery.}, journal = {Drug discovery today. Technologies}, volume = {39}, number = {}, pages = {111-117}, doi = {10.1016/j.ddtec.2021.08.001}, pmid = {34906321}, issn = {1740-6749}, mesh = {*Artificial Intelligence ; Computer-Aided Design ; Computers ; Drug Design ; *Drug Discovery ; Machine Learning ; Proteins ; }, abstract = {Computational chemistry and structure-based design have traditionally been viewed as a subset of tools that could aid acceleration of the drug discovery process, but were not commonly regarded as a driving force in small molecule drug discovery. In the last decade however, there have been dramatic advances in the field, including (1) development of physics-based computational approaches to accurately predict a broad variety of endpoints from potency to solubility, (2) improvements in artificial intelligence and deep learning methods and (3) dramatic increases in computational power with the advent of GPUs and cloud computing, resulting in the ability to explore and accurately profile vast amounts of drug-like chemical space in silico. There have also been simultaneous advancements in structural biology such as cryogenic electron microscopy (cryo-EM) and computational protein-structure prediction, allowing for access to many more high-resolution 3D structures of novel drug-receptor complexes. The convergence of these breakthroughs has positioned structurally-enabled computational methods to be a driving force behind the discovery of novel small molecule therapeutics. 
This review will give a broad overview of the synergies in recent advances in the fields of computational chemistry, machine learning and structural biology, in particular in the areas of hit identification, hit-to-lead, and lead optimization.}, } @article {pmid34902160, year = {2022}, author = {Rowe, SP and Pomper, MG}, title = {Molecular imaging in oncology: Current impact and future directions.}, journal = {CA: a cancer journal for clinicians}, volume = {72}, number = {4}, pages = {333-352}, pmid = {34902160}, issn = {1542-4863}, support = {R01 CA184228/CA/NCI NIH HHS/United States ; R01 CA134675/CA/NCI NIH HHS/United States ; P41 EB024495/EB/NIBIB NIH HHS/United States ; }, mesh = {Animals ; Humans ; Magnetic Resonance Imaging ; *Medical Oncology ; *Molecular Imaging/methods ; Positron-Emission Tomography ; }, abstract = {The authors define molecular imaging, according to the Society of Nuclear Medicine and Molecular Imaging, as the visualization, characterization, and measurement of biological processes at the molecular and cellular levels in humans and other living systems. Although practiced for many years clinically in nuclear medicine, expansion to other imaging modalities began roughly 25 years ago and has accelerated since. That acceleration derives from the continual appearance of new and highly relevant animal models of human disease, increasingly sensitive imaging devices, high-throughput methods to discover and optimize affinity agents to key cellular targets, new ways to manipulate genetic material, and expanded use of cloud computing. Greater interest by scientists in allied fields, such as chemistry, biomedical engineering, and immunology, as well as increased attention by the pharmaceutical industry, have likewise contributed to the boom in activity in recent years. 
Whereas researchers and clinicians have applied molecular imaging to a variety of physiologic processes and disease states, here, the authors focus on oncology, arguably where it has made its greatest impact. The main purpose of imaging in oncology is early detection to enable interception if not prevention of full-blown disease, such as the appearance of metastases. Because biochemical changes occur before changes in anatomy, molecular imaging-particularly when combined with liquid biopsy for screening purposes-promises especially early localization of disease for optimum management. Here, the authors introduce the ways and indications in which molecular imaging can be undertaken, the tools used and under development, and near-term challenges and opportunities in oncology.}, } @article {pmid34902120, year = {2022}, author = {Calabrese, B}, title = {Web and Cloud Computing to Analyze Microarray Data.}, journal = {Methods in molecular biology (Clifton, N.J.)}, volume = {2401}, number = {}, pages = {29-38}, pmid = {34902120}, issn = {1940-6029}, mesh = {*Cloud Computing ; Information Storage and Retrieval ; Internet ; Microarray Analysis ; *Software ; }, abstract = {Microarray technology is a high-throughput technique that can simultaneously measure hundreds of thousands of genes' expression levels. Web and cloud computing tools and databases for storage and analysis of microarray data are necessary for biologists to interpret massive data from experiments. 
This chapter presents the main databases and web and cloud computing tools for microarray data storage and analysis.}, } @article {pmid34902119, year = {2022}, author = {Marozzo, F and Belcastro, L}, title = {High-Performance Framework to Analyze Microarray Data.}, journal = {Methods in molecular biology (Clifton, N.J.)}, volume = {2401}, number = {}, pages = {13-27}, pmid = {34902119}, issn = {1940-6029}, mesh = {Algorithms ; *Computational Biology ; Computing Methodologies ; Genome ; Humans ; *Microarray Analysis ; Software ; }, abstract = {Pharmacogenomics is an important research field that studies the impact of genetic variation of patients on drug responses, looking for correlations between single nucleotide polymorphisms (SNPs) of patient genome and drug toxicity or efficacy. The large number of available samples and the high resolution of the instruments allow microarray platforms to produce huge amounts of SNP data. To analyze such data and find correlations in a reasonable time, high-performance computing solutions must be used. Cloud4SNP is a bioinformatics tool, based on Data Mining Cloud Framework (DMCF), for parallel preprocessing and statistical analysis of SNP pharmacogenomics microarray data.This work describes how Cloud4SNP has been extended to execute applications on Apache Spark, which provides faster execution time for iterative and batch processing. 
The experimental evaluation shows that Cloud4SNP is able to exploit the high-performance features of Apache Spark, obtaining faster execution times and high level of scalability, with a global speedup that is very close to linear values.}, } @article {pmid34900023, year = {2021}, author = {Tiwari, A and Dhiman, V and Iesa, MAM and Alsarhan, H and Mehbodniya, A and Shabaz, M}, title = {Patient Behavioral Analysis with Smart Healthcare and IoT.}, journal = {Behavioural neurology}, volume = {2021}, number = {}, pages = {4028761}, pmid = {34900023}, issn = {1875-8584}, mesh = {Artificial Intelligence ; Delivery of Health Care ; Humans ; *Internet of Things ; }, abstract = {Patient behavioral analysis is the key factor for providing treatment to patients who may suffer from various difficulties including neurological disease, head trauma, and mental disease. Analyzing the patient's behavior helps in determining the root cause of the disease. In traditional healthcare, patient behavioral analysis has lots of challenges that were much more difficult. The patient behavior can be easily analyzed with the development of smart healthcare. Information technology plays a key role in understanding the concept of smart healthcare. A new generation of information technologies including IoT and cloud computing is used for changing the traditional healthcare system in all ways. Using Internet of Things in the healthcare institution enhances the effectiveness as well as makes it more personalized and convenient to the patients. The first thing that will be discussed in the article is the technologies that have been used to support the smart class, and further, there will be a discussion on the existing problems with the smart healthcare system and how these problems can be solved. This study can provide essential information about the role of smart healthcare and IoT in maintaining behavior of patent. Various biomarkers are maintained properly with the help of these technologies. 
This study can provide effective information about importance of smart health system. This smart healthcare is conducted with the involvement of proper architecture. This is treated as effective energy efficiency architecture. Artificial intelligence is used increasingly in healthcare to maintain diagnosis and other important factors of healthcare. This application is also used to maintain patient engagement, which is also included in this study. Major hardware components are also included in this technology such as CO sensor and CO2 sensor.}, } @article {pmid34899960, year = {2022}, author = {ElAraby, ME and Elzeki, OM and Shams, MY and Mahmoud, A and Salem, H}, title = {A novel Gray-Scale spatial exploitation learning Net for COVID-19 by crawling Internet resources.}, journal = {Biomedical signal processing and control}, volume = {73}, number = {}, pages = {103441}, pmid = {34899960}, issn = {1746-8094}, abstract = {Today, the earth planet suffers from the decay of active pandemic COVID-19 which motivates scientists and researchers to detect and diagnose the infected people. Chest X-ray (CXR) image is a common utility tool for detection. Even the CXR suffers from low informative details about COVID-19 patches; the computer vision helps to overcome it through grayscale spatial exploitation analysis. In turn, it is highly recommended to acquire more CXR images to increase the capacity and ability to learn for mining the grayscale spatial exploitation. In this paper, an efficient Gray-scale Spatial Exploitation Net (GSEN) is designed by employing web pages crawling across cloud computing environments. 
The motivation of this work are i) utilizing a framework methodology for constructing consistent dataset by web crawling to update the dataset continuously per crawling iteration; ii) designing lightweight, fast learning, comparable accuracy, and fine-tuned parameters gray-scale spatial exploitation deep neural net; iii) comprehensive evaluation of the designed gray-scale spatial exploitation net for different collected dataset(s) based on web COVID-19 crawling verse the transfer learning of the pre-trained nets. Different experiments have been performed for benchmarking both the proposed web crawling framework methodology and the designed gray-scale spatial exploitation net. Due to the accuracy metric, the proposed net achieves 95.60% for two-class labels, and 92.67% for three-class labels, respectively compared with the most recent transfer learning Google-Net, VGG-19, Res-Net 50, and Alex-Net approaches. Furthermore, web crawling utilizes the accuracy rates improvement in a positive relationship to the cardinality of crawled CXR dataset.}, } @article {pmid34898797, year = {2022}, author = {Subramanian, M and Shanmuga Vadivel, K and Hatamleh, WA and Alnuaim, AA and Abdelhady, M and V E, S}, title = {The role of contemporary digital tools and technologies in COVID-19 crisis: An exploratory analysis.}, journal = {Expert systems}, volume = {39}, number = {6}, pages = {e12834}, pmid = {34898797}, issn = {1468-0394}, abstract = {Following the COVID-19 pandemic, there has been an increase in interest in using digital resources to contain pandemics. To avoid, detect, monitor, regulate, track, and manage diseases, predict outbreaks and conduct data analysis and decision-making processes, a variety of digital technologies are used, ranging from artificial intelligence (AI)-powered machine learning (ML) or deep learning (DL) focused applications to blockchain technology and big data analytics enabled by cloud computing and the internet of things (IoT). 
In this paper, we look at how emerging technologies such as the IoT and sensors, AI, ML, DL, blockchain, augmented reality, virtual reality, cloud computing, big data, robots and drones, intelligent mobile apps, and 5G are advancing health care and paving the way to combat the COVID-19 pandemic. The aim of this research is to look at possible technologies, processes, and tools for addressing COVID-19 issues such as pre-screening, early detection, monitoring infected/quarantined individuals, forecasting future infection rates, and more. We also look at the research possibilities that have arisen as a result of the use of emerging technology to handle the COVID-19 crisis.}, } @article {pmid34898269, year = {2021}, author = {Verdu, E and Nieto, YV and Saleem, N}, title = {Call for Special Issue Papers: Cloud Computing and Big Data for Cognitive IoT.}, journal = {Big data}, volume = {9}, number = {6}, pages = {413-414}, doi = {10.1089/big.2021.29048.cfp}, pmid = {34898269}, issn = {2167-647X}, } @article {pmid34897506, year = {2022}, author = {Waitman, LR and Song, X and Walpitage, DL and Connolly, DC and Patel, LP and Liu, M and Schroeder, MC and VanWormer, JJ and Mosa, AS and Anye, ET and Davis, AM}, title = {Enhancing PCORnet Clinical Research Network data completeness by integrating multistate insurance claims with electronic health records in a cloud environment aligned with CMS security and privacy requirements.}, journal = {Journal of the American Medical Informatics Association : JAMIA}, volume = {29}, number = {4}, pages = {660-670}, pmid = {34897506}, issn = {1527-974X}, support = {UL1TR002366/NH/NIH HHS/United States ; UL1 TR002366/TR/NCATS NIH HHS/United States ; RI-CRN-2020-003-IC/PCORI/Patient-Centered Outcomes Research Institute/United States ; }, mesh = {Aged ; Centers for Medicare and Medicaid Services, U.S. 
; *Electronic Health Records ; Humans ; Medicare ; Obesity ; *Privacy ; United States ; }, abstract = {OBJECTIVE: The Greater Plains Collaborative (GPC) and other PCORnet Clinical Data Research Networks capture healthcare utilization within their health systems. Here, we describe a reusable environment (GPC Reusable Observable Unified Study Environment [GROUSE]) that integrates hospital and electronic health records (EHRs) data with state-wide Medicare and Medicaid claims and assess how claims and clinical data complement each other to identify obesity and related comorbidities in a patient sample.

MATERIALS AND METHODS: EHR, billing, and tumor registry data from 7 healthcare systems were integrated with Center for Medicare (2011-2016) and Medicaid (2011-2012) services insurance claims to create deidentified databases in Informatics for Integrating Biology & the Bedside and PCORnet Common Data Model formats. We describe technical details of how this federally compliant, cloud-based data environment was built. As a use case, trends in obesity rates for different age groups are reported, along with the relative contribution of claims and EHR data-to-data completeness and detecting common comorbidities.

RESULTS: GROUSE contained 73 billion observations from 24 million unique patients (12.9 million Medicare; 13.9 million Medicaid; 6.6 million GPC patients) with 1 674 134 patients crosswalked and 983 450 patients with body mass index (BMI) linked to claims. Diagnosis codes from EHR and claims sources underreport obesity by 2.56 times compared with body mass index measures. However, common comorbidities such as diabetes and sleep apnea diagnoses were more often available from claims diagnoses codes (1.6 and 1.4 times, respectively).

CONCLUSION: GROUSE provides a unified EHR-claims environment to address health system and federal privacy concerns, which enables investigators to generalize analyses across health systems integrated with multistate insurance claims.}, } @article {pmid34895958, year = {2022}, author = {Li, Y and Cianfrocco, MA}, title = {Cloud computing platforms to support cryo-EM structure determination.}, journal = {Trends in biochemical sciences}, volume = {47}, number = {2}, pages = {103-105}, doi = {10.1016/j.tibs.2021.11.005}, pmid = {34895958}, issn = {0968-0004}, mesh = {*Cloud Computing ; Cryoelectron Microscopy ; }, abstract = {Leveraging the power of single-particle cryo-electron microscopy (cryo-EM) requires robust and accessible computational infrastructure. Here, we summarize the cloud computing landscape and picture the outlook of a hybrid cryo-EM computing workflow, and make suggestions to the community to facilitate a future for cryo-EM that integrates into cloud computing infrastructure.}, } @article {pmid34891943, year = {2021}, author = {Zhou, Y and Qian, C and Guo, Y and Wang, Z and Wang, J and Qu, B and Guo, D and You, Y and Qu, X}, title = {XCloud-pFISTA: A Medical Intelligence Cloud for Accelerated MRI.}, journal = {Annual International Conference of the IEEE Engineering in Medicine and Biology Society. IEEE Engineering in Medicine and Biology Society. Annual International Conference}, volume = {2021}, number = {}, pages = {3289-3292}, doi = {10.1109/EMBC46164.2021.9630813}, pmid = {34891943}, issn = {2694-0604}, mesh = {Algorithms ; *Artificial Intelligence ; *Image Processing, Computer-Assisted ; Intelligence ; Magnetic Resonance Imaging ; }, abstract = {Machine learning and artificial intelligence have shown remarkable performance in accelerated magnetic resonance imaging (MRI). Cloud computing technologies have great advantages in building an easily accessible platform to deploy advanced algorithms. 
In this work, we develop an open-access, easy-to-use and high-performance medical intelligence cloud computing platform (XCloud-pFISTA) to reconstruct MRI images from undersampled k-space data. Two state-of-the-art approaches of the Projected Fast Iterative Soft-Thresholding Algorithm (pFISTA) family have been successfully implemented on the cloud. This work can be considered as a good example of cloud-based medical image reconstruction and may benefit the future development of integrated reconstruction and online diagnosis system.}, } @article {pmid34884122, year = {2021}, author = {Kua, J and Loke, SW and Arora, C and Fernando, N and Ranaweera, C}, title = {Internet of Things in Space: A Review of Opportunities and Challenges from Satellite-Aided Computing to Digitally-Enhanced Space Living.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {23}, pages = {}, pmid = {34884122}, issn = {1424-8220}, mesh = {Artificial Intelligence ; Cloud Computing ; *Internet of Things ; Machine Learning ; Technology ; }, abstract = {Recent scientific and technological advancements driven by the Internet of Things (IoT), Machine Learning (ML) and Artificial Intelligence (AI), distributed computing and data communication technologies have opened up a vast range of opportunities in many scientific fields-spanning from fast, reliable and efficient data communication to large-scale cloud/edge computing and intelligent big data analytics. Technological innovations and developments in these areas have also enabled many opportunities in the space industry. The successful Mars landing of NASA's Perseverance rover on 18 February 2021 represents another giant leap for humankind in space exploration. Emerging research and developments of connectivity and computing technologies in IoT for space/non-terrestrial environments is expected to yield significant benefits in the near future. 
This survey paper presents a broad overview of the area and provides a look-ahead of the opportunities made possible by IoT and space-based technologies. We first survey the current developments of IoT and space industry, and identify key challenges and opportunities in these areas. We then review the state-of-the-art and discuss future opportunities for IoT developments, deployment and integration to support future endeavors in space exploration.}, } @article {pmid34884048, year = {2021}, author = {Sodhro, AH and Zahid, N}, title = {AI-Enabled Framework for Fog Computing Driven E-Healthcare Applications.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {23}, pages = {}, pmid = {34884048}, issn = {1424-8220}, mesh = {Aged ; Artificial Intelligence ; Delivery of Health Care ; Humans ; *Internet of Things ; Reproducibility of Results ; *Telemedicine ; }, abstract = {Artificial Intelligence (AI) is the revolutionary paradigm to empower sixth generation (6G) edge computing based e-healthcare for everyone. Thus, this research aims to promote an AI-based cost-effective and efficient healthcare application. The cyber physical system (CPS) is a key player in the internet world where humans and their personal devices such as cell phones, laptops, wearables, etc., facilitate the healthcare environment. The data extracting, examining and monitoring strategies from sensors and actuators in the entire medical landscape are facilitated by cloud-enabled technologies for absorbing and accepting the entire emerging wave of revolution. The efficient and accurate examination of voluminous data from the sensor devices poses restrictions in terms of bandwidth, delay and energy. Due to the heterogeneous nature of the Internet of Medical Things (IoMT), the driven healthcare system must be smart, interoperable, convergent, and reliable to provide pervasive and cost-effective healthcare platforms. 
Unfortunately, because of higher power consumption and lesser packet delivery rate, achieving interoperable, convergent, and reliable transmission is challenging in connected healthcare. In such a scenario, this paper has fourfold major contributions. The first contribution is the development of a single chip wearable electrocardiogram (ECG) with the support of an analog front end (AFE) chip model (i.e., ADS1292R) for gathering the ECG data to examine the health status of elderly or chronic patients with the IoT-based cyber physical system (CPS). The second proposes a fuzzy-based sustainable, interoperable, and reliable algorithm (FSIRA), which is an intelligent and self-adaptive decision-making approach to prioritize emergency and critical patients in association with the selected parameters for improving healthcare quality at reasonable costs. The third is the proposal of a specific cloud-based architecture for mobile and connected healthcare. The fourth is the identification of the right balance between reliability, packet loss ratio, convergence, latency, interoperability, and throughput to support an adaptive IoMT driven connected healthcare. It is examined and observed that our proposed approaches outperform the conventional techniques by providing high reliability, high convergence, interoperability, and a better foundation to analyze and interpret the accuracy in systems from a medical health aspect. As for the IoMT, an enabled healthcare cloud is the key ingredient on which to focus, as it also faces the big hurdle of less bandwidth, more delay and energy drain. 
Thus, we propose the mathematical trade-offs between bandwidth, interoperability, reliability, delay, and energy dissipation for IoMT-oriented smart healthcare over a 6G platform.}, } @article {pmid34883979, year = {2021}, author = {Lazazzera, R and Laguna, P and Gil, E and Carrault, G}, title = {Proposal for a Home Sleep Monitoring Platform Employing a Smart Glove.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {23}, pages = {}, pmid = {34883979}, issn = {1424-8220}, mesh = {Humans ; *Oxygen Saturation ; Photoplethysmography ; Polysomnography ; Sleep ; *Sleep Apnea Syndromes/diagnosis ; }, abstract = {The present paper proposes the design of a sleep monitoring platform. It consists of an entire sleep monitoring system based on a smart glove sensor called UpNEA worn during the night for signals acquisition, a mobile application, and a remote server called AeneA for cloud computing. UpNEA acquires a 3-axis accelerometer signal, a photoplethysmography (PPG), and a peripheral oxygen saturation (SpO2) signal from the index finger. Overnight recordings are sent from the hardware to a mobile application and then transferred to AeneA. After cloud computing, the results are shown in a web application, accessible for the user and the clinician. The AeneA sleep monitoring activity performs different tasks: sleep stages classification and oxygen desaturation assessment; heart rate and respiration rate estimation; tachycardia, bradycardia, atrial fibrillation, and premature ventricular contraction detection; and apnea and hypopnea identification and classification. The PPG breathing rate estimation algorithm showed an absolute median error of 0.5 breaths per minute for the 32 s window and 0.2 for the 64 s window. The apnea and hypopnea detection algorithm showed an accuracy (Acc) of 75.1%, by windowing the PPG in one-minute segments. 
The classification task revealed 92.6% Acc in separating central from obstructive apnea, 83.7% in separating central apnea from central hypopnea and 82.7% in separating obstructive apnea from obstructive hypopnea. The novelty of the integrated algorithms and the top-notch cloud computing products deployed, encourage the production of the proposed solution for home sleep monitoring.}, } @article {pmid34883895, year = {2021}, author = {Guo, K and Liu, C and Zhao, S and Lu, J and Zhang, S and Yang, H}, title = {Design of a Millimeter-Wave Radar Remote Monitoring System for the Elderly Living Alone Using WIFI Communication.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {23}, pages = {}, pmid = {34883895}, issn = {1424-8220}, support = {2017YFB1304103//national key R&D program of china/ ; }, mesh = {Aged ; Algorithms ; Communication ; Heart Rate ; Home Environment ; Humans ; *Radar ; *Signal Processing, Computer-Assisted ; }, abstract = {In response to the current demand for the remote monitoring of older people living alone, a non-contact human vital signs monitoring system based on millimeter wave radar has gradually become the object of research. This paper mainly carried out research regarding the detection method to obtain human breathing and heartbeat signals using a frequency modulated continuous wave system. We completed a portable millimeter-wave radar module for wireless communication. The radar module was a small size and had a WIFI communication interface, so we only needed to provide a power cord for the radar module. The breathing and heartbeat signals were detected and separated by FIR digital filter and the wavelet transform method. By building a cloud computing framework, we realized remote and senseless monitoring of the vital signs for older people living alone. Experiments were also carried out to compare the performance difference between the system and the common contact detection system. 
The experimental results showed that the life parameter detection system based on the millimeter wave sensor has strong real-time performance and accuracy.}, } @article {pmid34883857, year = {2021}, author = {Akram, J and Tahir, A and Munawar, HS and Akram, A and Kouzani, AZ and Mahmud, MAP}, title = {Cloud- and Fog-Integrated Smart Grid Model for Efficient Resource Utilisation.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {23}, pages = {}, pmid = {34883857}, issn = {1424-8220}, mesh = {Algorithms ; *Cloud Computing ; *Computer Systems ; Reproducibility of Results ; }, abstract = {The smart grid (SG) is a contemporary electrical network that enhances the network's performance, reliability, stability, and energy efficiency. The integration of cloud and fog computing with SG can increase its efficiency. The combination of SG with cloud computing enhances resource allocation. To minimise the burden on the Cloud and optimise resource allocation, the concept of fog computing integration with cloud computing is presented. Fog has three essential functionalities: location awareness, low latency, and mobility. We offer a cloud and fog-based architecture for information management in this study. By allocating virtual machines using a load-balancing mechanism, fog computing makes the system more efficient (VMs). We proposed a novel approach based on binary particle swarm optimisation with inertia weight adjusted using simulated annealing. The technique is named BPSOSA. Inertia weight is an important factor in BPSOSA which adjusts the size of the search space for finding the optimal solution. The BPSOSA technique is compared against the round robin, odds algorithm, and ant colony optimisation. In terms of response time, BPSOSA outperforms round robin, odds algorithm, and ant colony optimisation by 53.99 ms, 82.08 ms, and 81.58 ms, respectively. 
In terms of processing time, BPSOSA outperforms round robin, odds algorithm, and ant colony optimisation by 52.94 ms, 81.20 ms, and 80.56 ms, respectively. Compared to BPSOSA, ant colony optimisation has slightly better cost efficiency, however, the difference is insignificant.}, } @article {pmid34883848, year = {2021}, author = {Bravo-Arrabal, J and Toscano-Moreno, M and Fernandez-Lozano, JJ and Mandow, A and Gomez-Ruiz, JA and García-Cerezo, A}, title = {The Internet of Cooperative Agents Architecture (X-IoCA) for Robots, Hybrid Sensor Networks, and MEC Centers in Complex Environments: A Search and Rescue Case Study.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {23}, pages = {}, pmid = {34883848}, issn = {1424-8220}, support = {RTI2018-093421-B-I00//Spanish Ministerio de Ciencia, Innovación y Universidades, Gobierno de España/ ; BES-2016-077022//"Spanish Predoctoral Grant from the Subprograma Estatal de Formaci\'on del MICINN" co-financed by the European Social Fund./ ; C007/18-SP//"Piloto 5G Andalucía" initiative, promoted by the Ministerio de Asuntos Económicos y Transformación Digital, through Red.es, being developed by Vodafone and Huawei/ ; }, mesh = {*Disasters ; Feedback ; Humans ; *Internet of Things ; Rescue Work ; *Robotics ; }, abstract = {Cloud robotics and advanced communications can foster a step-change in cooperative robots and hybrid wireless sensor networks (H-WSN) for demanding environments (e.g., disaster response, mining, demolition, and nuclear sites) by enabling the timely sharing of data and computational resources between robot and human teams. However, the operational complexity of such multi-agent systems requires defining effective architectures, coping with implementation details, and testing in realistic deployments. 
This article proposes X-IoCA, an Internet of robotic things (IoRT) and communication architecture consisting of a hybrid and heterogeneous network of wireless transceivers (H2WTN), based on LoRa and BLE technologies, and a robot operating system (ROS) network. The IoRT is connected to a feedback information system (FIS) distributed among multi-access edge computing (MEC) centers. Furthermore, we present SAR-IoCA, an implementation of the architecture for search and rescue (SAR) integrated into a 5G network. The FIS for this application consists of an SAR-FIS (including a path planner for UGVs considering risks detected by a LoRa H-WSN) and an ROS-FIS (for real-time monitoring and processing of information published throughout the ROS network). Moreover, we discuss lessons learned from using SAR-IoCA in a realistic exercise where three UGVs, a UAV, and responders collaborated to rescue victims from a tunnel accessible through rough terrain.}, } @article {pmid34883819, year = {2021}, author = {Huang, CE and Li, YH and Aslam, MS and Chang, CC}, title = {Super-Resolution Generative Adversarial Network Based on the Dual Dimension Attention Mechanism for Biometric Image Super-Resolution.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {23}, pages = {}, pmid = {34883819}, issn = {1424-8220}, mesh = {*Biometry ; Humans ; *Image Processing, Computer-Assisted ; Research Design ; }, abstract = {There exist many types of intelligent security sensors in the environment of the Internet of Things (IoT) and cloud computing. Among them, the sensor for biometrics is one of the most important types. Biometric sensors capture the physiological or behavioral features of a person, which can be further processed with cloud computing to verify or identify the user. However, a low-resolution (LR) biometrics image causes the loss of feature details and reduces the recognition rate hugely. 
Moreover, the lack of resolution negatively affects the performance of image-based biometric technology. From a practical perspective, most of the IoT devices suffer from hardware constraints and the low-cost equipment may not be able to meet various requirements, particularly for image resolution, because it asks for additional storage to store high-resolution (HR) images, and a high bandwidth to transmit the HR image. Therefore, how to achieve high accuracy for the biometric system without using expensive and high-cost image sensors is an interesting and valuable issue in the field of intelligent security sensors. In this paper, we proposed DDA-SRGAN, which is a generative adversarial network (GAN)-based super-resolution (SR) framework using the dual-dimension attention mechanism. The proposed model can be trained to discover the regions of interest (ROI) automatically in the LR images without any given prior knowledge. The experiments were performed on the CASIA-Thousand-v4 and the CelebA datasets. The experimental results show that the proposed method is able to learn the details of features in crucial regions and achieve better performance in most cases.}, } @article {pmid34883778, year = {2021}, author = {Erhan, L and Di Mauro, M and Anjum, A and Bagdasar, O and Song, W and Liotta, A}, title = {Embedded Data Imputation for Environmental Intelligent Sensing: A Case Study.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {23}, pages = {}, pmid = {34883778}, issn = {1424-8220}, mesh = {Benchmarking ; *Cloud Computing ; *Machine Learning ; }, abstract = {Recent developments in cloud computing and the Internet of Things have enabled smart environments, in terms of both monitoring and actuation. Unfortunately, this often results in unsustainable cloud-based solutions, whereby, in the interest of simplicity, a wealth of raw (unprocessed) data are pushed from sensor nodes to the cloud. 
Herein, we advocate the use of machine learning at sensor nodes to perform essential data-cleaning operations, to avoid the transmission of corrupted (often unusable) data to the cloud. Starting from a public pollution dataset, we investigate how two machine learning techniques (kNN and missForest) may be embedded on Raspberry Pi to perform data imputation, without impacting the data collection process. Our experimental results demonstrate the accuracy and computational efficiency of edge-learning methods for filling in missing data values in corrupted data series. We find that kNN and missForest correctly impute up to 40% of randomly distributed missing values, with a density distribution of values that is indistinguishable from the benchmark. We also show a trade-off analysis for the case of bursty missing values, with recoverable blocks of up to 100 samples. Computation times are shorter than sampling periods, allowing for data imputation at the edge in a timely manner.}, } @article {pmid34876968, year = {2021}, author = {Liu, S and Jiang, L and Wang, X}, title = {Intelligent Internet of Things Medical Technology in Implantable Intravenous Infusion Port in Children with Malignant Tumors.}, journal = {Journal of healthcare engineering}, volume = {2021}, number = {}, pages = {8936820}, pmid = {34876968}, issn = {2040-2309}, mesh = {Child ; Cloud Computing ; Humans ; Infusions, Intravenous ; Internet ; *Internet of Things ; *Neoplasms/drug therapy ; Technology ; }, abstract = {Due to the recent technological revolution that is centered around information technology, the Internet of Medical Things (IoMT) has become an important research domain. IoMT is a combination of Internet of Things (IoT), big data, cloud computing, ubiquitous network, and three-dimensional holographic technology, which is used to build a smart medical diagnosis and treatment system. 
Additionally, this system should automate various activities, such as the patient's health record and health monitoring, which is an important issue in the development of modern and smart healthcare system. In this paper, we have thoroughly examined the role of a smart healthcare system architecture and other key supporting technologies in improving the health status of both indoor and outdoor patients. The proposed system has the capacity to investigate and predict (if feasible) the clinical application and nursing effects of totally implantable intravenous port (TIVAP) in pediatric hematological tumors. For this purpose, seventy children with hematologic tumors were treated with TIVAP, and IoMT-enabled care was provided to them, where the occurrence of adverse events, specifically after the treatment, was observed. The experimental results collected after the 70 children were treated and cared for by TIVAP show that there were five cases of adverse events, whereas the incidence rate of the adverse events was 7.14%. Moreover, TIVAP has significant efficacy in the treatment of hematologic tumors in children, and it equally reduces the vascular injury caused by chemotherapy in younger patients. Likewise, targeted care reduces the incidence of adverse events in children with expected ratio.}, } @article {pmid34876786, year = {2021}, author = {Hardy, NP and Cahill, RA}, title = {Digital surgery for gastroenterological diseases.}, journal = {World journal of gastroenterology}, volume = {27}, number = {42}, pages = {7240-7246}, pmid = {34876786}, issn = {2219-2840}, mesh = {Algorithms ; Artificial Intelligence ; *Gastroenterology ; Humans ; Machine Learning ; *Robotics ; }, abstract = {Advances in machine learning, computer vision and artificial intelligence methods, in combination with those in processing and cloud computing capability, portend the advent of true decision support during interventions in real-time and soon perhaps in automated surgical steps. 
Such capability, deployed alongside technology intraoperatively, is termed digital surgery and can be delivered without the need for high-end capital robotic investment. An area close to clinical usefulness right now harnesses advances in near infrared endolaparoscopy and fluorescence guidance for tissue characterisation through the use of biophysics-inspired algorithms. This represents a potential synergistic methodology for the deep learning methods currently advancing in ophthalmology, radiology, and recently gastroenterology via colonoscopy. As databanks of more general surgical videos are created, greater analytic insights can be derived across the operative spectrum of gastroenterological disease and operations (including instrumentation and operative step sequencing and recognition, followed over time by surgeon and instrument performance assessment) and linked to value-based outcomes. However, issues of legality, ethics and even morality need consideration, as do the limiting effects of monopolies, cartels and isolated data silos. Furthermore, the role of the surgeon, surgical societies and healthcare institutions in this evolving field needs active deliberation, as the default risks relegation to bystander or passive recipient. 
This editorial provides insight into this accelerating field by illuminating the near-future and next decade evolutionary steps towards widespread clinical integration for patient and societal benefit.}, } @article {pmid34849397, year = {2021}, author = {Karim, HMR and Singha, SK and Neema, PK and Baruah, TD and Ray, R and Mohanty, D and Siddiqui, MS and Nanda, R and Bodhey, NK}, title = {Information technology-based joint preoperative assessment, risk stratification and its impact on patient management, perioperative outcome, and cost.}, journal = {Discoveries (Craiova, Romania)}, volume = {9}, number = {2}, pages = {e130}, pmid = {34849397}, issn = {2359-7232}, abstract = {BACKGROUND: Despite negative recommendations, routine preoperative testing practice is nearly universal. Our aim is to bring the healthcare providers on one platform by using information-technology based preanaesthetic assessment and evaluate the routine preoperative testing's impact on patient outcome and cost.

METHODS: A prospective, non-randomised study was conducted in a teaching hospital during January 2019-August 2020. A locally developed software and cloud-computing were used as a tool to modify preanaesthesia evaluation. The number of investigations ordered, time taken, cost incurred, were compared with the routine practice. Further data were matched as per surgical invasiveness and the patient's physical status. Appropriate tests compared intergroup differences and p-value <0.05 was considered significant.

RESULTS: Data from 114 patients (58 in routine and 56 in patient and surgery specific) were analysed. Patient and surgery specific investigation led to a reduction in the investigations by 80-90%, hospital visit by 50%, and the total cost by 80%, without increasing the day of surgery cancellation or complications.

CONCLUSION: Information technology-based joint preoperative assessment and risk stratification are feasible through locally developed software with minimal cost. It helps in applying patient and surgery specific investigation, reducing the number of tests, hospital visit, and cost, without adversely affecting the perioperative outcome. The application of the modified method will help in cost-effective, yet quality and safe perioperative healthcare delivery. It will also benefit the public from both service and economic perspective.}, } @article {pmid34848776, year = {2021}, author = {Yan, X and Wang, J}, title = {Dynamic monitoring of urban built-up object expansion trajectories in Karachi, Pakistan with time series images and the LandTrendr algorithm.}, journal = {Scientific reports}, volume = {11}, number = {1}, pages = {23118}, pmid = {34848776}, issn = {2045-2322}, abstract = {In the complex process of urbanization, retrieving its dynamic expansion trajectories with an efficient method is challenging, especially for urban regions that are not clearly distinguished from the surroundings in arid regions. In this study, we propose a framework for extracting spatiotemporal change information on urban disturbances. First, the urban built-up object areas in 2000 and 2020 were obtained using object-oriented segmentation method. Second, we applied LandTrendr (LT) algorithm and multiple bands/indices to extract annual spatiotemporal information. This process was implemented effectively with the support of the cloud computing platform of Earth Observation big data. The overall accuracy of time information extraction, the kappa coefficient, and average detection error were 83.76%, 0.79, and 0.57 a, respectively. These results show that Karachi expanded continuously during 2000-2020, with an average annual growth rate of 4.7%. However, this expansion was not spatiotemporally balanced. 
The coastal area developed quickly within a shorter duration, whereas the main newly added urban regions are located in the northern and eastern inland areas. This study demonstrated an effective framework for extracting the dynamic spatiotemporal change information of urban built-up objects and substantially eliminating the salt-and-pepper effect based on pixel detection. Methods used in our study are of general promotion significance in the monitoring of other disturbances caused by natural or human activities.
We further construct the virtual resource pool which can integrate the resource of multiple ESs since some regions may be covered by multiple RSUs. In this paper, we propose a Multi-Scenario offloading schedule for biomedical data processing and analysis in Cloud-Edge-Terminal collaborative vehicular networks called MSCET. The parameters of the proposed MSCET are optimized to maximize the system utility. We also conduct extensive simulations to evaluate the proposed MSCET and the results illustrate that MSCET outperforms other existing schedules.}, } @article {pmid34841104, year = {2021}, author = {Samudra, Y and Ahmad, T}, title = {Improved prediction error expansion and mirroring embedded samples for enhancing reversible audio data hiding.}, journal = {Heliyon}, volume = {7}, number = {11}, pages = {e08381}, pmid = {34841104}, issn = {2405-8440}, abstract = {Many applications work by processing either small or big data, including sensitive and confidential ones, through computer networks like cloud computing. However, many systems are public and may not provide enough security mechanisms. Meanwhile, once the data are compromised, the security and privacy of the users will suffer from serious problems. Therefore, security protection is much required in various aspects, and one of how it is done is by embedding the data (payload) in another form of data (cover) such as audio. However, the existing methods do not provide enough space to accommodate the payload, so bigger data can not be taken; the quality of the respective generated data is relatively low, making it much different from its corresponding cover. This research works on these problems by improving a prediction error expansion-based algorithm and designing a mirroring embedded sample scheme. Here, a processed audio sample is forced to be as close as possible to the original one. 
The experimental results show that this proposed method produces a higher quality of stego data considering the size of the payloads. It achieves more than 100 dB, which is higher than that of the compared algorithms. Additionally, this proposed method is reversible, which means that both the original payload and the audio cover can be fully reconstructed.}, } @article {pmid34833792, year = {2021}, author = {Shah, SC}, title = {Design of a Machine Learning-Based Intelligent Middleware Platform for a Heterogeneous Private Edge Cloud System.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {22}, pages = {}, pmid = {34833792}, issn = {1424-8220}, mesh = {*Algorithms ; *Cloud Computing ; Machine Learning ; Privacy ; }, abstract = {Recent advances in mobile technologies have facilitated the development of a new class of smart city and fifth-generation (5G) network applications. These applications have diverse requirements, such as low latencies, high data rates, significant amounts of computing and storage resources, and access to sensors and actuators. A heterogeneous private edge cloud system was proposed to address the requirements of these applications. The proposed heterogeneous private edge cloud system is characterized by a complex and dynamic multilayer network and computing infrastructure. Efficient management and utilization of this infrastructure may increase data rates and reduce data latency, data privacy risks, and traffic to the core Internet network. A novel intelligent middleware platform is proposed in the current study to manage and utilize heterogeneous private edge cloud infrastructure efficiently. The proposed platform aims to provide computing, data collection, and data storage services to support emerging resource-intensive and non-resource-intensive smart city and 5G network applications. 
It aims to leverage regression analysis and reinforcement learning methods to solve the problem of efficiently allocating heterogeneous resources to application tasks. This platform adopts parallel transmission techniques, dynamic interface allocation techniques, and machine learning-based algorithms in a dynamic multilayer network infrastructure to improve network and application performance. Moreover, it uses container and device virtualization technologies to address problems related to heterogeneous hardware and execution environments.}, } @article {pmid34833723, year = {2021}, author = {Fatima, M and Nisar, MW and Rashid, J and Kim, J and Kamran, M and Hussain, A}, title = {A Novel Fingerprinting Technique for Data Storing and Sharing through Clouds.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {22}, pages = {}, pmid = {34833723}, issn = {1424-8220}, support = {S3033853 and No.2020R1I1A3069700//National Research Foundation of Korea/ ; }, mesh = {Cloud Computing ; *Computer Security ; Confidentiality ; *Electronic Health Records ; Privacy ; Technology ; }, abstract = {With the emerging growth of digital data in information systems, technology faces the challenge of knowledge prevention, ownership rights protection, security, and privacy measurement of valuable and sensitive data. On-demand availability of various data as services in a shared and automated environment has become a reality with the advent of cloud computing. The digital fingerprinting technique has been adopted as an effective solution to protect the copyright and privacy of digital properties from illegal distribution and identification of malicious traitors over the cloud. Furthermore, it is used to trace the unauthorized distribution and the user of multimedia content distributed through the cloud. In this paper, we propose a novel fingerprinting technique for the cloud environment to protect numeric attributes in relational databases for digital privacy management. 
The proposed solution with the novel fingerprinting scheme is robust and efficient. It can address challenges such as embedding secure data over the cloud, essential to secure relational databases. The proposed technique provides a decoding accuracy of 100%, 90%, and 40% for 10% to 30%, 40%, and 50% of deleted records.}, } @article {pmid34831620, year = {2021}, author = {Huang, C and Yang, Q and Huang, W}, title = {Analysis of the Spatial and Temporal Changes of NDVI and Its Driving Factors in the Wei and Jing River Basins.}, journal = {International journal of environmental research and public health}, volume = {18}, number = {22}, pages = {}, pmid = {34831620}, issn = {1660-4601}, mesh = {China ; *Climate Change ; Human Activities ; Humans ; *Rivers ; Seasons ; Temperature ; }, abstract = {This study aimed to explore the long-term vegetation cover change and its driving factors in the typical watershed of the Yellow River Basin. This research was based on the Google Earth Engine (GEE), a remote sensing cloud platform, and used the Landsat surface reflectance datasets and the Pearson correlation method to analyze the vegetation conditions in the areas above Xianyang on the Wei River and above Zhangjiashan on the Jing River. Random forest and decision tree models were used to analyze the effects of various climatic factors (precipitation, temperature, soil moisture, evapotranspiration, and drought index) on NDVI (normalized difference vegetation index). Then, based on the residual analysis method, the effects of human activities on NDVI were explored. The results showed that: (1) From 1987 to 2018, the NDVI of the two watersheds showed an increasing trend; in particular, after 2008, the average increase rate of NDVI in the growing season (April to September) increased from 0.0032/a and 0.003/a in the base period (1987-2008) to 0.0172/a and 0.01/a in the measurement period (2008-2018), for the Wei and Jing basins, respectively. 
In addition, the NDVI significantly increased from 21.78% and 31.32% in the baseline period (1987-2008) to 83.76% and 92.40% in the measurement period (2008-2018), respectively. (2) The random forest and classification and regression tree model (CART) can assess the contribution and sensitivity of various climate factors to NDVI. Precipitation, soil moisture, and temperature were found to be the three main factors that affect the NDVI of the study area, and their contributions were 37.05%, 26.42%, and 15.72%, respectively. The changes in precipitation and soil moisture in the entire Jing River Basin and the upper and middle reaches of the Wei River above Xianyang caused significant changes in NDVI. Furthermore, changes in precipitation and temperature led to significant changes in NDVI in the lower reaches of the Wei River. (3) The impact of human activities in the Wei and Jing basins on NDVI has gradually changed from negative to positive, which is mainly due to the implementation of soil and water conservation measures. The proportions of areas with positive effects of human activities were 80.88% and 81.95%, of which the proportions of areas with significant positive effects were 11.63% and 7.76%, respectively. These are mainly distributed in the upper reaches of the Wei River and the western and eastern regions of the Jing River. These areas are the key areas where soil and water conservation measures have been implemented in recent years, and the corresponding land use has transformed from cultivated land to forest and grassland. 
The negative effects accounted for 1.66% and 0.10% of the area, respectively, and were mainly caused by urban expansion and coal mining.}, } @article {pmid34828597, year = {2021}, author = {Bhatia, S and Malhotra, J}, title = {Morton Filter-Based Security Mechanism for Healthcare System in Cloud Computing.}, journal = {Healthcare (Basel, Switzerland)}, volume = {9}, number = {11}, pages = {}, pmid = {34828597}, issn = {2227-9032}, abstract = {Electronic health records contain the patient's sensitive information. If these data are acquired by a malicious user, it will not only cause the pilferage of the patient's personal data but also affect the diagnosis and treatment. One of the most challenging tasks in cloud-based healthcare systems is to provide security and privacy to electronic health records. Various probabilistic data structures and watermarking techniques were used in the cloud-based healthcare systems to secure patient's data. Most of the existing studies focus on cuckoo and bloom filters, without considering their throughputs. In this research, a novel cloud security mechanism is introduced, which supersedes the shortcomings of existing approaches. The proposed solution enhances security with methods such as fragile watermark, least significant bit replacement watermarking, class reliability factor, and Morton filters included in the formation of the security mechanism. A Morton filter is an approximate set membership data structure (ASMDS) that proves many improvements to other data structures, such as cuckoo, bloom, semi-sorting cuckoo, and rank and select quotient filters. The Morton filter improves security; it supports insertions, deletions, and lookups operations and improves their respective throughputs by 0.9× to 15.5×, 1.3× to 1.6×, and 1.3× to 2.5×, when compared to cuckoo filters. We used Hadoop version 0.20.3, and the platform was Red Hat Enterprise Linux 6; we executed five experiments, and the average of the results has been taken. 
The results of the simulation work show that our proposed security mechanism provides an effective solution for secure data storage in cloud-based healthcare systems, with a load factor of 0.9. Furthermore, to aid cloud security in healthcare systems, we presented the motivation, objectives, related works, major research gaps, and materials and methods; we, thus, presented and implemented a cloud security mechanism, in the form of an algorithm and a set of results and conclusions.}, } @article {pmid34823545, year = {2021}, author = {Wilson, PH and Rogers, JM and Vogel, K and Steenbergen, B and McGuckian, TB and Duckworth, J}, title = {Home-based (virtual) rehabilitation improves motor and cognitive function for stroke patients: a randomized controlled trial of the Elements (EDNA-22) system.}, journal = {Journal of neuroengineering and rehabilitation}, volume = {18}, number = {1}, pages = {165}, pmid = {34823545}, issn = {1743-0003}, mesh = {Adult ; Australia ; Cognition ; Humans ; Recovery of Function ; *Stroke ; *Stroke Rehabilitation/methods ; Treatment Outcome ; Upper Extremity ; }, abstract = {BACKGROUND: Home-based rehabilitation of arm function is a significant gap in service provision for adult stroke. The EDNA-22 tablet is a portable virtual rehabilitation-based system that provides a viable option for home-based rehabilitation using a suite of tailored movement tasks, and performance monitoring via cloud computing data storage. The study reported here aimed to compare use of the EDNA system with an active control (Graded Repetitive Arm Supplementary Program-GRASP training) group using a parallel RCT design.

METHODS: Of 19 originally randomized, 17 acute-care patients with upper-extremity dysfunction following unilateral stroke completed training in either the treatment (n = 10) or active control groups (n = 7), each receiving 8-weeks of in-home training involving 30-min sessions scheduled 3-4 times weekly. Performance was assessed across motor, cognitive and functional behaviour in the home. Primary motor measures, collected by a blinded assessor, were the Box and Blocks Task (BBT) and 9-Hole Pegboard Test (9HPT), and for cognition the Montreal Cognitive Assessment (MoCA). Functional behaviour was assessed using the Stroke Impact Scale (SIS) and Neurobehavioural Functioning Inventory (NFI).

RESULTS: One participant from each group withdrew for personal reasons. No adverse events were reported. Results showed a significant and large improvement in performance on the BBT for the more-affected hand in the EDNA training group, only (g = 0.90). There was a mild-to-moderate effect of training on the 9HPT for EDNA (g = 0.55) and control (g = 0.42) groups, again for the more affected hand. In relation to cognition, performance on the MoCA improved for the EDNA group (g = 0.70). Finally, the EDNA group showed moderate (but non-significant) improvement in functional behaviour on the SIS (g = 0.57) and NFI (g = 0.49).

CONCLUSION: A short course of home-based training using the EDNA-22 system can yield significant gains in motor and cognitive performance, over and above an active control training that also targets upper-limb function. Intriguingly, these changes in performance were corroborated only tentatively in the reports of caregivers. We suggest that future research consider how the implementation of home-based rehabilitation technology can be optimized. We contend that self-administered digitally-enhanced training needs to become part of the health literacy of all stakeholders who are impacted by stroke and other acquired brain injuries. Trial registration Australian New Zealand Clinical Trials Registry (ANZCTR) Number: ACTRN12619001557123. Registered 12 November 2019, http://www.anzctr.org.au/Trial/Registration/TrialReview.aspx?id=378298&isReview=true.}, } @article {pmid34817058, year = {2021}, author = {Suvakov, M and Panda, A and Diesh, C and Holmes, I and Abyzov, A}, title = {CNVpytor: a tool for copy number variation detection and analysis from read depth and allele imbalance in whole-genome sequencing.}, journal = {GigaScience}, volume = {10}, number = {11}, pages = {}, pmid = {34817058}, issn = {2047-217X}, support = {U24 CA220242/CA/NCI NIH HHS/United States ; }, mesh = {Alleles ; *DNA Copy Number Variations ; Genomics ; High-Throughput Nucleotide Sequencing ; Sequence Analysis, DNA ; *Software ; Whole Genome Sequencing ; }, abstract = {BACKGROUND: Detecting copy number variations (CNVs) and copy number alterations (CNAs) based on whole-genome sequencing data is important for personalized genomics and treatment. CNVnator is one of the most popular tools for CNV/CNA discovery and analysis based on read depth.

FINDINGS: Herein, we present an extension of CNVnator developed in Python-CNVpytor. CNVpytor inherits the reimplemented core engine of its predecessor and extends visualization, modularization, performance, and functionality. Additionally, CNVpytor uses B-allele frequency likelihood information from single-nucleotide polymorphisms and small indels data as additional evidence for CNVs/CNAs and as primary information for copy number-neutral losses of heterozygosity.

CONCLUSIONS: CNVpytor is significantly faster than CNVnator-particularly for parsing alignment files (2-20 times faster)-and has (20-50 times) smaller intermediate files. CNV calls can be filtered using several criteria, annotated, and merged over multiple samples. Modular architecture allows it to be used in shared and cloud environments such as Google Colab and Jupyter notebook. Data can be exported into JBrowse, while a lightweight plugin version of CNVpytor for JBrowse enables nearly instant and GUI-assisted analysis of CNVs by any user. CNVpytor release and the source code are available on GitHub at https://github.com/abyzovlab/CNVpytor under the MIT license.}, } @article {pmid34814342, year = {2021}, author = {Shamshirband, S and Joloudari, JH and Shirkharkolaie, SK and Mojrian, S and Rahmani, F and Mostafavi, S and Mansor, Z}, title = {Game theory and evolutionary optimization approaches applied to resource allocation problems in computing environments: A survey.}, journal = {Mathematical biosciences and engineering : MBE}, volume = {18}, number = {6}, pages = {9190-9232}, doi = {10.3934/mbe.2021453}, pmid = {34814342}, issn = {1551-0018}, mesh = {Cloud Computing ; Computers ; *Game Theory ; *Internet of Things ; Resource Allocation ; }, abstract = {Today's intelligent computing environments, including the Internet of Things (IoT), Cloud Computing (CC), Fog Computing (FC), and Edge Computing (EC), allow many organizations worldwide to optimize their resource allocation regarding the quality of service and energy consumption. Due to the acute conditions of utilizing resources by users and the real-time nature of the data, a comprehensive and integrated computing environment has not yet provided a robust and reliable capability for proper resource allocation. 
Although traditional resource allocation approaches in a low-capacity hardware resource system are efficient for small-scale resource providers, for a complex system in the conditions of dynamic computing resources and fierce competition in obtaining resources, they cannot develop and adaptively manage the conditions optimally. To optimize the resource allocation with minimal delay, low energy consumption, minimum computational complexity, high scalability, and better resource utilization efficiency, CC/FC/EC/IoT-based computing architectures should be designed intelligently. Therefore, the objective of this research is a comprehensive survey on resource allocation problems using computational intelligence-based evolutionary optimization and mathematical game theory approaches in different computing environments according to the latest scientific research achievements.}, } @article {pmid34814341, year = {2021}, author = {Liu, Y and Huang, W and Wang, L and Zhu, Y and Chen, N}, title = {Dynamic computation offloading algorithm based on particle swarm optimization with a mutation operator in multi-access edge computing.}, journal = {Mathematical biosciences and engineering : MBE}, volume = {18}, number = {6}, pages = {9163-9189}, doi = {10.3934/mbe.2021452}, pmid = {34814341}, issn = {1551-0018}, mesh = {*Algorithms ; Mutation ; Probability ; }, abstract = {The current computation offloading algorithm for the mobile cloud ignores the selection of offloading opportunities and does not consider the uninstall frequency, resource waste, and energy efficiency reduction of the user's offloading success probability. Therefore, in this study, a dynamic computation offloading algorithm based on particle swarm optimization with a mutation operator in a multi-access edge computing environment is proposed (DCO-PSOMO). 
According to the CPU utilization and the memory utilization rate of the mobile terminal, this method can dynamically obtain the overload time by using a strong, locally weighted regression method. After detecting the overload time, the probability of successful downloading is predicted by the mobile user's dwell time and edge computing communication range, and the offloading is either conducted immediately or delayed. A computation offloading model was established via the use of the response time and energy consumption of the mobile terminal. Additionally, the optimal computing offloading algorithm was designed via the use of a particle swarm with a mutation operator. Finally, the DCO-PSOMO algorithm was compared with the JOCAP, ECOMC and ESRLR algorithms, and the experimental results demonstrated that the DCO-PSOMO offloading method can effectively reduce the offloading cost and terminal energy consumption, and improves the success probability of offloading and the user's QoS.}, } @article {pmid34814262, year = {2021}, author = {Al-Zumia, FA and Tian, Y and Al-Rodhaan, M}, title = {A novel fault-tolerant privacy-preserving cloud-based data aggregation scheme for lightweight health data.}, journal = {Mathematical biosciences and engineering : MBE}, volume = {18}, number = {6}, pages = {7539-7560}, doi = {10.3934/mbe.2021373}, pmid = {34814262}, issn = {1551-0018}, mesh = {Algorithms ; Cloud Computing ; *Computer Security ; Confidentiality ; Data Aggregation ; Humans ; *Privacy ; }, abstract = {Mobile health networks (MHNWs) have facilitated instant medical health care and remote health monitoring for patients. Currently, a vast amount of health data needs to be quickly collected, processed and analyzed. The main barrier to doing so is the limited amount of the computational storage resources that are required for MHNWs. Therefore, health data must be outsourced to the cloud. 
Although the cloud has the benefits of powerful computation capabilities and intensive storage resources, security and privacy concerns exist. Therefore, our study examines how to collect and aggregate these health data securely and efficiently, with a focus on the theoretical importance and application potential of the aggregated data. In this work, we propose a novel design for a private and fault-tolerant cloud-based data aggregation scheme. Our design is based on a future ciphertext mechanism for improving the fault tolerance capabilities of MHNWs. Our scheme is privatized via differential privacy, which is achieved by encrypting noisy health data and enabling the cloud to obtain the results of only the noisy sum. Our scheme is efficient, reliable and secure and combines different approaches and algorithms to improve the security and efficiency of the system. Our proposed scheme is evaluated with an extensive simulation study, and the simulation results show that it is efficient and reliable. The computational cost of our scheme is significantly less than that of the related scheme. The aggregation error is minimized from $O\left(\sqrt{w+1}\right)$ in the related scheme to $O(1)$ in our scheme.}, } @article {pmid34812799, year = {2021}, author = {Huang, L and Tian, S and Zhao, W and Liu, K and Ma, X and Guo, J and Yin, M}, title = {5G-Enabled intelligent construction of a chest pain center with up-conversion lateral flow immunoassay.}, journal = {The Analyst}, volume = {146}, number = {24}, pages = {7702-7709}, doi = {10.1039/d1an01592c}, pmid = {34812799}, issn = {1364-5528}, mesh = {Biomarkers ; Cloud Computing ; Early Diagnosis ; Humans ; Immunoassay ; *Myocardial Infarction/diagnosis ; *Pain Clinics ; Smartphone ; Troponin I ; Wireless Technology ; }, abstract = {Acute myocardial infarction (AMI) has become a worldwide health problem because of its rapid onset and high mortality. 
Cardiac troponin I (cTnI) is the gold standard for diagnosis of AMI, and its rapid and accurate detection is critical for early diagnosis and management of AMI. Using a lateral flow immunoassay with upconverting nanoparticles as fluorescent probes, we developed an up-conversion fluorescence reader capable of rapidly quantifying the cTnI concentration in serum based upon the fluorescence intensity of the test and control lines on the test strip. Reliable detection of cTnI in the range 0.1-50 ng mL[-1] could be achieved in 15 min, with a lower detection limit of 0.1 ng mL[-1]. The reader was also adapted for use on a 5th generation (5G) mobile network enabled intelligent chest pain center. Through Bluetooth wireless communication, the results achieved using the reader on an ambulance heading to a central hospital could be transmitted to a 5G smartphone and uploaded for real-time edge computing and cloud storage. An application in the 5G smartphone allows users to upload their medical information to establish dedicated electronic health records and doctors to monitor patients' health status and provide remote medical services. Combined with mobile internet and big data, the 5G-enabled intelligent chest pain center with up-conversion lateral flow immunoassay may predict the onset of AMI and save valuable time for patients suffering an AMI.}, } @article {pmid34812394, year = {2021}, author = {Navaz, AN and Serhani, MA and El Kassabi, HT and Al-Qirim, N and Ismail, H}, title = {Trends, Technologies, and Key Challenges in Smart and Connected Healthcare.}, journal = {IEEE access : practical innovations, open solutions}, volume = {9}, number = {}, pages = {74044-74067}, pmid = {34812394}, issn = {2169-3536}, abstract = {Cardio Vascular Diseases (CVD) is the leading cause of death globally and is increasing at an alarming rate, according to the American Heart Association's Heart Attack and Stroke Statistics-2021. 
This increase has been further exacerbated because of the current coronavirus (COVID-19) pandemic, thereby increasing the pressure on existing healthcare resources. Smart and Connected Health (SCH) is a viable solution for the prevalent healthcare challenges. It can reshape the course of healthcare to be more strategic, preventive, and custom-designed, making it more effective with value-added services. This research endeavors to classify state-of-the-art SCH technologies via a thorough literature review and analysis to comprehensively define SCH features and identify the enabling technology-related challenges in SCH adoption. We also propose an architectural model that captures the technological aspect of the SCH solution, its environment, and its primary involved stakeholders. It serves as a reference model for SCH acceptance and implementation. We reflected the COVID-19 case study illustrating how some countries have tackled the pandemic differently in terms of leveraging the power of different SCH technologies, such as big data, cloud computing, Internet of Things, artificial intelligence, robotics, blockchain, and mobile applications. In combating the pandemic, SCH has been used efficiently at different stages such as disease diagnosis, virus detection, individual monitoring, tracking, controlling, and resource allocation. 
Furthermore, this review highlights the challenges to SCH acceptance, as well as the potential research directions for better patient-centric healthcare.}, } @article {pmid34812390, year = {2021}, author = {Dong, Y and Yao, YD}, title = {IoT Platform for COVID-19 Prevention and Control: A Survey.}, journal = {IEEE access : practical innovations, open solutions}, volume = {9}, number = {}, pages = {49929-49941}, pmid = {34812390}, issn = {2169-3536}, abstract = {As a result of the worldwide transmission of severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), coronavirus disease 2019 (COVID-19) has evolved into an unprecedented pandemic. Currently, with unavailable pharmaceutical treatments and low vaccination rates, this novel coronavirus results in a great impact on public health, human society, and global economy, which is likely to last for many years. One of the lessons learned from the COVID-19 pandemic is that a long-term system with non-pharmaceutical interventions for preventing and controlling new infectious diseases is desirable to be implemented. Internet of things (IoT) platform is preferred to be utilized to achieve this goal, due to its ubiquitous sensing ability and seamless connectivity. IoT technology is changing our lives through smart healthcare, smart home, and smart city, which aims to build a more convenient and intelligent community. This paper presents how the IoT could be incorporated into the epidemic prevention and control system. Specifically, we demonstrate a potential fog-cloud combined IoT platform that can be used in the systematic and intelligent COVID-19 prevention and control, which involves five interventions including COVID-19 Symptom Diagnosis, Quarantine Monitoring, Contact Tracing & Social Distancing, COVID-19 Outbreak Forecasting, and SARS-CoV-2 Mutation Tracking. 
We investigate and review the state-of-the-art literature of these five interventions to present the capabilities of IoT in countering the current COVID-19 pandemic or future infectious disease epidemics.}, } @article {pmid34812296, year = {2020}, author = {Wang, Y and Peng, D and Yu, L and Zhang, Y and Yin, J and Zhou, L and Zheng, S and Wang, F and Li, C}, title = {Monitoring Crop Growth During the Period of the Rapid Spread of COVID-19 in China by Remote Sensing.}, journal = {IEEE journal of selected topics in applied earth observations and remote sensing}, volume = {13}, number = {}, pages = {6195-6205}, pmid = {34812296}, issn = {1939-1404}, abstract = {The status of crop growth under the influence of COVID-19 is important information for evaluating the current food security in China. This article used the cloud computing platform of Google Earth Engine, to access and analyze Sentinel-2, MODIS, and other multisource remote sensing data in the last five years to monitor the growth of crops in China, especially in Hubei province, during the period of the rapid spread of COVID-19 (i.e., from late January to mid-March 2020), and compared with the growth over the same period under similar climate conditions in the past four years. We further analyzed the indirect effects of COVID-19 on crop growth. The results showed that: the area of the crops with better growth (51%) was much more than that with worse growth (22%); the crops with better and worse growth were mainly distributed in the North China Plain (the main planting areas of winter wheat in China) and the South China regions (such as Guangxi, Guangdong province), respectively. The area of the crops with a similar growth occupied 27%. In Hubei province, the area of the crops with better growth (61%) was also more than that with worse growth (27%). 
It was found that there was no obvious effect from COVID-19 on the overall growth of crops in China during the period from late January to mid-March 2020 and the growth of crops was much better than that during the same period in previous years. The findings in this study are helpful in evaluating the impact of the COVID-19 on China's agriculture, which are conducive to serve the relevant agricultural policy formulation and to ensure food security.}, } @article {pmid34812247, year = {2021}, author = {Laxmi Lydia, E and Anupama, CSS and Beno, A and Elhoseny, M and Alshehri, MD and Selim, MM}, title = {Cognitive computing-based COVID-19 detection on Internet of things-enabled edge computing environment.}, journal = {Soft computing}, volume = {}, number = {}, pages = {1-12}, pmid = {34812247}, issn = {1432-7643}, abstract = {In the current pandemic, smart technologies such as cognitive computing, artificial intelligence, pattern recognition, chatbot, wearables, and blockchain can sufficiently support the collection, analysis, and processing of medical data for decision making. Particularly, to aid medical professionals in the disease diagnosis process, cognitive computing is helpful by processing massive quantities of data rapidly and generating customized smart recommendations. On the other hand, the present world is facing a pandemic of COVID-19 and an earlier detection process is essential to reduce the mortality rate. Deep learning (DL) models are useful in assisting radiologists to investigate the large quantity of chest X-ray images. However, they require a large amount of training data and it needs to be centralized for processing. Therefore, federated learning (FL) concept can be used to generate a shared model with no use of local data for DL-based COVID-19 detection. In this view, this paper presents a federated deep learning-based COVID-19 (FDL-COVID) detection model on an IoT-enabled edge computing environment. 
Primarily, the IoT devices capture the patient data, and then the DL model is designed using the SqueezeNet model. The IoT devices upload the encrypted variables into the cloud server which then performs FL on major variables using the SqueezeNet model to produce a global cloud model. Moreover, the glowworm swarm optimization algorithm is utilized to optimally tune the hyperparameters involved in the SqueezeNet architecture. A wide range of experiments were conducted on a benchmark CXR dataset, and the outcomes are assessed with respect to different measures. The experimental outcomes pointed out the enhanced performance of the FDL-COVID technique over the other methods.}, } @article {pmid34812221, year = {2022}, author = {Nagajayanthi, B}, title = {Decades of Internet of Things Towards Twenty-first Century: A Research-Based Introspective.}, journal = {Wireless personal communications}, volume = {123}, number = {4}, pages = {3661-3697}, pmid = {34812221}, issn = {0929-6212}, abstract = {Internet connects people to people, people to machine, and machine to machine for a life of serendipity through a Cloud. Internet of Things networks objects or people and integrates them with software to collect and exchange data. The Internet of things (IoT) influences our lives based on how we ruminate, respond, and anticipate. IoT 2021 heralds from the fringes to the data ecosystem and panaches a comfort zone. IoT is overwhelmingly embraced by businessmen and consumers due to increased productivity and convenience. Internet of Things facilitates intelligent device control with cloud vendors like Amazon and Google using artificial intelligence for data analytics, and with digital assistants like Alexa and Siri providing a voice user interface. Smart IoT is all about duplex connecting, processing, and implementing. Centralized IoT architecture is vulnerable to cyber-attacks. With Block Chain, it is possible to maintain transparency and security of the transaction's data. 
Robotic Process Automation (RPA) using bots has automated laborious tasks in 2019. Embedded Internet using Facial Recognition could reduce the coronavirus pandemic crisis by making a paradigm shift from fingerprint sensors to facial recognition. Security concerns are addressed with micro-segmentation approaches. IoT, an incredible vision of the future makes systems adaptive with customized features, responsive with increased efficiency, and procurable with optimized cost. This research delivers a comprehensive insight into the technical perspectives of IoT, focusing on interoperability, flexibility, scalability, mobility, security, transparency, standardization, and low energy. A smart classroom is implemented based on the concepts of IoT.}, } @article {pmid34805975, year = {2021}, author = {Khan, FS and Bao, N}, title = {Quantum Prisoner's Dilemma and High Frequency Trading on the Quantum Cloud.}, journal = {Frontiers in artificial intelligence}, volume = {4}, number = {}, pages = {769392}, pmid = {34805975}, issn = {2624-8212}, abstract = {High-frequency trading (HFT) offers an excellent use case and a potential killer application of the commercially available, first generation quasi-quantum computers. To this end, we offer here a simple game-theoretic model of HFT as the famous two player game, Prisoner's Dilemma. We explore the implementation of HFT as an instance of Prisoner's Dilemma on the (quasi) quantum cloud using the Eisert, Wilkens, and Lewenstein quantum mediated communication protocol, and how this implementation can not only increase transaction speed but also improve the lot of the players in HFT. 
Using cooperative game-theoretic reasoning, we also note that in the near future when the internet is properly quantum, players will be able to achieve Pareto-optimality in HFT as an instance of reinforced machine learning.}, } @article {pmid34805503, year = {2021}, author = {Farid, M and Latip, R and Hussin, M and Abdul Hamid, NAW}, title = {A fault-intrusion-tolerant system and deadline-aware algorithm for scheduling scientific workflow in the cloud.}, journal = {PeerJ. Computer science}, volume = {7}, number = {}, pages = {e747}, pmid = {34805503}, issn = {2376-5992}, abstract = {BACKGROUND: Recent technological developments have enabled the execution of more scientific solutions on cloud platforms. Cloud-based scientific workflows are subject to various risks, such as security breaches and unauthorized access to resources. By attacking side channels or virtual machines, attackers may destroy servers, causing interruption and delay or incorrect output. Although cloud-based scientific workflows are often used for vital computational-intensive tasks, their failure can come at a great cost.

METHODOLOGY: To increase workflow reliability, we propose the Fault and Intrusion-tolerant Workflow Scheduling algorithm (FITSW). The proposed workflow system uses task executors consisting of many virtual machines to carry out workflow tasks. FITSW duplicates each sub-task three times, uses an intermediate data decision-making mechanism, and then employs a deadline partitioning method to determine sub-deadlines for each sub-task. This way, dynamism is achieved in task scheduling using the resource flow. The proposed technique generates or recycles task executors, keeps the workflow clean, and improves efficiency. Experiments were conducted on WorkflowSim to evaluate the effectiveness of FITSW using metrics such as task completion rate, success rate and completion time.

RESULTS: The results show that FITSW not only raises the success rate by about 12%, it also improves the task completion rate by 6.2% and minimizes the completion time by about 15.6% in comparison with intrusion tolerant scientific workflow ITSW system.}, } @article {pmid34804767, year = {2022}, author = {Quy, VK and Hau, NV and Anh, DV and Ngoc, LA}, title = {Smart healthcare IoT applications based on fog computing: architecture, applications and challenges.}, journal = {Complex & intelligent systems}, volume = {8}, number = {5}, pages = {3805-3815}, pmid = {34804767}, issn = {2198-6053}, abstract = {The history of human development has proven that medical and healthcare applications for humanity always are the main driving force behind the development of science and technology. The advent of Cloud technology for the first time allows providing systems infrastructure as a service, platform as a service and software as a service. Cloud technology has dominated healthcare information systems for decades now. However, one limitation of cloud-based applications is the high service response time. In some emergency scenarios, the control and monitoring of patient status, decision-making with related resources are limited such as hospital, ambulance, doctor, medical conditions in seconds and has a direct impact on the life of patients. To solve these challenges, optimal computing technologies have been proposed such as cloud computing, edge computing, and fog computing technologies. In this article, we make a comparison between computing technologies. Then, we present a common architectural framework based on fog computing for Internet of Health Things (Fog-IoHT) applications. Besides, we also indicate possible applications and challenges in integrating fog computing into IoT Healthcare applications. The analysis results indicated that there is huge potential for IoHT applications based on fog computing. 
We hope, this study will be an important guide for the future development of fog-based Healthcare IoT applications.}, } @article {pmid34804200, year = {2021}, author = {Hasanin, T and Alsobhi, A and Khadidos, A and Qahmash, A and Khadidos, A and Ogunmola, GA}, title = {Efficient Multiuser Computation for Mobile-Edge Computing in IoT Application Using Optimization Algorithm.}, journal = {Applied bionics and biomechanics}, volume = {2021}, number = {}, pages = {9014559}, pmid = {34804200}, issn = {1176-2322}, abstract = {Mobile edge computing (MEC) is a paradigm novel computing that promises the dramatic effect of reduction in latency and consumption of energy by computation offloading intensive; these tasks to the edge clouds in proximity close to the smart mobile users. In this research, reduce the offloading and latency between the edge computing and multiusers under the environment IoT application in 5G using bald eagle search optimization algorithm. The deep learning approach may consume high computational complexity and more time. In an edge computing system, devices can offload their computation-intensive tasks to the edge servers to save energy and shorten their latency. The bald eagle algorithm (BES) is the advanced optimization algorithm that resembles the strategy of eagle hunting. The strategies are select, search, and swooping stages. Previously, the BES algorithm is used to consume the energy and distance; to improve the better energy and reduce the offloading latency in this research and some delays occur when devices increase causes demand for cloud data, it can be improved by offering ROS (resource) estimation. To enhance the BES algorithm that introduces the ROS estimation stage to select the better ROSs, an edge system, which offloads the most appropriate IoT subtasks to edge servers then the expected time of execution, got minimized. 
Based on multiuser offloading, we proposed a bald eagle search optimization algorithm that can effectively reduce the end-end time to get fast and near-optimal IoT devices. The latency is reduced from the cloud to the local; this can be overcome by using edge computing, and deep learning expects faster and better results from the network. This can be proposed by BES algorithm technique that is better than other conventional methods that are compared on results to minimize the offloading latency. Then, the simulation is done to show the efficiency and stability by reducing the offloading latency.}, } @article {pmid34801873, year = {2021}, author = {Retico, A and Avanzo, M and Boccali, T and Bonacorsi, D and Botta, F and Cuttone, G and Martelli, B and Salomoni, D and Spiga, D and Trianni, A and Stasi, M and Iori, M and Talamonti, C}, title = {Enhancing the impact of Artificial Intelligence in Medicine: A joint AIFM-INFN Italian initiative for a dedicated cloud-based computing infrastructure.}, journal = {Physica medica : PM : an international journal devoted to the applications of physics to medicine and biology : official journal of the Italian Association of Biomedical Physics (AIFB)}, volume = {91}, number = {}, pages = {140-150}, doi = {10.1016/j.ejmp.2021.10.005}, pmid = {34801873}, issn = {1724-191X}, mesh = {*Artificial Intelligence ; *Cloud Computing ; Humans ; Italy ; Nuclear Physics ; Precision Medicine ; }, abstract = {Artificial Intelligence (AI) techniques have been implemented in the field of Medical Imaging for more than forty years. Medical Physicists, Clinicians and Computer Scientists have been collaborating since the beginning to realize software solutions to enhance the informative content of medical images, including AI-based support systems for image interpretation. 
Despite the recent massive progress in this field due to the current emphasis on Radiomics, Machine Learning and Deep Learning, there are still some barriers to overcome before these tools are fully integrated into the clinical workflows to finally enable a precision medicine approach to patients' care. Nowadays, as Medical Imaging has entered the Big Data era, innovative solutions to efficiently deal with huge amounts of data and to exploit large and distributed computing resources are urgently needed. In the framework of a collaboration agreement between the Italian Association of Medical Physicists (AIFM) and the National Institute for Nuclear Physics (INFN), we propose a model of an intensive computing infrastructure, especially suited for training AI models, equipped with secure storage systems, compliant with data protection regulation, which will accelerate the development and extensive validation of AI-based solutions in the Medical Imaging field of research. This solution can be developed and made operational by Physicists and Computer Scientists working on complementary fields of research in Physics, such as High Energy Physics and Medical Physics, who have all the necessary skills to tailor the AI-technology to the needs of the Medical Imaging community and to shorten the pathway towards the clinical applicability of AI-based decision support systems.}, } @article {pmid34798231, year = {2021}, author = {Dong, L and Li, J and Zou, Q and Zhang, Y and Zhao, L and Wen, X and Gong, J and Li, F and Liu, T and Evans, AC and Valdes-Sosa, PA and Yao, D}, title = {WeBrain: A web-based brainformatics platform of computational ecosystem for EEG big data analysis.}, journal = {NeuroImage}, volume = {245}, number = {}, pages = {118713}, doi = {10.1016/j.neuroimage.2021.118713}, pmid = {34798231}, issn = {1095-9572}, mesh = {Big Data ; *Cloud Computing ; *Computational Biology ; *Electroencephalography ; Humans ; Software ; Systems Integration ; }, abstract = {The 
current evolution of 'cloud neuroscience' leads to more efforts with the large-scale EEG applications, by using EEG pipelines to handle the rapidly accumulating EEG data. However, there are a few specific cloud platforms that seek to address the cloud computational challenges of EEG big data analysis to benefit the EEG community. In response to the challenges, a WeBrain cloud platform (https://webrain.uestc.edu.cn/) is designed as a web-based brainformatics platform and computational ecosystem to enable large-scale EEG data storage, exploration and analysis using cloud high-performance computing (HPC) facilities. WeBrain connects researchers from different fields to EEG and multimodal tools that have become the norm in the field and the cloud processing power required to handle those large EEG datasets. This platform provides an easy-to-use system for novice users (even no computer programming skills) and provides satisfactory maintainability, sustainability and flexibility for IT administrators and tool developers. A range of resources are also available on https://webrain.uestc.edu.cn/, including documents, manuals, example datasets related to WeBrain, and collected links to open EEG datasets and tools. It is not necessary for users or administrators to install any software or system, and all that is needed is a modern web browser, which reduces the technical expertise required to use or manage WeBrain. 
The WeBrain platform is sponsored and driven by the China-Canada-Cuba international brain cooperation project (CCC-Axis, http://ccc-axis.org/), and we hope that WeBrain will be a promising cloud brainformatics platform for exploring brain information in large-scale EEG applications in the EEG community.}, } @article {pmid34777973, year = {2021}, author = {Moursi, AS and El-Fishawy, N and Djahel, S and Shouman, MA}, title = {An IoT enabled system for enhanced air quality monitoring and prediction on the edge.}, journal = {Complex & intelligent systems}, volume = {7}, number = {6}, pages = {2923-2947}, pmid = {34777973}, issn = {2198-6053}, abstract = {Air pollution is a major issue resulting from the excessive use of conventional energy sources in developing countries and worldwide. Particulate Matter less than 2.5 µm in diameter (PM2.5) is the most dangerous air pollutant invading the human respiratory system and causing lung and heart diseases. Therefore, innovative air pollution forecasting methods and systems are required to reduce such risk. To that end, this paper proposes an Internet of Things (IoT) enabled system for monitoring and predicting PM2.5 concentration on both edge devices and the cloud. This system employs a hybrid prediction architecture using several Machine Learning (ML) algorithms hosted by Nonlinear AutoRegression with eXogenous input (NARX). It uses the past 24 h of PM2.5, cumulated wind speed and cumulated rain hours to predict the next hour of PM2.5. This system was tested on a PC to evaluate cloud prediction and a Raspberry Pi to evaluate edge devices' prediction. Such a system is essential, responding quickly to air pollution in remote areas with low bandwidth or no internet connection. The performance of our system was assessed using Root Mean Square Error (RMSE), Normalized Root Mean Square Error (NRMSE), coefficient of determination (R [2]), Index of Agreement (IA), and duration in seconds. 
The obtained results highlighted that NARX/LSTM achieved the highest R [2] and IA and the least RMSE and NRMSE, outperforming other previously proposed deep learning hybrid algorithms. In contrast, NARX/XGBRF achieved the best balance between accuracy and speed on the Raspberry Pi.}, } @article {pmid34777904, year = {2021}, author = {Zhang, M and Dai, D and Hou, S and Liu, W and Gao, F and Xu, D and Hu, Y}, title = {Thinking on the informatization development of China's healthcare system in the post-COVID-19 era.}, journal = {Intelligent medicine}, volume = {1}, number = {1}, pages = {24-28}, pmid = {34777904}, issn = {2667-1026}, abstract = {With the application of Internet of Things, big data, cloud computing, artificial intelligence, and other cutting-edge technologies, China's medical informatization is developing rapidly. In this paper, we summarized the role of information technology in healthcare sector's battle against the coronavirus disease 2019 (COVID-19) from the perspectives of early warning and monitoring, screening and diagnosis, medical treatment and scientific research, analyze the bottlenecks of the development of information technology in the post-COVID-19 era, and put forward feasible suggestions for further promoting the construction of medical informatization from the perspectives of sharing, convenience, and safety.}, } @article {pmid34777850, year = {2021}, author = {Alipour, J and Mehdipour, Y and Karimi, A and Sharifian, R}, title = {Affecting factors of cloud computing adoption in public hospitals affiliated with Zahedan University of Medical Sciences: A cross-sectional study in the Southeast of Iran.}, journal = {Digital health}, volume = {7}, number = {}, pages = {20552076211033428}, pmid = {34777850}, issn = {2055-2076}, abstract = {OBJECTIVE: Health care organizations require cloud computing to remain efficient and cost-effective, and provide high-quality health care services. 
Adoption of this technology by users plays a critical role in the success of its application. This study aimed to determine factors affecting cloud computing adoption in public hospitals affiliated with Zahedan University of Medical Sciences.

METHODS: A cross-sectional descriptive and analytic study was performed in 2017. The study population comprised information technology and hospital information system authorities and hospital information system users. The sample consisted of 573 participants. The data were collected using a questionnaire and analyzed with the Statistical Package for Social Sciences software using descriptive and analytical statistics.

RESULTS: The mean score of environmental, human, organizational, technological, and intention dimensions of cloud computing adoption was 3.39 ± 0.81, 3.27 ± 0.63, 3.19 ± 0.71, 3 ± 0.43, and 3.55 ± 1.10, respectively. Furthermore, a significant positive relationship was found between intention of cloud computing adoption and environmental (R = 0.521, p = 0.000), organizational (R = 0.426, p = 0.000), human (R = 0.492, p = 0.000), and technological dimensions (R = 0.157, p = 0.000).

CONCLUSIONS: Benefits of cloud computing adoption, relative advantage, and competitive pressure were identified as the most influential factors in accepting cloud computing. Simplifying the users' understanding of this technology and its application, improving the staff's technical capabilities, promoting executive managers' understanding of the nature and functions of cloud computing, and fully supporting and increasing governmental mandates for adoption of new technologies are necessary for facilitating the adoption of cloud computing in given hospitals.}, } @article {pmid34776626, year = {2022}, author = {Durai, CAD and Begum, A and Jebaseeli, J and Sabahath, A}, title = {COVID-19 pandemic, predictions and control in Saudi Arabia using SIR-F and age-structured SEIR model.}, journal = {The Journal of supercomputing}, volume = {78}, number = {5}, pages = {7341-7353}, pmid = {34776626}, issn = {0920-8542}, abstract = {COVID-19 has affected every individual physically or physiologically, leading to substantial impacts on how they perceive and respond to the pandemic's danger. Due to the lack of vaccines or effective medicines to cure the infection, an urgent control measure is required to prevent the continued spread of COVID-19. This can be achieved using advanced computing, such as artificial intelligence (AI), machine learning (ML), deep learning (DL), cloud computing, and edge computing. To control the exponential spread of the novel virus, it is crucial for countries to contain and mitigate interventions. To prevent exponential growth, several control measures have been applied in the Kingdom of Saudi Arabia to mitigate the COVID-19 epidemic. As the pandemic has been spreading globally for more than a year, an ample amount of data is available for researchers to predict and forecast the effect of the pandemic in the near future. 
This article interprets the effects of COVID-19 using the Susceptible-Infected-Recovered (SIR-F), where F stands for 'Fatal with confirmation,' age-structured SEIR (Susceptible Exposed Infectious Removed) and machine learning for smart health care and the well-being of citizens of Saudi Arabia. Additionally, it examines the different control measure scenarios produced by the modified SEIR model. The evolution of the simulation results shows that the interventions are vital to flatten the virus spread curve, which can delay the peak and decrease the fatality rate.}, } @article {pmid34770615, year = {2021}, author = {Ala'anzy, MA and Othman, M and Hanapi, ZM and Alrshah, MA}, title = {Locust Inspired Algorithm for Cloudlet Scheduling in Cloud Computing Environments.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {21}, pages = {}, pmid = {34770615}, issn = {1424-8220}, mesh = {Algorithms ; Animals ; *Cloud Computing ; Computers ; *Grasshoppers ; Heuristics ; }, abstract = {Cloud computing is an emerging paradigm that offers flexible and seamless services for users based on their needs, including user budget savings. However, the involvement of a vast number of cloud users has made the scheduling of users' tasks (i.e., cloudlets) a challenging issue in selecting suitable data centres, servers (hosts), and virtual machines (VMs). Cloudlet scheduling is an NP-complete problem that can be solved using various meta-heuristic algorithms, which are quite popular due to their effectiveness. Massive user tasks and rapid growth in cloud resources have become increasingly complex challenges; therefore, an efficient algorithm is necessary for allocating cloudlets efficiently to attain better execution times, resource utilisation, and waiting times. This paper proposes a cloudlet scheduling, locust inspired algorithm to reduce the average makespan and waiting time and to boost VM and server utilisation. 
The CloudSim toolkit was used to evaluate our algorithm's efficiency, and the obtained results revealed that our algorithm outperforms other state-of-the-art nature-inspired algorithms, improving the average makespan, waiting time, and resource utilisation.}, } @article {pmid34770606, year = {2021}, author = {Singamaneni, KK and Ramana, K and Dhiman, G and Singh, S and Yoon, B}, title = {A Novel Blockchain and Bi-Linear Polynomial-Based QCP-ABE Framework for Privacy and Security over the Complex Cloud Data.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {21}, pages = {}, pmid = {34770606}, issn = {1424-8220}, support = {2019R1A2C1085388//National Research Foundation of Korea/ ; S-2020-G0001-00050.//Dongguk University Research Fund/ ; }, mesh = {Algorithms ; *Blockchain ; Cloud Computing ; Computer Security ; Privacy ; }, abstract = {As a result of the limited resources available in IoT local devices, the large scale cloud consumer's data that are produced by IoT related machines are contracted out to the cloud. Cloud computing is unreliable, using it can compromise user privacy, and data may be leaked. Because cloud-data and grid infrastructure are both growing exponentially, there is an urgent need to explore computational sources and cloud large-data protection. Numerous cloud service categories are assimilated into numerous fields, such as defense systems and pharmaceutical databases, to compute information space and allocation of resources. Attribute Based Encryption (ABE) is a sophisticated approach which can permit employees to specify a higher level of security for data stored in cloud storage facilities. Numerous obsolete ABE techniques are practical when applied to small data sets to generate cryptograms with restricted computational properties; their properties are used to generate the key, encrypt it, and decrypt it. 
To address the current concerns, a dynamic non-linear polynomial chaotic quantum hash technique on top of secure block chain model can be used for enhancing cloud data security while maintaining user privacy. In the proposed method, customer attributes are guaranteed by using a dynamic non-polynomial chaotic map function for the key initialization, encryption, and decryption. In the proposed model, both organized and unorganized massive clinical data are considered to be inputs for reliable corroboration and encoding. Compared to existing models, the real-time simulation results demonstrate that the stated standard is more precise than 90% in terms of bit change and more precise than 95% in terms of dynamic key generation, encipherment, and decipherment time.}, } @article {pmid34770582, year = {2021}, author = {Roig, PJ and Alcaraz, S and Gilly, K and Bernad, C and Juiz, C}, title = {Modeling of a Generic Edge Computing Application Design.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {21}, pages = {}, pmid = {34770582}, issn = {1424-8220}, mesh = {*Cloud Computing ; }, abstract = {Edge computing applications leverage advances in edge computing along with the latest trends of convolutional neural networks in order to achieve ultra-low latency, high-speed processing, low-power consumption scenarios, which are necessary for deploying real-time Internet of Things deployments efficiently. As the importance of such scenarios is growing by the day, we propose to undertake two different kinds of models, such as an algebraic model, with a process algebra called ACP and a coding model with a modeling language called Promela. Both approaches have been used to build models considering an edge infrastructure with a cloud backup, which has been further extended with the addition of extra fog nodes, and after having applied the proper verification techniques, they have all been duly verified. 
Specifically, a generic edge computing design has been specified in an algebraic manner with ACP, being followed by its corresponding algebraic verification, whereas it has also been specified by means of Promela code, which has been verified by means of the model checker Spin.}, } @article {pmid34770545, year = {2021}, author = {Ahmad, Z and Jehangiri, AI and Ala'anzy, MA and Othman, M and Umar, AI}, title = {Fault-Tolerant and Data-Intensive Resource Scheduling and Management for Scientific Applications in Cloud Computing.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {21}, pages = {}, pmid = {34770545}, issn = {1424-8220}, mesh = {*Algorithms ; *Cloud Computing ; Computer Simulation ; Heuristics ; Workflow ; }, abstract = {Cloud computing is a fully fledged, matured and flexible computing paradigm that provides services to scientific and business applications in a subscription-based environment. Scientific applications such as Montage and CyberShake are organized scientific workflows with data and compute-intensive tasks and also have some special characteristics. These characteristics include the tasks of scientific workflows that are executed in terms of integration, disintegration, pipeline, and parallelism, and thus require special attention to task management and data-oriented resource scheduling and management. The tasks executed during pipeline are considered as bottleneck executions, the failure of which result in the wholly futile execution, which requires a fault-tolerant-aware execution. The tasks executed during parallelism require similar instances of cloud resources, and thus, cluster-based execution may upgrade the system performance in terms of make-span and execution cost. Therefore, this research work presents a cluster-based, fault-tolerant and data-intensive (CFD) scheduling for scientific applications in cloud environments. 
The CFD strategy addresses the data intensiveness of tasks of scientific workflows with cluster-based, fault-tolerant mechanisms. The Montage scientific workflow is considered as a simulation and the results of the CFD strategy were compared with three well-known heuristic scheduling policies: (a) MCT, (b) Max-min, and (c) Min-min. The simulation results showed that the CFD strategy reduced the make-span by 14.28%, 20.37%, and 11.77%, respectively, as compared with the existing three policies. Similarly, the CFD reduces the execution cost by 1.27%, 5.3%, and 2.21%, respectively, as compared with the existing three policies. In case of the CFD strategy, the SLA is not violated with regard to time and cost constraints, whereas it is violated by the existing policies numerous times.}, } @article {pmid34770533, year = {2021}, author = {da Costa Bezerra, SF and Filho, ASM and Delicato, FC and da Rocha, AR}, title = {Processing Complex Events in Fog-Based Internet of Things Systems for Smart Agriculture.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {21}, pages = {}, pmid = {34770533}, issn = {1424-8220}, mesh = {Agriculture ; Cloud Computing ; *Internet of Things ; }, abstract = {The recent growth of the Internet of Things' services and applications has increased data processing and storage requirements. The Edge computing concept aims to leverage the processing capabilities of the IoT and other devices placed at the edge of the network. One embodiment of this paradigm is Fog computing, which provides an intermediate and often hierarchical processing tier between the data sources and the remote Cloud. Among the major benefits of this concept, the end-to-end latency can be decreased, thus favoring time-sensitive applications. Moreover, the data traffic at the network core and the Cloud computing workload can be reduced. 
Combining the Fog computing paradigm with Complex Event Processing (CEP) and data fusion techniques has excellent potential for generating valuable knowledge and aiding decision-making processes in the Internet of Things' systems. In this context, we propose a multi-tier complex event processing approach (sensor node, Fog, and Cloud) that promotes fast decision making and is based on information with 98% accuracy. The experiments show a reduction of 77% in the average time of sending messages in the network. In addition, we achieved a reduction of 82% in data traffic.}, } @article {pmid34770496, year = {2021}, author = {Matesanz, P and Graen, T and Fiege, A and Nolting, M and Nejdl, W}, title = {Demand-Driven Data Acquisition for Large Scale Fleets.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {21}, pages = {}, pmid = {34770496}, issn = {1424-8220}, support = {01 MD 19007A//Federal Ministry for Economic Affairs and Energy/ ; }, mesh = {Humans ; *Software ; }, abstract = {Automakers manage vast fleets of connected vehicles and face an ever-increasing demand for their sensor readings. This demand originates from many stakeholders, each potentially requiring different sensors from different vehicles. Currently, this demand remains largely unfulfilled due to a lack of systems that can handle such diverse demands efficiently. Vehicles are usually passive participants in data acquisition, each continuously reading and transmitting the same static set of sensors. However, in a multi-tenant setup with diverse data demands, each vehicle potentially needs to provide different data instead. We present a system that performs such vehicle-specific minimization of data acquisition by mapping individual data demands to individual vehicles. We collect personal data only after prior consent and fulfill the requirements of the GDPR. Non-personal data can be collected by directly addressing individual vehicles. 
The system consists of a software component natively integrated with a major automaker's vehicle platform and a cloud platform brokering access to acquired data. Sensor readings are either provided via near real-time streaming or as recorded trip files that provide specific consistency guarantees. A performance evaluation with over 200,000 simulated vehicles has shown that our system can increase server capacity on-demand and process streaming data within 269 ms on average during peak load. The resulting architecture can be used by other automakers or operators of large sensor networks. Native vehicle integration is not mandatory; the architecture can also be used with retrofitted hardware such as OBD readers.}, } @article {pmid34770413, year = {2021}, author = {Calvo, I and Villar, E and Napole, C and Fernández, A and Barambones, O and Gil-García, JM}, title = {Reliable Control Applications with Wireless Communication Technologies: Application to Robotic Systems.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {21}, pages = {}, pmid = {34770413}, issn = {1424-8220}, support = {EKOHEGAZ (ELKARTEK KK-2021/00092)//Eusko Jaurlaritza/ ; GIU20/063//Euskal Herriko Unibertsitatea (UPV/EHU)/ ; CONAVANTER//Diputación Foral de Alava (DFA)/ ; }, mesh = {Communication ; Fuzzy Logic ; Reproducibility of Results ; *Robotic Surgical Procedures ; Wireless Technology ; }, abstract = {The nature of wireless propagation may reduce the QoS of the applications, such that some packages can be delayed or lost. For this reason, the design of wireless control applications must be faced in a holistic way to avoid degrading the performance of the control algorithms. This paper is aimed at improving the reliability of wireless control applications in the event of communication degradation or temporary loss at the wireless links. 
Two controller levels are used: sophisticated algorithms providing better performance are executed in a central node, whereas local independent controllers, implemented as back-up controllers, are executed next to the process in case of QoS degradation. This work presents a reliable strategy for switching between central and local controllers avoiding that plants may become uncontrolled. For validation purposes, the presented approach was used to control a planar robot. A Fuzzy Logic control algorithm was implemented as a main controller at a high performance computing platform. A back-up controller was implemented on an edge device. This approach avoids the robot becoming uncontrolled in case of communication failure. Although a planar robot was chosen in this work, the presented approach may be extended to other processes. XBee 900 MHz communication technology was selected for control tasks, leaving the 2.4 GHz band for integration with cloud services. Several experiments are presented to analyze the behavior of the control application under different circumstances. The results proved that our approach allows the use of wireless communications, even in critical control applications.}, } @article {pmid34770308, year = {2021}, author = {Simić, M and Sladić, G and Zarić, M and Markoski, B}, title = {Infrastructure as Software in Micro Clouds at the Edge.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {21}, pages = {}, pmid = {34770308}, issn = {1424-8220}, abstract = {Edge computing offers cloud services closer to data sources and end-users, making the foundation for novel applications. The infrastructure deployment is taking off, bringing new challenges: how to use geo-distribution properly, or harness the advantages of having resources at a specific location? New real-time applications require multi-tier infrastructure, preferably doing data preprocessing locally, but using the cloud for heavy workloads. 
We present a model, able to organize geo-distributed nodes into micro clouds dynamically, allowing resource reorganization to best serve population needs. Such elasticity is achieved by relying on cloud organization principles, adapted for a different environment. The desired state is specified descriptively, and the system handles the rest. As such, infrastructure is abstracted to the software level, thus enabling "infrastructure as software" at the edge. We argue about blending the proposed model into existing tools, allowing cloud providers to offer future micro clouds as a service.}, } @article {pmid34770286, year = {2021}, author = {Kil, BH and Park, JS and Ryu, MH and Park, CY and Kim, YS and Kim, JD}, title = {Cloud-Based Software Architecture for Fully Automated Point-of-Care Molecular Diagnostic Device.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {21}, pages = {}, pmid = {34770286}, issn = {1424-8220}, support = {HW20C2053//Korea Health Industry Development Institute/Republic of Korea ; }, mesh = {*Cloud Computing ; Computers ; Pathology, Molecular ; *Point-of-Care Systems ; Software ; }, abstract = {This paper proposes a cloud-based software architecture for fully automated point-of-care molecular diagnostic devices. The target system operates a cartridge consisting of an extraction body for DNA extraction and a PCR chip for amplification and fluorescence detection. To facilitate control and monitoring via the cloud, a socket server was employed for fundamental molecular diagnostic functions such as DNA extraction, amplification, and fluorescence detection. The user interface for experimental control and monitoring was constructed with the RESTful application programming interface, allowing access from the terminal device, edge, and cloud. Furthermore, it can also be accessed through any web-based user interface on smart computing devices such as smart phones or tablets. 
An emulator with the proposed software architecture was fabricated to validate successful operation.}, } @article {pmid34770256, year = {2021}, author = {Aleisa, MA and Abuhussein, A and Alsubaei, FS and Sheldon, FT}, title = {Examining the Performance of Fog-Aided, Cloud-Centered IoT in a Real-World Environment.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {21}, pages = {}, pmid = {34770256}, issn = {1424-8220}, mesh = {*Cloud Computing ; }, abstract = {The fog layer provides substantial benefits in cloud-based IoT applications because it can serve as an aggregation layer and it moves the computation resources nearer to the IoT devices; however, it is important to ensure adequate performance is achieved in such applications, as the devices usually communicate frequently and authenticate with the cloud. This can cause performance and availability issues, which can be dangerous in critical applications such as in the healthcare sector. In this paper, we analyze the efficacy of the fog layer in different architectures in a real-world environment by examining performance metrics for the cloud and fog layers using different numbers of IoT devices. We also implement the fog layer using two methods to determine whether different fog implementation frameworks can affect the performance. The results show that including a fog layer with semi-heavyweight computation capability results in higher capital costs, although in the long run resources, time, and money are saved. This study can serve as a reference for fundamental fog computing concepts. 
It can also be used to walk practitioners through different implementation frameworks of fog-aided IoT and to show tradeoffs in order to inform when to use each implementation framework based on one's objectives.}, } @article {pmid34766274, year = {2022}, author = {Lacar, B}, title = {Generation of Centered Log-Ratio Normalized Antibody-Derived Tag Counts from Large Single-Cell Sequencing Datasets.}, journal = {Methods in molecular biology (Clifton, N.J.)}, volume = {2386}, number = {}, pages = {203-217}, pmid = {34766274}, issn = {1940-6029}, mesh = {Antibodies ; Gene Expression Profiling ; High-Throughput Nucleotide Sequencing ; Sequence Analysis, RNA ; *Single-Cell Analysis ; }, abstract = {Recent developments in single-cell analysis have provided the ability to assay >50 surface-level proteins by combining oligo-conjugated antibodies with sequencing technology. These methods, such as CITE-seq and REAP-seq, have added another modality to single-cell analysis, enhancing insight across many biological subdisciplines. While packages like Seurat have greatly facilitated analysis of single-cell protein expression, the practical steps to carry out the analysis with increasingly larger datasets have been fragmented. In addition, using data visualizations, I will highlight some details about the centered log-ratio (CLR) normalization of antibody-derived tag (ADT) counts that may be overlooked. 
In this method chapter, I provide detailed steps to generate CLR-normalized CITE-seq data using cloud computing from a large CITE-seq dataset.}, } @article {pmid34760334, year = {2021}, author = {Bhawsar, PMS and Abubakar, M and Schmidt, MK and Camp, NJ and Cessna, MH and Duggan, MA and García-Closas, M and Almeida, JS}, title = {Browser-based Data Annotation, Active Learning, and Real-Time Distribution of Artificial Intelligence Models: From Tumor Tissue Microarrays to COVID-19 Radiology.}, journal = {Journal of pathology informatics}, volume = {12}, number = {}, pages = {38}, pmid = {34760334}, issn = {2229-5089}, abstract = {BACKGROUND: Artificial intelligence (AI) is fast becoming the tool of choice for scalable and reliable analysis of medical images. However, constraints in sharing medical data outside the institutional or geographical space, as well as difficulties in getting AI models and modeling platforms to work across different environments, have led to a "reproducibility crisis" in digital medicine.

METHODS: This study details the implementation of a web platform that can be used to mitigate these challenges by orchestrating a digital pathology AI pipeline, from raw data to model inference, entirely on the local machine. We discuss how this federated platform provides governed access to data by consuming the Application Program Interfaces exposed by cloud storage services, allows the addition of user-defined annotations, facilitates active learning for training models iteratively, and provides model inference computed directly in the web browser at practically zero cost. The latter is of particular relevance to clinical workflows because the code, including the AI model, travels to the user's data, which stays private to the governance domain where it was acquired.

RESULTS: We demonstrate that the web browser can be a means of democratizing AI and advancing data socialization in medical imaging backed by consumer-facing cloud infrastructure such as Box.com. As a case study, we test the accompanying platform end-to-end on a large dataset of digital breast cancer tissue microarray core images. We also showcase how it can be applied in contexts separate from digital pathology by applying it to a radiology dataset containing COVID-19 computed tomography images.

CONCLUSIONS: The platform described in this report resolves the challenges to the findable, accessible, interoperable, reusable stewardship of data and AI models by integrating with cloud storage to maintain user-centric governance over the data. It also enables distributed, federated computation for AI inference over those data and proves the viability of client-side AI in medical imaging.

AVAILABILITY: The open-source application is publicly available at , with a short video demonstration at .}, } @article {pmid34750391, year = {2021}, author = {Chatenoux, B and Richard, JP and Small, D and Roeoesli, C and Wingate, V and Poussin, C and Rodila, D and Peduzzi, P and Steinmeier, C and Ginzler, C and Psomas, A and Schaepman, ME and Giuliani, G}, title = {The Swiss data cube, analysis ready data archive using earth observations of Switzerland.}, journal = {Scientific data}, volume = {8}, number = {1}, pages = {295}, pmid = {34750391}, issn = {2052-4463}, abstract = {Since the opening of Earth Observation (EO) archives (USGS/NASA Landsat and EC/ESA Sentinels), large collections of EO data are freely available, offering scientists new possibilities to better understand and quantify environmental changes. Fully exploiting these satellite EO data will require new approaches for their acquisition, management, distribution, and analysis. Given rapid environmental changes and the emergence of big data, innovative solutions are needed to support policy frameworks and related actions toward sustainable development. Here we present the Swiss Data Cube (SDC), unleashing the information power of Big Earth Data for monitoring the environment, providing Analysis Ready Data over the geographic extent of Switzerland since 1984, which is updated on a daily basis. Based on a cloud-computing platform allowing to access, visualize and analyse optical (Sentinel-2; Landsat 5, 7, 8) and radar (Sentinel-1) imagery, the SDC minimizes the time and knowledge required for environmental analyses, by offering consistent calibrated and spatially co-registered satellite observations. 
SDC derived analysis ready data supports generation of environmental information, allowing to inform a variety of environmental policies with unprecedented timeliness and quality.}, } @article {pmid34749633, year = {2021}, author = {Tangaro, MA and Mandreoli, P and Chiara, M and Donvito, G and Antonacci, M and Parisi, A and Bianco, A and Romano, A and Bianchi, DM and Cangelosi, D and Uva, P and Molineris, I and Nosi, V and Calogero, RA and Alessandri, L and Pedrini, E and Mordenti, M and Bonetti, E and Sangiorgi, L and Pesole, G and Zambelli, F}, title = {Laniakea@ReCaS: exploring the potential of customisable Galaxy on-demand instances as a cloud-based service.}, journal = {BMC bioinformatics}, volume = {22}, number = {Suppl 15}, pages = {544}, pmid = {34749633}, issn = {1471-2105}, support = {653549//Horizon 2020 Framework Programme/ ; 857650//Horizon 2020 Framework Programme/ ; 824087//Horizon 2020 Framework Programme/ ; }, mesh = {*COVID-19 ; *Cloud Computing ; Computational Biology ; Humans ; SARS-CoV-2 ; Software ; }, abstract = {BACKGROUND: Improving the availability and usability of data and analytical tools is a critical precondition for further advancing modern biological and biomedical research. For instance, one of the many ramifications of the COVID-19 global pandemic has been to make even more evident the importance of having bioinformatics tools and data readily actionable by researchers through convenient access points and supported by adequate IT infrastructures. One of the most successful efforts in improving the availability and usability of bioinformatics tools and data is represented by the Galaxy workflow manager and its thriving community. In 2020 we introduced Laniakea, a software platform conceived to streamline the configuration and deployment of "on-demand" Galaxy instances over the cloud. 
By facilitating the set-up and configuration of Galaxy web servers, Laniakea provides researchers with a powerful and highly customisable platform for executing complex bioinformatics analyses. The system can be accessed through a dedicated and user-friendly web interface that allows the Galaxy web server's initial configuration and deployment.

RESULTS: "Laniakea@ReCaS", the first instance of a Laniakea-based service, is managed by ELIXIR-IT and was officially launched in February 2020, after about one year of development and testing that involved several users. Researchers can request access to Laniakea@ReCaS through an open-ended call for use-cases. Ten project proposals have been accepted since then, totalling 18 Galaxy on-demand virtual servers that employ ~ 100 CPUs, ~ 250 GB of RAM and ~ 5 TB of storage and serve several different communities and purposes. Herein, we present eight use cases demonstrating the versatility of the platform.

CONCLUSIONS: During this first year of activity, the Laniakea-based service emerged as a flexible platform that facilitated the rapid development of bioinformatics tools, the efficient delivery of training activities, and the provision of public bioinformatics services in different settings, including food safety and clinical research. Laniakea@ReCaS provides a proof of concept of how enabling access to appropriate, reliable IT resources and ready-to-use bioinformatics tools can considerably streamline researchers' work.}, } @article {pmid34746087, year = {2021}, author = {Shah, A and Ahirrao, S and Pandya, S and Kotecha, K and Rathod, S}, title = {Smart Cardiac Framework for an Early Detection of Cardiac Arrest Condition and Risk.}, journal = {Frontiers in public health}, volume = {9}, number = {}, pages = {762303}, pmid = {34746087}, issn = {2296-2565}, mesh = {Bayes Theorem ; *Heart Arrest/diagnosis ; Humans ; *Machine Learning ; Neural Networks, Computer ; Support Vector Machine ; }, abstract = {Cardiovascular disease (CVD) is considered to be one of the most epidemic diseases in the world today. Predicting CVDs, such as cardiac arrest, is a difficult task in the area of healthcare. The healthcare industry has a vast collection of datasets for analysis and prediction purposes. Somehow, the predictions made on these publicly available datasets may be erroneous. To make the prediction accurate, real-time data need to be collected. This study collected real-time data using sensors and stored it on a cloud computing platform, such as Google Firebase. The acquired data is then classified using six machine-learning algorithms: Artificial Neural Network (ANN), Random Forest Classifier (RFC), Gradient Boost Extreme Gradient Boosting (XGBoost) classifier, Support Vector Machine (SVM), Naïve Bayes (NB), and Decision Tree (DT). Furthermore, we have presented two novel gender-based risk classification and age-wise risk classification approach in the undertaken study. 
The presented approaches have used Kaplan-Meier and Cox regression survival analysis methodologies for risk detection and classification. The presented approaches also assist health experts in identifying the risk probability and the 10-year risk score prediction. The proposed system is an economical alternative to the existing system due to its low cost. The outcome obtained shows an enhanced level of performance with an overall accuracy of 98% using DT on our collected dataset for cardiac risk prediction. We also introduced two risk classification models for gender- and age-wise people to detect their survival probability. The outcome of the proposed model shows accurate probability in both classes.}, } @article {pmid34745491, year = {2021}, author = {Fu, X and Wang, Y and Belkacem, AN and Zhang, Q and Xie, C and Cao, Y and Cheng, H and Chen, S}, title = {Integrating Optimized Multiscale Entropy Model with Machine Learning for the Localization of Epileptogenic Hemisphere in Temporal Lobe Epilepsy Using Resting-State fMRI.}, journal = {Journal of healthcare engineering}, volume = {2021}, number = {}, pages = {1834123}, pmid = {34745491}, issn = {2040-2309}, mesh = {Brain ; Entropy ; *Epilepsy, Temporal Lobe/diagnostic imaging ; Functional Laterality ; Humans ; Machine Learning ; Magnetic Resonance Imaging ; }, abstract = {The bottleneck associated with the validation of the parameters of the entropy model has limited the application of this model to modern functional imaging technologies such as the resting-state functional magnetic resonance imaging (rfMRI). In this study, an optimization algorithm that could choose the parameters of the multiscale entropy (MSE) model was developed, while the optimized effectiveness for localizing the epileptogenic hemisphere was validated through the classification rate with a supervised machine learning method. 
The rfMRI data of 20 mesial temporal lobe epilepsy patients with positive indicators (the indicators of epileptogenic hemisphere in clinic) in the hippocampal formation on either left or right hemisphere (equally divided into two groups) on the structural MRI were collected and preprocessed. Then, three parameters in the MSE model were statistically optimized by both receiver operating characteristic (ROC) curve and the area under the ROC curve value in the sensitivity analysis, and the intergroup significance of optimized entropy values was utilized to confirm the biomarked brain areas sensitive to the epileptogenic hemisphere. Finally, the optimized entropy values of these biomarked brain areas were regarded as the feature vectors input for a support vector machine to classify the epileptogenic hemisphere, and the classification effectiveness was cross-validated. Nine biomarked brain areas were confirmed by the optimized entropy values, including medial superior frontal gyrus and superior parietal gyrus (p < .01). The mean classification accuracy was greater than 90%. It can be concluded that combination of the optimized MSE model with the machine learning model can accurately confirm the epileptogenic hemisphere by rfMRI. With the powerful information interaction capabilities of 5G communication, the epilepsy side-fixing algorithm that requires computing power can be integrated into a cloud platform. 
The demand side only needs to upload patient data to the service platform to realize the preoperative assessment of epilepsy.}, } @article {pmid34744856, year = {2021}, author = {Smart, PR}, title = {Shedding Light on the Extended Mind: HoloLens, Holograms, and Internet-Extended Knowledge.}, journal = {Frontiers in psychology}, volume = {12}, number = {}, pages = {675184}, pmid = {34744856}, issn = {1664-1078}, abstract = {The application of extended mind theory to the Internet and Web yields the possibility of Internet-extended knowledge-a form of extended knowledge that arises as a result of an individual's interactions with the online environment. The present paper seeks to advance our understanding of Internet-extended knowledge by describing the functionality of a real-world application, called the HoloArt app. In part, the goal of the paper is illustrative: it is intended to show how recent advances in mixed reality, cloud-computing, and machine intelligence might be combined so as to yield a putative case of Internet-extended knowledge. Beyond this, however, the paper is intended to support the philosophical effort to understand the notions of extended knowledge and the extended mind. In particular, the HoloArt app raises questions about the universality of some of the criteria that have been used to evaluate putative cases of cognitive extension. 
The upshot is a better appreciation of the way in which claims about extended knowledge and the extended mind might be affected by a consideration of technologically-advanced resources.}, } @article {pmid34740667, year = {2022}, author = {Munawar, HS and Mojtahedi, M and Hammad, AWA and Kouzani, A and Mahmud, MAP}, title = {Disruptive technologies as a solution for disaster risk management: A review.}, journal = {The Science of the total environment}, volume = {806}, number = {Pt 3}, pages = {151351}, doi = {10.1016/j.scitotenv.2021.151351}, pmid = {34740667}, issn = {1879-1026}, mesh = {Artificial Intelligence ; Big Data ; Data Science ; *Disasters ; *Disruptive Technology ; }, abstract = {Integrating disruptive technologies within smart cities improves the infrastructure needed to potentially deal with disasters. This paper provides a perspective review of disruptive technologies such as the Internet of Things (IoT), image processing, artificial intelligence (AI), big data and smartphone applications which are in use and have been proposed for future improvements in disaster management of urban regions. The key focus of this paper is exploring ways in which smart cities could be established to harness the potential of disruptive technologies and improve post-disaster management. The key questions explored are a) what are the gaps or barriers to the utilization of disruptive technologies in the area of disaster management and b) How can the existing methods of disaster management be improved through the application of disruptive technologies. 
To respond to these questions, a novel framework based on integrated approaches combining big data analytics and AI is proposed for developing disaster management solutions using disruptive technologies.}, } @article {pmid34737350, year = {2021}, author = {Alammari, A and Moiz, SA and Negi, A}, title = {Enhanced layered fog architecture for IoT sensing and actuation as a service.}, journal = {Scientific reports}, volume = {11}, number = {1}, pages = {21693}, pmid = {34737350}, issn = {2045-2322}, abstract = {The reduced service cost offered by Sensing and Actuation as a Service paradigm, particularly in Internet of Things (IoT) era, has encouraged many establishments to start without worrying about having their own infrastructure. Such a paradigm is typically managed by a centralized cloud service provider. Fog paradigm has emerged as a mini-cloud that if designed with care to assist the cloud, together will achieve better performance. This article introduces a layered fog architecture called Sensors and Actuator Layered Fog Services Delivery (SALFSD) for IoT ecosystems. The significance of SALFSD is being fault resistant; it dynamically reassigns tasks of the failed node to the nearest active node to maintain the network connection. Besides, SALFSD monitors end users pre-specified cases closer to the physical devices hired by end users to fasten generating the actuation commands. Such node may offload its monitoring responsibility to its parent node in case it is overloaded. SALFSD is evaluated using Yet Another Fog Simulator in different scenarios (numbers of users, sensors, actuators, and areas). A comparison was made for Sensing and Actuating as a Service (SAaaS) with/without layered fog, and layered fog with/without (failure reassignment, pre-specified cases in fog nodes, and offloading). The comparison was conducted in terms of computing/communication latencies and the number of missed messages for both observations and actuation commands. 
Results show that failure reassignment prevented losing messages and maintained network connectivity. Also, wisely selecting the monitoring fog node per end user pre-specified cases and the offloading scheme decreased actuation latency.}, } @article {pmid34736170, year = {2021}, author = {Alassafi, MO}, title = {Success indicators for an efficient utilization of cloud computing in healthcare organizations: Saudi healthcare as case study.}, journal = {Computer methods and programs in biomedicine}, volume = {212}, number = {}, pages = {106466}, doi = {10.1016/j.cmpb.2021.106466}, pmid = {34736170}, issn = {1872-7565}, mesh = {*Cloud Computing ; *Computer Security ; Delivery of Health Care ; Reproducibility of Results ; Saudi Arabia ; }, abstract = {The population in Saudi Arabia is expected to reach 40 million by 2025. Consequently, healthcare information will become critical to manage. Despite the fact that adopting cloud computing in the Saudi healthcare organizations can facilitate cost reduction, capacity building, institutional interoperability, and get access to data analytics, the adoption rate is very low. Hence, a new model is proposed to adopt cloud computing in the Saudi healthcare organization. The novelty of this work comes from using a quantitative method to test users' attitudes, data security, data control, data privacy, compliance, and reliability influence on the cloud computing adoption intention in the context of Saudi Arabian healthcare organizations. Partial Least Squares (PLS) method based Structural Equation Modeling (SEM) was used for model development. About 160 respondents from the relevant health organizations participated. The result shows that the attitude towards using technology, data security, compliance, and reliability of the cloud computing services are important determining factors in the adoption of cloud computing in Saudi healthcare organizations. 
However, the distinction in the findings regarding Data privacy and Data control in the Saudi healthcare organizational context is a clear manifestation of the fact that there is a need for policy formation for data privacy, data control, and data protection legislation in Saudi Arabia. Therefore, raising awareness regarding the practice of data privacy and data control policies among IT managers is essential. Future study should use a more holistic and industry-specific framework such as the technology-organization-environment (TOE) framework to find new influencing factors from the domains of technological context, the organizational context, and the environmental context.}, } @article {pmid34731183, year = {2021}, author = {Ran, H}, title = {Construction and optimization of inventory management system via cloud-edge collaborative computing in supply chain environment in the Internet of Things era.}, journal = {PloS one}, volume = {16}, number = {11}, pages = {e0259284}, pmid = {34731183}, issn = {1932-6203}, mesh = {Algorithms ; Cloud Computing ; Commerce/*methods ; Equipment and Supplies ; Internet of Things ; }, abstract = {The present work aims to strengthen the core competitiveness of industrial enterprises in the supply chain environment, and enhance the efficiency of inventory management and the utilization rate of inventory resources. First, an analysis is performed on the supply and demand relationship between suppliers and manufacturers in the supply chain environment and the production mode of intelligent plant based on cloud manufacturing. It is found that the efficient management of spare parts inventory can effectively reduce costs and improve service levels. On this basis, different prediction methods are proposed for different data types of spare parts demand, which are all verified. 
Finally, the inventory management system based on cloud-edge collaborative computing is constructed, and the genetic algorithm is selected as a comparison to validate the performance of the system reported here. The experimental results indicate that prediction method based on weighted summation of eigenvalues and fitting proposed here has the smallest error and the best fitting effect in the demand prediction of machine spare parts, and the minimum error after fitting is only 2.2%. Besides, the spare parts demand prediction method can well complete the prediction in the face of three different types of time series of spare parts demand data, and the relative error of prediction is maintained at about 10%. This prediction system can meet the basic requirements of spare parts demand prediction and achieve higher prediction accuracy than the periodic prediction method. Moreover, the inventory management system based on cloud-edge collaborative computing has shorter processing time, higher efficiency, better stability, and better overall performance than genetic algorithm. The research results provide reference and ideas for the application of edge computing in inventory management, which have certain reference significance and application value.}, } @article {pmid34729386, year = {2021}, author = {Simonetti, D and Pimple, U and Langner, A and Marelli, A}, title = {Pan-tropical Sentinel-2 cloud-free annual composite datasets.}, journal = {Data in brief}, volume = {39}, number = {}, pages = {107488}, pmid = {34729386}, issn = {2352-3409}, abstract = {Sentinel-2 MSI is one of the core missions of the Copernicus Earth Observation programme of the European Union. This mission shows great potential to map the regional high-resolution spatio-temporal dynamics of land use and land cover. 
In tropical regions, despite the high revisiting time of 5 days including both Sentinel-2A and 2B satellites, the frequent presence of clouds, cloud-shadows, haze and other atmospheric contaminants are precluding the visibility of the Earth surface up to several months. In this paper we present four annual pan-tropical cloud-free composites computed and exported from Google Earth Engine (GEE) by making use of available Sentinel-2 L1C collection for the period spanning from 2015 to 2020. We furthermore propose empirical approaches to reduce the BRDF effect over tropical forest areas by showing pros and cons of image-based versus swath-based methodologies. Additionally, we provide a dedicated web-platform offering a fast and intuitive way to browse and explore the proposed annual composites as well as layers of potential annual changes as a ready-to-use means to visually identify and verify degradation and deforestation activities as well as other land cover changes.}, } @article {pmid34729056, year = {2021}, author = {Chen, SW and Gu, XW and Wang, JJ and Zhu, HS}, title = {AIoT Used for COVID-19 Pandemic Prevention and Control.}, journal = {Contrast media & molecular imaging}, volume = {2021}, number = {}, pages = {3257035}, pmid = {34729056}, issn = {1555-4317}, mesh = {*Artificial Intelligence ; COVID-19/*prevention & control/virology ; Delivery of Health Care/*standards ; Humans ; Internet of Things/*statistics & numerical data ; *Machine Learning ; SARS-CoV-2/*isolation & purification ; }, abstract = {The pandemic of COVID-19 is continuing to wreak havoc in 2021, with at least 170 million victims around the world. Healthcare systems are overwhelmed by the large-scale virus infection. Luckily, Internet of Things (IoT) is one of the most effective paradigms in the intelligent world, in which the technology of artificial intelligence (AI), like cloud computing and big data analysis, is playing a vital role in preventing the spread of the pandemic of COVID-19. 
AI and 5G technologies are advancing by leaps and bounds, further strengthening the intelligence and connectivity of IoT applications, and conventional IoT has been gradually upgraded to be more powerful AI + IoT (AIoT). For example, in terms of remote screening and diagnosis of COVID-19 patients, AI technology based on machine learning and deep learning has recently upgraded medical equipment significantly and has reshaped the workflow with minimal contact with patients, so medical specialists can make clinical decisions more efficiently, providing the best protection not only to patients but also to specialists themselves. This paper reviews the latest progress made in combating COVID-19 with both IoT and AI and also provides comprehensive details on how to combat the pandemic of COVID-19 as well as the technologies that may be applied in the future.}, } @article {pmid34723173, year = {2021}, author = {Zhang, W and Wang, Y and Ji, X and Wu, Y and Zhao, R}, title = {ROA: A Rapid Learning Scheme for In-Situ Memristor Networks.}, journal = {Frontiers in artificial intelligence}, volume = {4}, number = {}, pages = {692065}, pmid = {34723173}, issn = {2624-8212}, abstract = {Memristors show great promise in neuromorphic computing owing to their high-density integration, fast computing and low-energy consumption. However, the non-ideal update of synaptic weight in memristor devices, including nonlinearity, asymmetry and device variation, still poses challenges to the in-situ learning of memristors, thereby limiting their broad applications. Although the existing offline learning schemes can avoid this problem by transferring the weight optimization process into cloud, it is difficult to adapt to unseen tasks and uncertain environments. Here, we propose a bi-level meta-learning scheme that can alleviate the non-ideal update problem, and achieve fast adaptation and high accuracy, named Rapid One-step Adaption (ROA). 
By introducing a special regularization constraint and a dynamic learning rate strategy for in-situ learning, the ROA method effectively combines offline pre-training and online rapid one-step adaption. Furthermore, we implemented it on memristor-based neural networks to solve few-shot learning tasks, proving its superiority over the pure offline and online schemes under noisy conditions. This method can solve in-situ learning in non-ideal memristor networks, providing potential applications of on-chip neuromorphic learning and edge computing.}, } @article {pmid34715889, year = {2021}, author = {Amselem, S and Gueguen, S and Weinbach, J and Clement, A and Landais, P and , }, title = {RaDiCo, the French national research program on rare disease cohorts.}, journal = {Orphanet journal of rare diseases}, volume = {16}, number = {1}, pages = {454}, pmid = {34715889}, issn = {1750-1172}, mesh = {Europe ; France/epidemiology ; Humans ; *Rare Diseases/epidemiology/genetics ; }, abstract = {BACKGROUND: Rare diseases (RDs) affect nearly 3 million people in France and at least 26-30 million people in Europe. These diseases, which represent a major medical concern, are mainly of genetic origin, often chronic, progressive, degenerative, life threatening and disabling, accounting for more than one third of all deaths occurring during infancy. In this context, there are needs for coordinated information on RDs at national/international levels, based on high quality, interoperable and sharable data. The main objective of the RaDiCo (Rare Disease Cohorts) program, coordinated by Inserm, was the development of RD e-cohorts via a national platform. The cohort projects were selected through a national call in 2014. The e-cohorts are supported by an interoperable platform, equivalent to an infrastructure, constructed on the "cloud computing" principle and in compliance with the European General Data Protection Regulation. 
It is dedicated to allow a continuous monitoring of data quality and consistency, in line with the French Health Data Hub.

RESULTS: Depending on cohorts, the objectives are to describe the natural history of the studied RD(s), identify the underlying disease genes, establish phenotype-genotype correlations, decipher their pathophysiology, assess their societal and medico-economic impact, and/or identify patients eligible for new therapeutic approaches. Inclusion of prevalent and incident cases started at the end of 2016. As of April 2021, 5558 patients have been included within 13 RD e-cohorts covering 67 diseases integrated in 10 European Reference Networks and contributing to the European Joint Program on RDs. Several original results have been obtained in relation to the secondary objectives of the RaDiCo cohorts. They deal with the discovery of new disease genes, assessment of treatment management, deciphering the underlying pathophysiological mechanisms, diagnostic approaches, genotype-phenotype relationships, development and validation of questionnaires relative to disease burden, or methodological aspects.

CONCLUSION: RaDiCo currently hosts 13 RD e-cohorts on a sharable and interoperable platform constructed on the "cloud computing" principle. New RD e-cohorts at the European and international levels are targeted.}, } @article {pmid34713107, year = {2021}, author = {Spanakis, EG and Sfakianakis, S and Bonomi, S and Ciccotelli, C and Magalini, S and Sakkalis, V}, title = {Emerging and Established Trends to Support Secure Health Information Exchange.}, journal = {Frontiers in digital health}, volume = {3}, number = {}, pages = {636082}, pmid = {34713107}, issn = {2673-253X}, abstract = {This work aims to provide information, guidelines, established practices and standards, and an extensive evaluation on new and promising technologies for the implementation of a secure information sharing platform for health-related data. We focus strictly on the technical aspects and specifically on the sharing of health information, studying innovative techniques for secure information sharing within the health-care domain, and we describe our solution and evaluate the use of blockchain methodologically for integrating within our implementation. To do so, we analyze health information sharing within the concept of the PANACEA project that facilitates the design, implementation, and deployment of a relevant platform. 
The research presented in this paper provides evidence and argumentation toward advanced and novel implementation strategies for a state-of-the-art information sharing environment; a description of high-level requirements for the transfer of data between different health-care organizations or cross-border; technologies to support the secure interconnectivity and trust between information technology (IT) systems participating in a sharing-data "community"; standards, guidelines, and interoperability specifications for implementing a common understanding and integration in the sharing of clinical information; and the use of cloud computing and prospectively more advanced technologies such as blockchain. The technologies described and the possible implementation approaches are presented in the design of an innovative secure information sharing platform in the health-care domain.}, } @article {pmid34708196, year = {2021}, author = {Li, L and Thompson, C and Henselman-Petrusek, G and Giusti, C and Ziegelmeier, L}, title = {Minimal Cycle Representatives in Persistent Homology Using Linear Programming: An Empirical Study With User's Guide.}, journal = {Frontiers in artificial intelligence}, volume = {4}, number = {}, pages = {681117}, pmid = {34708196}, issn = {2624-8212}, abstract = {Cycle representatives of persistent homology classes can be used to provide descriptions of topological features in data. However, the non-uniqueness of these representatives creates ambiguity and can lead to many different interpretations of the same set of classes. One approach to solving this problem is to optimize the choice of representative against some measure that is meaningful in the context of the data. 
In this work, we provide a study of the effectiveness and computational cost of several ℓ 1 minimization optimization procedures for constructing homological cycle bases for persistent homology with rational coefficients in dimension one, including uniform-weighted and length-weighted edge-loss algorithms as well as uniform-weighted and area-weighted triangle-loss algorithms. We conduct these optimizations via standard linear programming methods, applying general-purpose solvers to optimize over column bases of simplicial boundary matrices. Our key findings are: 1) optimization is effective in reducing the size of cycle representatives, though the extent of the reduction varies according to the dimension and distribution of the underlying data, 2) the computational cost of optimizing a basis of cycle representatives exceeds the cost of computing such a basis, in most data sets we consider, 3) the choice of linear solvers matters a lot to the computation time of optimizing cycles, 4) the computation time of solving an integer program is not significantly longer than the computation time of solving a linear program for most of the cycle representatives, using the Gurobi linear solver, 5) strikingly, whether requiring integer solutions or not, we almost always obtain a solution with the same cost and almost all solutions found have entries in { - 1,0,1 } and therefore, are also solutions to a restricted ℓ 0 optimization problem, and 6) we obtain qualitatively different results for generators in Erdős-Rényi random clique complexes than in real-world and synthetic point cloud data.}, } @article {pmid34702704, year = {2021}, author = {Zheng, GY and Zeng, T and Li, YX}, title = {Application and prospect of cutting-edge information technology in biomedical big data.}, journal = {Yi chuan = Hereditas}, volume = {43}, number = {10}, pages = {924-929}, doi = {10.16288/j.yczz21-192}, pmid = {34702704}, issn = {0253-9772}, mesh = {Artificial Intelligence ; Big Data ; 
*Biomedical Research ; Cloud Computing ; *Information Technology ; }, abstract = {In recent years, with the development of various high-throughput omics based biological technologies (BT), biomedical research began to enter the era of big data. In the face of high-dimensional, multi-domain and multi-modal biomedical big data, scientific research requires a new paradigm of data intensive scientific research. The vigorous development of cutting-edge information technologies (IT) such as cloud computing, blockchain and artificial intelligence provides technical means for the practice of this new research paradigm. Here,we describe the application of such cutting-edge information technologies in biomedical big data, and propose a forward-looking prospect for the construction of a new paradigm supporting environment for data intensive scientific research. We expect to establish a new research scheme and new scientific research paradigm integrating BT & IT technology, which can finally promote the great leap forward development of biomedical research.}, } @article {pmid34696135, year = {2021}, author = {Mutlag, AA and Ghani, MKA and Mohammed, MA and Lakhan, A and Mohd, O and Abdulkareem, KH and Garcia-Zapirain, B}, title = {Multi-Agent Systems in Fog-Cloud Computing for Critical Healthcare Task Management Model (CHTM) Used for ECG Monitoring.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {20}, pages = {}, pmid = {34696135}, issn = {1424-8220}, mesh = {*Cloud Computing ; Computer Simulation ; Delivery of Health Care ; *Electrocardiography ; Models, Theoretical ; }, abstract = {In the last decade, the developments in healthcare technologies have been increasing progressively in practice. Healthcare applications such as ECG monitoring, heartbeat analysis, and blood pressure control connect with external servers in a manner called cloud computing. 
The emerging cloud paradigm offers different models, such as fog computing and edge computing, to enhance the performances of healthcare applications with minimum end-to-end delay in the network. However, many research challenges exist in the fog-cloud enabled network for healthcare applications. Therefore, in this paper, a Critical Healthcare Task Management (CHTM) model is proposed and implemented using an ECG dataset. We design a resource scheduling model among fog nodes at the fog level. A multi-agent system is proposed to provide the complete management of the network from the edge to the cloud. The proposed model overcomes the limitations of providing interoperability, resource sharing, scheduling, and dynamic task allocation to manage critical tasks significantly. The simulation results show that our model, in comparison with the cloud, significantly reduces the network usage by 79%, the response time by 90%, the network delay by 65%, the energy consumption by 81%, and the instance cost by 80%.}, } @article {pmid34696070, year = {2021}, author = {Andreazi, GT and Estrella, JC and Bruschi, SM and Immich, R and Guidoni, D and Alves Pereira Júnior, L and Meneguette, RI}, title = {MoHRiPA-An Architecture for Hybrid Resources Management of Private Cloud Environments.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {20}, pages = {}, pmid = {34696070}, issn = {1424-8220}, support = {2021/02//ITA's Programa de Pós-graduação em Aplicações Operacionais (ITA/PPGAO)/ ; #2020/07162-0//Fundação de Amparo à Pesquisa do Estado de São Paulo/ ; }, mesh = {Algorithms ; *Cloud Computing ; *Ecosystem ; Workload ; }, abstract = {The high demand for data processing in web applications has grown in recent years due to the increased computing infrastructure supply as a service in a cloud computing ecosystem. This ecosystem offers benefits such as broad network access, elasticity, and resource sharing, among others. 
However, properly exploiting these benefits requires optimized provisioning of computational resources in the target infrastructure. Several studies in the literature improve the quality of this management, which involves enhancing the scalability of the infrastructure, either through cost management policies or strategies aimed at resource scaling. However, few studies adequately explore performance evaluation mechanisms. In this context, we present the MoHRiPA-Management of Hybrid Resources in Private cloud Architecture. MoHRiPA has a modular design encompassing scheduling algorithms, virtualization tools, and monitoring tools. The proposed architecture solution allows assessing the overall system's performance by using complete factorial planning to identify the general behavior of architecture under high demand of requests. It also evaluates workload behavior, the number of virtualized resources, and provides an elastic resource manager. A composite metric is also proposed and adopted as a criterion for resource scaling. This work presents a performance evaluation by using formal techniques, which analyses the scheduling algorithms of architecture and the experiment bottlenecks analysis, average response time, and latency. 
In summary, the proposed MoHRiPA mapping resources algorithm (HashRefresh) showed significant improvement results than the analyzed competitor, decreasing about 7% percent in the uniform average compared to ListSheduling (LS).}, } @article {pmid34696034, year = {2021}, author = {Song, M and Sang, Y}, title = {Secure Outsourcing of Matrix Determinant Computation under the Malicious Cloud.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {20}, pages = {}, pmid = {34696034}, issn = {1424-8220}, support = {201904010209//the Science and Technology Program of Guangzhou, China/ ; 2017A010101039//the Science and Technology Program of Guangdong Province, China/ ; }, abstract = {Computing the determinant of large matrix is a time-consuming task, which is appearing more and more widely in science and engineering problems in the era of big data. Fortunately, cloud computing can provide large storage and computation resources, and thus, act as an ideal platform to complete computation outsourced from resource-constrained devices. However, cloud computing also causes security issues. For example, the curious cloud may spy on user privacy through outsourced data. The malicious cloud violating computing scripts, as well as cloud hardware failure, will lead to incorrect results. Therefore, we propose a secure outsourcing algorithm to compute the determinant of large matrix under the malicious cloud mode in this paper. The algorithm protects the privacy of the original matrix by applying row/column permutation and other transformations to the matrix. To resist malicious cheating on the computation tasks, a new verification method is utilized in our algorithm. Unlike previous algorithms that require multiple rounds of verification, our verification requires only one round without trading off the cheating detectability, which greatly reduces the local computation burden. 
Both theoretical and experimental analysis demonstrate that our algorithm achieves a better efficiency on local users than previous ones on various dimensions of matrices, without sacrificing the security requirements in terms of privacy protection and cheating detectability.}, } @article {pmid34696006, year = {2021}, author = {Ilgner, P and Cika, P and Stusek, M}, title = {SCADA-Based Message Generator for Multi-Vendor Smart Grids: Distributed Integration and Verification of TASE.2.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {20}, pages = {}, pmid = {34696006}, issn = {1424-8220}, support = {TN01000077//Technology Agency of the Czech Republic/ ; }, mesh = {Communication ; *Computer Systems ; *Information Storage and Retrieval ; Reproducibility of Results ; }, abstract = {Recent developments in massive machine-type communication (mMTC) scenarios have given rise to never-seen requirements, which triggered the Industry 4.0 revolution. The new scenarios bring even more pressure to comply with the reliability and communication security and enable flawless functionality of the critical infrastructure, e.g., smart grid infrastructure. We discuss typical network grid architecture, communication strategies, and methods for building scalable and high-speed data processing and storage platform. This paper focuses on the data transmissions using the sets of standards IEC 60870-6 (ICCP/TASE.2). The main goal is to introduce the TASE.2 traffic generator and the data collection back-end with the implemented load balancing functionality to understand the limits of current protocols used in the smart grids. To this end, the assessment framework enabling generating and collecting TASE.2 communication with long-term data storage providing high availability and load balancing capabilities was developed. 
The designed proof-of-concept supports complete cryptographic security and allows users to perform the complex testing and verification of the TASE.2 network nodes configuration. Implemented components were tested in a cloud-based Microsoft Azure environment in four geographically separated locations. The findings from the testing indicate the high performance and scalability of the proposed platform, allowing the proposed generator to be also used for high-speed load testing purposes. The load-balancing performance shows the CPU usage of the load-balancer below 15% while processing 5000 messages per second. This makes it possible to achieve up to a 7-fold improvement of performance resulting in processing up to 35,000 messages per second.}, } @article {pmid34695973, year = {2021}, author = {Buckley, T and Ghosh, B and Pakrashi, V}, title = {Edge Structural Health Monitoring (E-SHM) Using Low-Power Wireless Sensing.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {20}, pages = {}, pmid = {34695973}, issn = {1424-8220}, support = {12/RC/2302_2//Science Foundation Ireland/Ireland ; EAPA_826/2018//EU Interreg/ ; PBA/BIO/18/02//ERA-NET COFUND/ ; }, mesh = {*Acceleration ; *Electric Power Supplies ; Monitoring, Physiologic ; }, abstract = {Effective Structural Health Monitoring (SHM) often requires continuous monitoring to capture changes of features of interest in structures, which are often located far from power sources. A key challenge lies in continuous low-power data transmission from sensors. Despite significant developments in long-range, low-power telecommunication (e.g., LoRa NB-IoT), there are inadequate demonstrative benchmarks for low-power SHM. Damage detection is often based on monitoring features computed from acceleration signals where data are extensive due to the frequency of sampling (~100-500 Hz). Low-power, long-range telecommunications are restricted in both the size and frequency of data packets. 
However, microcontrollers are becoming more efficient, enabling local computing of damage-sensitive features. This paper demonstrates the implementation of an Edge-SHM framework through low-power, long-range, wireless, low-cost and off-the-shelf components. A bespoke setup is developed with a low-power MEM accelerometer and a microcontroller where frequency and time domain features are computed over set time intervals before sending them to a cloud platform. A cantilever beam excited by an electrodynamic shaker is monitored, where damage is introduced through the controlled loosening of bolts at the fixed boundary, thereby introducing rotation at its fixed end. The results demonstrate how an IoT-driven edge platform can benefit continuous monitoring.}, } @article {pmid34693068, year = {2020}, author = {Gorgulla, C and Fackeldey, K and Wagner, G and Arthanari, H}, title = {Accounting of Receptor Flexibility in Ultra-Large Virtual Screens with VirtualFlow Using a Grey Wolf Optimization Method.}, journal = {Supercomputing frontiers and innovations}, volume = {7}, number = {3}, pages = {4-12}, pmid = {34693068}, issn = {2313-8734}, support = {R01 AI037581/AI/NIAID NIH HHS/United States ; R01 AI150709/AI/NIAID NIH HHS/United States ; R01 CA200913/CA/NCI NIH HHS/United States ; R01 GM129026/GM/NIGMS NIH HHS/United States ; }, abstract = {Structure-based virtual screening approaches have the ability to dramatically reduce the time and costs associated to the discovery of new drug candidates. Studies have shown that the true hit rate of virtual screenings improves with the scale of the screened ligand libraries. Therefore, we have recently developed an open source drug discovery platform (VirtualFlow), which is able to routinely carry out ultra-large virtual screenings. One of the primary challenges of molecular docking is the circumstance when the protein is highly dynamic or when the structure of the protein cannot be captured by a static pose. 
To accommodate protein dynamics, we report the extension of VirtualFlow to allow the docking of ligands using a grey wolf optimization algorithm using the docking program GWOVina, which substantially improves the quality and efficiency of flexible receptor docking compared to AutoDock Vina. We demonstrate the linear scaling behavior of VirtualFlow utilizing GWOVina up to 128 000 CPUs. The newly supported docking method will be valuable for drug discovery projects in which protein dynamics and flexibility play a significant role.}, } @article {pmid34690611, year = {2021}, author = {Nour, B and Mastorakis, S and Mtibaa, A}, title = {Whispering: Joint Service Offloading and Computation Reuse in Cloud-Edge Networks.}, journal = {IEEE International Conference on Communications : [proceedings]. IEEE International Conference on Communications}, volume = {2021}, number = {}, pages = {}, pmid = {34690611}, issn = {1938-1883}, support = {P20 GM109090/GM/NIGMS NIH HHS/United States ; }, abstract = {Due to the proliferation of Internet of Things (IoT) and application/user demands that challenge communication and computation, edge computing has emerged as the paradigm to bring computing resources closer to users. In this paper, we present Whispering, an analytical model for the migration of services (service offloading) from the cloud to the edge, in order to minimize the completion time of computational tasks offloaded by user devices and improve the utilization of resources. We also empirically investigate the impact of reusing the results of previously executed tasks for the execution of newly received tasks (computation reuse) and propose an adaptive task offloading scheme between edge and cloud. 
Our evaluation results show that Whispering achieves up to 35% and 97% (when coupled with computation reuse) lower task completion times than cases where tasks are executed exclusively at the edge or the cloud.}, } @article {pmid34690529, year = {2022}, author = {Sheikh Sofla, M and Haghi Kashani, M and Mahdipour, E and Faghih Mirzaee, R}, title = {Towards effective offloading mechanisms in fog computing.}, journal = {Multimedia tools and applications}, volume = {81}, number = {2}, pages = {1997-2042}, pmid = {34690529}, issn = {1380-7501}, abstract = {Fog computing is considered a formidable next-generation complement to cloud computing. Nowadays, in light of the dramatic rise in the number of IoT devices, several problems have been raised in cloud architectures. By introducing fog computing as a mediate layer between the user devices and the cloud, one can extend cloud computing's processing and storage capability. Offloading can be utilized as a mechanism that transfers computations, data, and energy consumption from the resource-limited user devices to resource-rich fog/cloud layers to achieve an optimal experience in the quality of applications and improve the system performance. This paper provides a systematic and comprehensive study to evaluate fog offloading mechanisms' current and recent works. Each selected paper's pros and cons are explored and analyzed to state and address the present potentialities and issues of offloading mechanisms in a fog environment efficiently. We classify offloading mechanisms in a fog system into four groups, including computation-based, energy-based, storage-based, and hybrid approaches. Furthermore, this paper explores offloading metrics, applied algorithms, and evaluation methods related to the chosen offloading mechanisms in fog systems. 
Additionally, the open challenges and future trends derived from the reviewed studies are discussed.}, } @article {pmid34686040, year = {2021}, author = {Li, XG and Blaiszik, B and Schwarting, ME and Jacobs, R and Scourtas, A and Schmidt, KJ and Voyles, PM and Morgan, D}, title = {Graph network based deep learning of bandgaps.}, journal = {The Journal of chemical physics}, volume = {155}, number = {15}, pages = {154702}, doi = {10.1063/5.0066009}, pmid = {34686040}, issn = {1089-7690}, abstract = {Recent machine learning models for bandgap prediction that explicitly encode the structure information to the model feature set significantly improve the model accuracy compared to both traditional machine learning and non-graph-based deep learning methods. The ongoing rapid growth of open-access bandgap databases can benefit such model construction not only by expanding their domain of applicability but also by requiring constant updating of the model. Here, we build a new state-of-the-art multi-fidelity graph network model for bandgap prediction of crystalline compounds from a large bandgap database of experimental and density functional theory (DFT) computed bandgaps with over 806 600 entries (1500 experimental, 775 700 low-fidelity DFT, and 29 400 high-fidelity DFT). The model predicts bandgaps with a 0.23 eV mean absolute error in cross validation for high-fidelity data, and including the mixed data from all different fidelities improves the prediction of the high-fidelity data. The prediction error is smaller for high-symmetry crystals than for low symmetry crystals. 
Our data are published through a new cloud-based computing environment, called the "Foundry," which supports easy creation and revision of standardized data structures and will enable cloud accessible containerized models, allowing for continuous model development and data accumulation in the future.}, } @article {pmid34681121, year = {2021}, author = {Lim, HG and Hsiao, SH and Lee, YG}, title = {Orchestrating an Optimized Next-Generation Sequencing-Based Cloud Workflow for Robust Viral Identification during Pandemics.}, journal = {Biology}, volume = {10}, number = {10}, pages = {}, pmid = {34681121}, issn = {2079-7737}, support = {MOST 109-2221-E-038-016//Ministry of Science and Technology, Taiwan/ ; HHSN261201400008C//National Institutes of Health/ ; }, abstract = {Coronavirus disease 2019 (COVID-19), caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), has recently become a novel pandemic event following the swine flu that occurred in 2009, which was caused by the influenza A virus (H1N1 subtype). The accurate identification of the huge number of samples during a pandemic still remains a challenge. In this study, we integrate two technologies, next-generation sequencing and cloud computing, into an optimized workflow version that uses a specific identification algorithm on the designated cloud platform. We use 182 samples (92 for COVID-19 and 90 for swine flu) with short-read sequencing data from two open-access datasets to represent each pandemic and evaluate our workflow performance based on an index specifically created for SARS-CoV-2 or H1N1. Results show that our workflow could differentiate cases between the two pandemics with a higher accuracy depending on the index used, especially when the index that exclusively represented each dataset was used. Our workflow substantially outperforms the original complete identification workflow available on the same platform in terms of time and cost by preserving essential tools internally. 
Our workflow can serve as a powerful tool for the robust identification of cases and, thus, aid in controlling the current and future pandemics.}, } @article {pmid34680183, year = {2021}, author = {Prakash, A and Taylor, L and Varkey, M and Hoxie, N and Mohammed, Y and Goo, YA and Peterman, S and Moghekar, A and Yuan, Y and Glaros, T and Steele, JR and Faridi, P and Parihari, S and Srivastava, S and Otto, JJ and Nyalwidhe, JO and Semmes, OJ and Moran, MF and Madugundu, A and Mun, DG and Pandey, A and Mahoney, KE and Shabanowitz, J and Saxena, S and Orsburn, BC}, title = {Reinspection of a Clinical Proteomics Tumor Analysis Consortium (CPTAC) Dataset with Cloud Computing Reveals Abundant Post-Translational Modifications and Protein Sequence Variants.}, journal = {Cancers}, volume = {13}, number = {20}, pages = {}, pmid = {34680183}, issn = {2072-6694}, support = {P30 CA015083/CA/NCI NIH HHS/United States ; }, abstract = {The Clinical Proteomic Tumor Analysis Consortium (CPTAC) has provided some of the most in-depth analyses of the phenotypes of human tumors ever constructed. Today, the majority of proteomic data analysis is still performed using software housed on desktop computers which limits the number of sequence variants and post-translational modifications that can be considered. The original CPTAC studies limited the search for PTMs to only samples that were chemically enriched for those modified peptides. Similarly, the only sequence variants considered were those with strong evidence at the exon or transcript level. In this multi-institutional collaborative reanalysis, we utilized unbiased protein databases containing millions of human sequence variants in conjunction with hundreds of common post-translational modifications. Using these tools, we identified tens of thousands of high-confidence PTMs and sequence variants. 
We identified 4132 phosphorylated peptides in nonenriched samples, 93% of which were confirmed in the samples which were chemically enriched for phosphopeptides. In addition, our results also cover 90% of the high-confidence variants reported by the original proteogenomics study, without the need for sample specific next-generation sequencing. Finally, we report fivefold more somatic and germline variants that have an independent evidence at the peptide level, including mutations in ERRB2 and BCAS1. In this reanalysis of CPTAC proteomic data with cloud computing, we present an openly available and searchable web resource of the highest-coverage proteomic profiling of human tumors described to date.}, } @article {pmid34677328, year = {2021}, author = {Mamdiwar, SD and R, A and Shakruwala, Z and Chadha, U and Srinivasan, K and Chang, CY}, title = {Recent Advances on IoT-Assisted Wearable Sensor Systems for Healthcare Monitoring.}, journal = {Biosensors}, volume = {11}, number = {10}, pages = {}, pmid = {34677328}, issn = {2079-6374}, support = {MOST109-2221-E-224-048- MY2//Ministry of Science and Technology, Taiwan/ ; Higher Education Sprout Project//Ministry of Education/ ; }, mesh = {Delivery of Health Care ; Humans ; *Wearable Electronic Devices ; }, abstract = {IoT has played an essential role in many industries over the last few decades. Recent advancements in the healthcare industry have made it possible to make healthcare accessible to more people and improve their overall health. The next step in healthcare is to integrate it with IoT-assisted wearable sensor systems seamlessly. This review rigorously discusses the various IoT architectures, different methods of data processing, transfer, and computing paradigms. It compiles various communication technologies and the devices commonly used in IoT-assisted wearable sensor systems and deals with its various applications in healthcare and their advantages to the world. 
A comparative analysis of all the wearable technology in healthcare is also discussed with tabulation of various research and technology. This review also analyses all the problems commonly faced in IoT-assisted wearable sensor systems and the specific issues that need to be tackled to optimize these systems in healthcare and describes the various future implementations that can be made to the architecture and the technology to improve the healthcare industry.}, } @article {pmid34660507, year = {2021}, author = {Senthilkumar, S and Brindha, K and Kryvinska, N and Bhattacharya, S and Reddy Bojja, G}, title = {SCB-HC-ECC-Based Privacy Safeguard Protocol for Secure Cloud Storage of Smart Card-Based Health Care System.}, journal = {Frontiers in public health}, volume = {9}, number = {}, pages = {688399}, pmid = {34660507}, issn = {2296-2565}, mesh = {Cloud Computing ; Computer Security ; Confidentiality ; Delivery of Health Care ; *Health Smart Cards ; Humans ; Privacy ; }, abstract = {The advent of the internet has brought an era of unprecedented connectivity between networked devices, making one distributed computing, called cloud computing, and popular. This has also resulted in a dire need for remote authentication schemes for transferring files of a sensitive nature, especially health-related information between patients, smart health cards, and cloud servers via smart health card solution providers. 
In this article, we elaborate on our proposed approach for such a system and accomplish an informal analysis to demonstrate the claim that this scheme provides sufficient security while maintaining usability.}, } @article {pmid34656885, year = {2021}, author = {Landman, T and Nissim, N}, title = {Deep-Hook: A trusted deep learning-based framework for unknown malware detection and classification in Linux cloud environments.}, journal = {Neural networks : the official journal of the International Neural Network Society}, volume = {144}, number = {}, pages = {648-685}, doi = {10.1016/j.neunet.2021.09.019}, pmid = {34656885}, issn = {1879-2782}, mesh = {Cloud Computing ; *Deep Learning ; Neural Networks, Computer ; Software ; }, abstract = {Since the beginning of the 21st century, the use of cloud computing has increased rapidly, and it currently plays a significant role among most organizations' information technology (IT) infrastructure. Virtualization technologies, particularly virtual machines (VMs), are widely used and lie at the core of cloud computing. While different operating systems can run on top of VM instances, in public cloud environments the Linux operating system is used 90% of the time. Because of their prevalence, organizational Linux-based virtual servers have become an attractive target for cyber-attacks, mainly launched by sophisticated malware designed at causing harm, sabotaging operations, obtaining data, or gaining financial profit. This has resulted in the need for an advanced and reliable unknown malware detection mechanism for Linux cloud-based environments. Antivirus software and today's even more advanced malware detection solutions have limitations in detecting new, unseen, and evasive malware. Moreover, many existing solutions are considered untrusted, as they operate on the inspected machine and can be interfered with, and can even be detected by the malware itself, allowing malware to evade detection and cause damage. 
In this paper, we propose Deep-Hook, a trusted framework for unknown malware detection in Linux-based cloud environments. Deep-Hook hooks the VM's volatile memory in a trusted manner and acquires the memory dump to discover malware footprints while the VM operates. The memory dumps are transformed into visual images which are analyzed using a convolutional neural network (CNN) based classifier. The proposed framework has some key advantages, such as its agility, its ability to eliminate the need for features defined by a cyber domain expert, and most importantly, its ability to analyze the entire memory dump and thus to better utilize the existing indication it conceals, thus allowing the induction of a more accurate detection model. Deep-Hook was evaluated on widely used Linux virtual servers; four state-of-the-art CNN architectures; eight image resolutions; and a total of 22,400 volatile memory dumps representing the execution of a broad set of benign and malicious Linux applications. Our experimental evaluation results demonstrate Deep-Hook's ability to effectively, efficiently, and accurately detect and classify unknown malware (even evasive malware like rootkits), with an AUC and accuracy of up to 99.9%.}, } @article {pmid34641012, year = {2021}, author = {Hannan, A and Shafiq, MZ and Hussain, F and Pires, IM}, title = {A Portable Smart Fitness Suite for Real-Time Exercise Monitoring and Posture Correction.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {19}, pages = {}, pmid = {34641012}, issn = {1424-8220}, support = {UIDB/50008/2020//Fundação para a Ciência e a Tecnologia/ ; }, mesh = {*COVID-19 ; Exercise ; Humans ; *Pandemics ; Posture ; SARS-CoV-2 ; }, abstract = {Fitness and sport have drawn significant attention in wearable and persuasive computing. Physical activities are worthwhile for health, well-being, improved fitness levels, lower mental pressure and tension levels. 
Nonetheless, during high-power and commanding workouts, there is a high likelihood that physical fitness is seriously influenced. Jarring motions and improper posture during workouts can lead to temporary or permanent disability. With the advent of technological advances, activity acknowledgment dependent on wearable sensors has pulled in countless studies. Still, a fully portable smart fitness suite is not industrialized, which is the central need of today's time, especially in the Covid-19 pandemic. Considering the effectiveness of this issue, we proposed a fully portable smart fitness suite for the household to carry on their routine exercises without any physical gym trainer and gym environment. The proposed system considers two exercises, i.e., T-bar and bicep curl with the assistance of the virtual real-time android application, acting as a gym trainer overall. The proposed fitness suite is embedded with a gyroscope and EMG sensory modules for performing the above two exercises. It provided alerts on unhealthy, wrong posture movements over an android app and is guided to the best possible posture based on sensor values. The KNN classification model is used for prediction and guidance for the user while performing a particular exercise with the help of an android application-based virtual gym trainer through a text-to-speech module. 
The proposed system attained 89% accuracy, which is quite effective with portability and a virtually assisted gym trainer feature.}, } @article {pmid34640861, year = {2021}, author = {Detti, A and Nakazato, H and Martínez Navarro, JA and Tropea, G and Funari, L and Petrucci, L and Sánchez Segado, JA and Kanai, K}, title = {VirIoT: A Cloud of Things That Offers IoT Infrastructures as a Service.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {19}, pages = {}, pmid = {34640861}, issn = {1424-8220}, support = {814918//H2020-EUJ-2018/ ; }, abstract = {Many cloud providers offer IoT services that simplify the collection and processing of IoT information. However, the IoT infrastructure composed of sensors and actuators that produces this information remains outside the cloud; therefore, application developers must install, connect and manage the cloud. This requirement can be a market barrier, especially for small/medium software companies that cannot afford the infrastructural costs associated with it and would only prefer to focus on IoT application developments. Motivated by the wish to eliminate this barrier, this paper proposes a Cloud of Things platform, called VirIoT, which fully brings the Infrastructure as a service model typical of cloud computing to the world of Internet of Things. VirIoT provides users with virtual IoT infrastructures (Virtual Silos) composed of virtual things, with which users can interact through dedicated and standardized broker servers in which the technology can be chosen among those offered by the platform, such as oneM2M, NGSI and NGSI-LD. VirIoT allows developers to focus their efforts exclusively on IoT applications without worrying about infrastructure management and allows cloud providers to expand their IoT services portfolio. VirIoT uses external things and cloud/edge computing resources to deliver the IoT virtualization services. 
Its open-source architecture is microservice-based and runs on top of a distributed Kubernetes platform with nodes in central and edge data centers. The architecture is scalable, efficient and able to support the continuous integration of heterogeneous things and IoT standards, taking care of interoperability issues. Using a VirIoT deployment spanning data centers in Europe and Japan, we conducted a performance evaluation with a two-fold objective: showing the efficiency and scalability of the architecture; and leveraging VirIoT's ability to integrate different IoT standards in order to make a fair comparison of some open-source IoT Broker implementations, namely Mobius for oneM2M, Orion for NGSIv2, Orion-LD and Scorpio for NGSI-LD.}, } @article {pmid34640825, year = {2021}, author = {Kashmar, N and Adda, M and Ibrahim, H}, title = {HEAD Metamodel: Hierarchical, Extensible, Advanced, and Dynamic Access Control Metamodel for Dynamic and Heterogeneous Structures.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {19}, pages = {}, pmid = {34640825}, issn = {1424-8220}, abstract = {The substantial advancements in information technologies have brought unprecedented concepts and challenges to provide solutions and integrate advanced and self-ruling systems in critical and heterogeneous structures. The new generation of networking environments (e.g., the Internet of Things (IoT), cloud computing, etc.) are dynamic and ever-evolving environments. They are composed of various private and public networks, where all resources are distributed and accessed from everywhere. Protecting resources by controlling access to them is a complicated task, especially with the presence of cybercriminals and cyberattacks. What makes this reality also challenging is the diversity and the heterogeneity of access control (AC) models, which are implemented and integrated with a countless number of information systems. 
The evolution of ubiquitous computing, especially the concept of Industry 4.0 and IoT applications, imposes the need to enhance AC methods since the traditional methods are not able to answer the increasing demand for privacy and security standards. To address this issue, we propose a Hierarchical, Extensible, Advanced, and Dynamic (HEAD) AC metamodel for dynamic and heterogeneous structures that is able to encompass the heterogeneity of the existing AC models. Various AC models can be derived, and different static and dynamic AC policies can be generated using its components. We use Eclipse (xtext) to define the grammar of our AC metamodel. We illustrate our approach with several successful instantiations for various models and hybrid models. Additionally, we provide some examples to show how some of the derived models can be implemented to generate AC policies.}, } @article {pmid34640820, year = {2021}, author = {Li, S and Hu, X and Du, Y}, title = {Deep Reinforcement Learning for Computation Offloading and Resource Allocation in Unmanned-Aerial-Vehicle Assisted Edge Computing.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {19}, pages = {}, pmid = {34640820}, issn = {1424-8220}, support = {11461038//National Natural Science Foundation of China/ ; 2020A-033//Innovation Foundation of Colleges and Universities in Gansu Province/ ; }, abstract = {Computation offloading technology extends cloud computing to the edge of the access network close to users, bringing many benefits to terminal devices with limited battery and computational resources. Nevertheless, the existing computation offloading approaches are challenging to apply to specific scenarios, such as the dense distribution of end-users and the sparse distribution of network infrastructure. 
The technological revolution in the unmanned aerial vehicle (UAV) and chip industry has granted UAVs more computing resources and promoted the emergence of UAV-assisted mobile edge computing (MEC) technology, which could be applied to those scenarios. However, in the MEC system with multiple users and multiple servers, making reasonable offloading decisions and allocating system resources is still a severe challenge. This paper studies the offloading decision and resource allocation problem in the UAV-assisted MEC environment with multiple users and servers. To ensure the quality of service for end-users, we set the weighted total cost of delay, energy consumption, and the size of discarded tasks as our optimization objective. We further formulate the joint optimization problem as a Markov decision process and apply the soft actor-critic (SAC) deep reinforcement learning algorithm to optimize the offloading policy. Numerical simulation results show that the offloading policy optimized by our proposed SAC-based dynamic computing offloading (SACDCO) algorithm effectively reduces the delay, energy consumption, and size of discarded tasks for the UAV-assisted MEC system. Compared with the fixed local-UAV scheme in the specific simulation setting, our proposed approach reduces system delay and energy consumption by approximately 50% and 200%, respectively.}, } @article {pmid34631002, year = {2021}, author = {Jiang, N and Wang, L and Xu, X}, title = {Research on Smart Healthcare Services: Based on the Design of APP Health Service Platform.}, journal = {Journal of healthcare engineering}, volume = {2021}, number = {}, pages = {9922389}, pmid = {34631002}, issn = {2040-2309}, mesh = {Big Data ; *Cloud Computing ; Delivery of Health Care ; *Health Services ; Humans ; }, abstract = {With the development of information technology, big data, and cloud computing, the concept of smart healthcare has gradually become more and more important. 
Compared with the traditional healthcare service, the new model, health service platform, is becoming increasingly popular and convenient. The use of wearable monitoring devices and some APPs is improving the health monitoring efficacy and effectiveness. To improve and facilitate the smart healthcare services, an effective and convenient app health service platform is needed urgently to serve the older and younger. Based on the above, this paper elaborates the principles of health service system and health information perception terminal design of the APP health service platform.}, } @article {pmid34621367, year = {2021}, author = {Mehrtak, M and SeyedAlinaghi, S and MohsseniPour, M and Noori, T and Karimi, A and Shamsabadi, A and Heydari, M and Barzegary, A and Mirzapour, P and Soleymanzadeh, M and Vahedi, F and Mehraeen, E and Dadras, O}, title = {Security challenges and solutions using healthcare cloud computing.}, journal = {Journal of medicine and life}, volume = {14}, number = {4}, pages = {448-461}, pmid = {34621367}, issn = {1844-3117}, mesh = {*Cloud Computing ; *Computer Security ; Confidentiality ; Delivery of Health Care ; Humans ; Software ; }, abstract = {Cloud computing is among the most beneficial solutions to digital problems. Security is one of the focal issues in cloud computing technology, and this study aims at investigating security issues of cloud computing and their probable solutions. A systematic review was performed using Scopus, Pubmed, Science Direct, and Web of Science databases. Once the title and abstract were evaluated, the quality of studies was assessed in order to choose the most relevant according to exclusion and inclusion criteria. Then, the full texts of studies selected were read thoroughly to extract the necessary results. According to the review, data security, availability, and integrity, as well as information confidentiality and network security, were the major challenges in cloud security. 
Further, data encryption, authentication, and classification, besides application programming interfaces (API), were security solutions to cloud infrastructure. Data encryption could be applied to store and retrieve data from the cloud in order to provide secure communication. Besides, several central challenges, which make the cloud security engineering process problematic, have been considered in this study.}, } @article {pmid34616887, year = {2021}, author = {B M Mansour, M and Abdelkader, T and Hashem, M and El-Horbaty, EM}, title = {An integrated three-tier trust management framework in mobile edge computing using fuzzy logic.}, journal = {PeerJ. Computer science}, volume = {7}, number = {}, pages = {e700}, pmid = {34616887}, issn = {2376-5992}, abstract = {Mobile edge computing (MEC) is introduced as part of edge computing paradigm, that exploit cloud computing resources, at a nearer premises to service users. Cloud service users often search for cloud service providers to meet their computational demands. Due to the lack of previous experience between cloud service providers and users, users hold several doubts related to their data security and privacy, job completion and processing performance efficiency of service providers. This paper presents an integrated three-tier trust management framework that evaluates cloud service providers in three main domains: Tier I, which evaluates service provider compliance to the agreed upon service level agreement; Tier II, which computes the processing performance of a service provider based on its number of successful processes; and Tier III, which measures the violations committed by a service provider, per computational interval, during its processing in the MEC network. The three-tier evaluation is performed during Phase I computation. In Phase II, a service provider total trust value and status are gained through the integration of the three tiers using the developed overall trust fuzzy inference system (FIS). 
The simulation results of Phase I show the service provider trust value in terms of service level agreement compliance, processing performance and measurement of violations independently. This disseminates service provider's points of failure, which enables a service provider to enhance its future performance for the evaluated domains. The Phase II results show the overall trust value and status per service provider after integrating the three tiers using overall trust FIS. The proposed model is distinguished among other models by evaluating different parameters for a service provider.}, } @article {pmid34616535, year = {2021}, author = {Yu, Y}, title = {Cloud Computing into Respiratory Rehabilitation Training-Assisted Treatment of Patients with Pneumonia.}, journal = {Journal of healthcare engineering}, volume = {2021}, number = {}, pages = {5884174}, pmid = {34616535}, issn = {2040-2309}, mesh = {*Cloud Computing ; Humans ; *Pneumonia ; }, abstract = {In order to study the therapeutic effect of respiratory rehabilitation training on patients with pneumonia, this paper proposes an integrated adjuvant therapy program based on the cloud computing model. A total of 60 pneumonia patients admitted to Zhujiang Hospital of Southern Medical University from January to July 2020 were selected as the research objects and Southern Medical University pneumonia patients as the research object, to be evenly divided into two groups, each group of 30 people. The control group was treated with conventional anti-infection treatment, and the observation group was treated with supplementary respiratory rehabilitation training on the basis of conventional treatment. The therapeutic effects of the two groups were compared. The results showed that the absorption time of lung lesions was (9.17 ± 3.46) days in the observation group and (13.97 ± 3.07) days in the control group, and the difference between the two groups was statistically significant (t = 5.683, P < 0.001). 
Respiratory therapy based on the cloud computing model has the characteristics of integration and extensibility, which can be effectively applied to the treatment effect analysis of patients with pneumonia and is of great significance for the effective analysis of patients' blood gas indexes and lung function indexes.}, } @article {pmid34616533, year = {2021}, author = {Du, Z and Hu, X and Wu, J}, title = {Application of Cloud Computing in the Prediction of Exercise Improvement of Cardiovascular and Digestive Systems in Obese Patients.}, journal = {Journal of healthcare engineering}, volume = {2021}, number = {}, pages = {4695722}, pmid = {34616533}, issn = {2040-2309}, mesh = {Body Mass Index ; *Cloud Computing ; Exercise ; Humans ; *Obesity ; Overweight ; }, abstract = {Based on the cardiovascular and digestive problems of obese patients, this paper adopted the cloud computing method and selected 100 subjects with big data (23 normal weight subjects, 37 overweight patients, and 40 obese patients) as the research objects, studying the heart configuration and their digestive system of obese people. Results show that BMI = L (24 ≥ BMI > 27.9) and BMI = XL (BMI > 27.9) were identified as target correlation projects in this experiment, associated with each cardiac structural parameters, respectively.
Cloud computing facilitates early detection, early prevention, and early intervention in heart configuration changes in overweight and obese patients.}, } @article {pmid34614001, year = {2021}, author = {Zhang, R and Song, Y}, title = {Relationship between employees' career maturity and career planning of edge computing and cloud collaboration from the perspective of organizational behavior.}, journal = {PloS one}, volume = {16}, number = {10}, pages = {e0257582}, pmid = {34614001}, issn = {1932-6203}, mesh = {Algorithms ; *Career Mobility ; *Cloud Computing ; Computer Simulation ; Humans ; *Internet of Things ; }, abstract = {A new IoT (Internet of Things) analysis platform is designed based on edge computing and cloud collaboration from the perspective of organizational behavior, to fundamentally understand the relationship between enterprise career maturity and career planning, and meet the actual needs of enterprises. The performance of the proposed model is further determined according to the characteristic of the edge near data sources, with the help of factor analysis, and through the study and analysis of relevant enterprise data. The model is finally used to analyze the relationship between enterprise career maturity and career planning through simulation experiments. The research results prove that career maturity positively affects career planning, and vocational delay of gratification plays a mediating role in career maturity and career planning. Besides, the content of career choice in career maturity is influenced by mental acuity, result acuity and loyalty. The experimental results indicate that when the load at both ends of the edge and cloud exceeds 80%, the edge delay of the IoT analysis platform based on edge computing and cloud collaboration is 10s faster than that of other models. Meanwhile, the system slowdown is reduced by 36% while the stability is increased when the IoT analysis platform analyzes data. 
The results of the edge-cloud collaboration scheduling scheme are similar to all scheduling to the edge end, which saves 19% of the time compared with cloud computing to the cloud end. In Optical Character Recognition and Aeneas, compared with the single edge-cloud coordination mode, the model with the Nesterov Accelerated Gradient algorithm achieves the optimal performance. Specifically, the communication delay is reduced by about 25% on average, and the communication time decreased by 61% compared with cloud computing to the edge end. This work has significant reference value for analyzing the relationship between enterprise psychology, behavior, and career planning.}, } @article {pmid34608413, year = {2021}, author = {Chao, G and Gang, W}, title = {Sports Training Teaching Device Based on Big Data and Cloud Computing.}, journal = {Journal of healthcare engineering}, volume = {2021}, number = {}, pages = {7339486}, pmid = {34608413}, issn = {2040-2309}, mesh = {*Big Data ; *Cloud Computing ; Humans ; Research Design ; }, abstract = {With the advent of the era of big data (BD), people have higher requirements for information, knowledge, and technology. Taking the Internet as the carrier, the use of cloud computing technology for distance education has become a trend. Our country's physical training teaching has also begun to change from traditional mode to modern mode. In order to improve the overall quality of our country's national sports, this paper studies the teaching device of sports training based on BD and cloud computing. This article mainly uses the questionnaire survey method, the experimental analysis method, the data analysis method, and the data statistics method to have an in-depth understanding of the research theme and uses swimming as an example to design the sports training device. 52% of people think that water in the ears and itching during swimming are more serious problems. After further understanding, an experimental design was carried out. 
Experimental studies have shown that the combination of BD and cloud computing can effectively solve the problems existing in the traditional teaching model, so as to achieve the goal of efficient and rapid development.}, } @article {pmid34604515, year = {2021}, author = {Khedr, AE and Idrees, AM and Salem, R}, title = {Enhancing the e-learning system based on a novel tasks' classification load-balancing algorithm.}, journal = {PeerJ. Computer science}, volume = {7}, number = {}, pages = {e669}, pmid = {34604515}, issn = {2376-5992}, abstract = {In the educational field, the system performance, as well as the stakeholders' satisfaction, are considered a bottleneck in the e-learning system due to the high number of users who are represented in the educational system's stakeholders including instructors and students. On the other hand, successful resource utilization in cloud systems is one of the key factors for increasing system performance which is strongly related to the ability for the optimal load distribution. In this study, a novel load-balancing algorithm is proposed. The proposed algorithm aims to optimize the educational system's performance and, consequently, the users' satisfaction in the educational field represented by the students. The proposed enhancement in the e-learning system has been evaluated by two methods, first, a simulation experiment for confirming the applicability of the proposed algorithm. Then a real-case experiment has been applied to the e-learning system at Helwan University. The results revealed the advantages of the proposed algorithm over other well-known load balancing algorithms. A questionnaire was also developed to measure the users' satisfaction with the system's performance. 
A total of 3,670 out of 5,000 students have responded, and the results have revealed a satisfaction percentage of 95.4% in the e-learning field represented by the students.}, } @article {pmid34603926, year = {2021}, author = {Loeza-Mejía, CI and Sánchez-DelaCruz, E and Pozos-Parra, P and Landero-Hernández, LA}, title = {The potential and challenges of Health 4.0 to face COVID-19 pandemic: a rapid review.}, journal = {Health and technology}, volume = {11}, number = {6}, pages = {1321-1330}, pmid = {34603926}, issn = {2190-7188}, abstract = {The COVID-19 pandemic has generated the need to evolve health services to reduce the risk of contagion and promote a collaborative environment even remotely. Advances in Industry 4.0, including the internet of things, mobile networks, cloud computing, and artificial intelligence make Health 4.0 possible to connect patients with healthcare professionals. Hence, the focus of this work is analyzing the potentiality, and challenges of state-of-the-art Health 4.0 applications to face the COVID-19 pandemic including augmented environments, diagnosis of the virus, forecasts, medical robotics, and remote clinical services. It is concluded that Health 4.0 can be applied in the prevention of contagion, improve diagnosis, promote virtual learning environments, and offer remote services. However, there are still ethical, technical, security, and legal challenges to be addressed. Additionally, more imaging datasets for COVID-19 detection need to be made available to the scientific community. Working in the areas of opportunity will help to address the new normal.
Likewise, Health 4.0 can be applied not only in the COVID-19 pandemic, but also in future global viruses and natural disasters.}, } @article {pmid34597967, year = {2022}, author = {Nagel, GW and de Moraes Novo, EML and Martins, VS and Campos-Silva, JV and Barbosa, CCF and Bonnet, MP}, title = {Impacts of meander migration on the Amazon riverine communities using Landsat time series and cloud computing.}, journal = {The Science of the total environment}, volume = {806}, number = {Pt 2}, pages = {150449}, doi = {10.1016/j.scitotenv.2021.150449}, pmid = {34597967}, issn = {1879-1026}, mesh = {Animals ; *Cloud Computing ; *Ecosystem ; Models, Theoretical ; Rivers ; Time Factors ; }, abstract = {River meander migration is a process that maintains biodiverse riparian ecosystems by producing highly sinuous rivers, and oxbow lakes. However, although the floodplains support communities with fish and other practices in the region, meandering rivers can directly affect the life of local communities. For example, erosion of river banks promotes the loss of land on community shores, while sedimentation increases the distance from house to the river. Therefore, communities living along the Juruá River, one of the most sinuous rivers on Earth, are vulnerable to long-term meander migration. In this study, the river meander migration was detected by using Landsat 5-8 data from 1984 to 2020. A per-pixel Water Surface Change Detection Algorithm (WSCDA) was developed to classify regions subject to erosion and sedimentation processes by applying temporal regressions on the water index, called Modified Normalized Difference Water Index (mNDWI). The WSCDA classified the meander migration with omission and commission errors lower than 13.44% and 7.08%, respectively. Then, the number of riparian communities was mapped using high spatial resolution SPOT images. 
A total of 369 communities with no road access were identified, the majority of which living in stable regions (58.8%), followed by sedimentation (26.02%) and erosion (15.18%) areas. Furthermore, we identified that larger communities (>20 houses) tend to live in more stable locations (70%) compared to smaller communities (1-10 houses) with 55.6%. A theoretical model was proposed to illustrate the main impacts of meander migration on the communities, related to Inundation, Mobility Change, and Food Security. This is the first study exploring the relationship between meander migration and riverine communities at watershed-level, and the results support the identification of vulnerable communities to improve local planning and floodplain conservation.}, } @article {pmid34596963, year = {2021}, author = {Nikam, RD and Lee, J and Choi, W and Banerjee, W and Kwak, M and Yadav, M and Hwang, H}, title = {Ionic Sieving Through One-Atom-Thick 2D Material Enables Analog Nonvolatile Memory for Neuromorphic Computing.}, journal = {Small (Weinheim an der Bergstrasse, Germany)}, volume = {17}, number = {44}, pages = {e2103543}, doi = {10.1002/smll.202103543}, pmid = {34596963}, issn = {1613-6829}, mesh = {*Electronics ; Ions ; }, abstract = {The first report on ion transport through atomic sieves of atomically thin 2D material is provided to solve critical limitations of electrochemical random-access memory (ECRAM) devices. Conventional ECRAMs have random and localized ion migration paths; as a result, the analog switching efficiency is inadequate to perform in-memory logic operations. Herein ion transport path scaled down to the one-atom-thick (≈0.33 nm) hexagonal boron nitride (hBN), and the ionic transport area is confined to a small pore (≈0.3 nm[2]) at the single-hexagonal ring. One-atom-thick hBN has ion-permeable pores at the center of each hexagonal ring due to weakened electron cloud and highly polarized B-N bond. 
The experimental evidence indicates that the activation energy barrier for H[+] ion transport through single-layer hBN is ≈0.51 eV. Benefiting from the controlled ionic sieving through single-layer hBN, the ECRAMs exhibit superior nonvolatile analog switching with good memory retention and high endurance. The proposed approach enables atomically thin 2D material as an ion transport layer to regulate the switching of various ECRAM devices for artificial synaptic electronics.}, } @article {pmid34595915, year = {2021}, author = {Arantes, PR and Polêto, MD and Pedebos, C and Ligabue-Braun, R}, title = {Making it Rain: Cloud-Based Molecular Simulations for Everyone.}, journal = {Journal of chemical information and modeling}, volume = {61}, number = {10}, pages = {4852-4856}, doi = {10.1021/acs.jcim.1c00998}, pmid = {34595915}, issn = {1549-960X}, mesh = {*Cloud Computing ; *Molecular Dynamics Simulation ; }, abstract = {We present a user-friendly front-end for running molecular dynamics (MD) simulations using the OpenMM toolkit on the Google Colab framework. Our goals are (1) to highlight the usage of a cloud-computing scheme for educational purposes for a hands-on approach when learning MD simulations and (2) to exemplify how low-income research groups can perform MD simulations in the microsecond time scale. 
We hope this work facilitates teaching and learning of molecular simulation throughout the community.}, } @article {pmid34591938, year = {2021}, author = {Yang, X and Xi, W and Chen, A and Wang, C}, title = {An environmental monitoring data sharing scheme based on attribute encryption in cloud-fog computing.}, journal = {PloS one}, volume = {16}, number = {9}, pages = {e0258062}, pmid = {34591938}, issn = {1932-6203}, mesh = {*Cloud Computing ; *Computer Security ; Environmental Monitoring/*methods ; Information Dissemination ; Information Technology ; }, abstract = {Environmental monitoring plays a vital role in environmental protection, especially for the management and conservation of natural resources. However, environmental monitoring data is usually difficult to resist malicious attacks because it is transmitted in an open and insecure channel. In our paper, a new data sharing scheme is proposed by using attribute-based encryption, identity-based signature and cloud computing technology to meet the requirements of confidentiality, integrity, verifiability, and unforgerability of environmental monitoring data. The monitoring equipment encrypts the monitored environmental data and uploads it to the environmental cloud server. Then, monitoring users can request access to the environmental cloud server. If the monitoring user meets the access policy, the plaintext is finally obtained through the fog node decryption. Our proposal mainly uses attribute-based encryption technology to realize the privacy protection and fine-grained access control of monitoring data. The integrity and unforgeability of the monitoring data are ensured by the digital signature. In addition, outsourcing computing technology saves the computing overhead of monitoring equipment and monitoring users. The security analysis illustrates that our proposal can achieve security purposes. 
Finally, the performance of our proposal and related schemes is evaluated from the aspects of communication overhead and computing overhead. The results indicate that our proposal is secure and efficient in environmental monitoring.}, } @article {pmid34591883, year = {2021}, author = {Shi, W}, title = {Analyzing enterprise asset structure and profitability using cloud computing and strategic management accounting.}, journal = {PloS one}, volume = {16}, number = {9}, pages = {e0257826}, pmid = {34591883}, issn = {1932-6203}, mesh = {Accounting/*methods ; Cloud Computing ; Humans ; Industry/economics ; Investments/*economics ; }, abstract = {The study expects to further exploring the role of asset structure in enterprise profitability, and analyze the relationship between them in detail. Taking the express industry as the research object, from strategic management accounting, the study uses edge computing and related analysis tools and compares the financial and non-financial indicators of existing express enterprises. The study also discusses the differences between asset structure allocation and sustainable profitability, and constructs the corresponding analysis framework. The results reveal that SF's total assets are obviously large and the profit margin increases. While the total assets of other express enterprises are small, and the express revenue drops sharply. Heavy assets can improve the enterprises' profitability to a certain extent. SF has a good asset management ability. With the support of the capital market, SF's net asset growth ability has been greatly improved. The edge computing method used has higher local data processing ability, and the analysis framework has higher performance than the big data processing method. 
The study can provide some research ideas and practical value for the asset structure analysis and profitability evaluation of express enterprises.}, } @article {pmid34580553, year = {2022}, author = {Ullah, A and Nawi, NM and Ouhame, S}, title = {Recent advancement in VM task allocation system for cloud computing: review from 2015 to 2021.}, journal = {Artificial intelligence review}, volume = {55}, number = {3}, pages = {2529-2573}, pmid = {34580553}, issn = {0269-2821}, abstract = {Cloud computing is new technology that has considerably changed human life at different aspect over the last decade. Especially after the COVID-19 pandemic, almost all life activity shifted into cloud base. Cloud computing is a utility where different hardware and software resources are accessed on pay per user ground base. Most of these resources are available in virtualized form and virtual machine (VM) is one of the main elements of virtualization. VM used in data center for distribution of resource and application according to benefactor demand. Cloud data center faces different issue in respect of performance and efficiency for improvement of these issues different approaches are used. Virtual machine play important role for improvement of data center performance therefore different approach are used for improvement of virtual machine efficiency (i-e) load balancing of resource and task. For the improvement of this section different parameter of VM improve like makespan, quality of service, energy, data accuracy and network utilization. Improvement of different parameter in VM directly improve the performance of cloud computing. Therefore, we conducted this review paper that we can discuss about various improvements that took place in VM from 2015 to 2021. 
This review paper also contain information about various parameter of cloud computing and final section of paper present the role of machine learning algorithm in VM as well load balancing approach along with the future direction of VM in cloud data center.}, } @article {pmid34577655, year = {2021}, author = {Sun, M and Bao, T and Xie, D and Lv, H and Si, G}, title = {Towards Application-Driven Task Offloading in Edge Computing Based on Deep Reinforcement Learning.}, journal = {Micromachines}, volume = {12}, number = {9}, pages = {}, pmid = {34577655}, issn = {2072-666X}, abstract = {Edge computing is a new paradigm, which provides storage, computing, and network resources between the traditional cloud data center and terminal devices. In this paper, we concentrate on the application-driven task offloading problem in edge computing by considering the strong dependencies of sub-tasks for multiple users. Our objective is to joint optimize the total delay and energy generated by applications, while guaranteeing the quality of services of users. First, we formulate the problem for the application-driven tasks in edge computing by jointly considering the delays and the energy consumption. Based on that, we propose a novel Application-driven Task Offloading Strategy (ATOS) based on deep reinforcement learning by adding a preliminary sorting mechanism to realize the joint optimization. Specifically, we analyze the characteristics of application-driven tasks and propose a heuristic algorithm by introducing a new factor to determine the processing order of parallelism sub-tasks. Finally, extensive experiments validate the effectiveness and reliability of the proposed algorithm. 
To be specific, compared with the baseline strategies, the total cost reduction by ATOS can be up to 64.5% on average.}, } @article {pmid34577465, year = {2021}, author = {Molnár, S and Kelényi, B and Tamas, L}, title = {Feature Pyramid Network Based Efficient Normal Estimation and Filtering for Time-of-Flight Depth Cameras.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {18}, pages = {}, pmid = {34577465}, issn = {1424-8220}, support = {PN-III-P2-2.1-PTE-2019-0367//Unitatea Executiva pentru Finantarea Invatamantului Superior, a Cercetarii, Dezvoltarii si Inovarii/ ; }, mesh = {*Algorithms ; *Cloud Computing ; }, abstract = {In this paper, an efficient normal estimation and filtering method for depth images acquired by Time-of-Flight (ToF) cameras is proposed. The method is based on a common feature pyramid networks (FPN) architecture. The normal estimation method is called ToFNest, and the filtering method ToFClean. Both of these low-level 3D point cloud processing methods start from the 2D depth images, projecting the measured data into the 3D space and computing a task-specific loss function. Despite the simplicity, the methods prove to be efficient in terms of robustness and runtime. In order to validate the methods, extensive evaluations on public and custom datasets were performed. 
Compared with the state-of-the-art methods, the ToFNest and ToFClean algorithms are faster by an order of magnitude without losing precision on public datasets.}, } @article {pmid34577460, year = {2021}, author = {Nguyen, TA and Fe, I and Brito, C and Kaliappan, VK and Choi, E and Min, D and Lee, JW and Silva, FA}, title = {Performability Evaluation of Load Balancing and Fail-over Strategies for Medical Information Systems with Edge/Fog Computing Using Stochastic Reward Nets.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {18}, pages = {}, pmid = {34577460}, issn = {1424-8220}, support = {309335/2017-5//Brazilian National Council for Scientific and Technological Development - CNPq/ ; 2020R1A6A1A03046811//Basic Science Research Program through the National Research Foundation of Korea(NRF) funded by the Ministry of Education/ ; }, mesh = {*Cloud Computing ; Computer Simulation ; *Computer Systems ; Humans ; Information Systems ; Reward ; }, abstract = {The aggressive waves of ongoing world-wide virus pandemics urge us to conduct further studies on the performability of local computing infrastructures at hospitals/medical centers to provide a high level of assurance and trustworthiness of medical services and treatment to patients, and to help diminish the burden and chaos of medical management and operations. Previous studies contributed tremendous progress on the dependability quantification of existing computing paradigms (e.g., cloud, grid computing) at remote data centers, while a few works investigated the performance of provided medical services under the constraints of operational availability of devices and systems at local medical centers. Therefore, it is critical to rapidly develop appropriate models to quantify the operational metrics of medical services provided and sustained by medical information systems (MIS) even before practical implementation. 
In this paper, we propose a comprehensive performability SRN model of an edge/fog based MIS for the performability quantification of medical data transaction and services in local hospitals or medical centers. The model elaborates different failure modes of fog nodes and their VMs under the implementation of fail-over mechanisms. Sophisticated behaviors and dependencies between the performance and availability of data transactions are elaborated in a comprehensive manner when adopting three main load-balancing techniques including: (i) probability-based, (ii) random-based and (iii) shortest queue-based approaches for medical data distribution from edge to fog layers along with/without fail-over mechanisms in the cases of component failures at two levels of fog nodes and fog virtual machines (VMs). Different performability metrics of interest are analyzed including (i) recover token rate, (ii) mean response time, (iii) drop probability, (iv) throughput, (v) queue utilization of network devices and fog nodes to assimilate the impact of load-balancing techniques and fail-over mechanisms. Discrete-event simulation results highlight the effectiveness of the combination of these for enhancing the performability of medical services provided by an MIS. Particularly, performability metrics of medical service continuity and quality are improved with fail-over mechanisms in the MIS while load balancing techniques help to enhance system performance metrics. The implementation of both load balancing techniques along with fail-over mechanisms provide better performability metrics compared to the separate cases. The harmony of the integrated strategies eventually provides the trustworthiness of medical services at a high level of performability. 
This study can help improve the design of MIS systems integrated with different load-balancing techniques and fail-over mechanisms to maintain continuous performance under the availability constraints of medical services with heavy computing workloads in local hospitals/medical centers, to combat with new waves of virus pandemics.}, } @article {pmid34577450, year = {2021}, author = {Gendreau Chakarov, A and Biddy, Q and Hennessy Elliott, C and Recker, M}, title = {The Data Sensor Hub (DaSH): A Physical Computing System to Support Middle School Inquiry Science Instruction.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {18}, pages = {}, pmid = {34577450}, issn = {1424-8220}, support = {1742053//National Science Foundation/ ; 1742046//National Science Foundation/ ; 2019805//National Science Foundation/ ; 220020587//James S. McDonald Foundation/ ; }, mesh = {Humans ; Schools ; *Science ; Students ; Writing ; }, abstract = {This article describes a sensor-based physical computing system, called the Data Sensor Hub (DaSH), which enables students to process, analyze, and display data streams collected using a variety of sensors. The system is built around the portable and affordable BBC micro:bit microcontroller (expanded with the gator:bit), which students program using a visual, cloud-based programming environment intended for novices. Students connect a variety of sensors (measuring temperature, humidity, carbon dioxide, sound, acceleration, magnetism, etc.) and write programs to analyze and visualize the collected sensor data streams. The article also describes two instructional units intended for middle grade science classes that use this sensor-based system. These inquiry-oriented units engage students in designing the system to collect data from the world around them to investigate scientific phenomena of interest. 
The units are designed to help students develop the ability to meaningfully integrate computing as they engage in place-based learning activities while using tools that more closely approximate the practices of contemporary scientists as well as other STEM workers. Finally, the article articulates how the DaSH and units have elicited different kinds of teacher practices using student drawn modeling activities, facilitating debugging practices, and developing place-based science practices.}, } @article {pmid34577425, year = {2021}, author = {Corches, C and Daraban, M and Miclea, L}, title = {Availability of an RFID Object-Identification System in IoT Environments.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {18}, pages = {}, pmid = {34577425}, issn = {1424-8220}, mesh = {Cloud Computing ; Humans ; *Internet of Things ; *Radio Frequency Identification Device ; Technology ; }, abstract = {Through the latest technological and conceptual developments, the centralized cloud-computing approach has moved to structures such as edge, fog, and the Internet of Things (IoT), approaching end users. As mobile network operators (MNOs) implement the new 5G standards, enterprise computing function shifts to the edge. In parallel to interconnection topics, there is the issue of global impact over the environment. The idea is to develop IoT devices to eliminate the greenhouse effect of current applications. Radio-frequency identification (RFID) is the technology that has this potential, and it can be used in applications ranging from identifying a person to granting access in a building. Past studies have focused on how to improve RFID communication or to achieve maximal throughput. However, for many applications, system latency and availability are critical aspects. This paper examines, through stochastic Petri nets (SPNs), the availability, dependability, and latency of an object-identification system that uses RFID tags. 
Through the performed analysis, the optimal balance between latency and throughput was identified. Analyzing multiple communication scenarios revealed the availability of such a system when deployed at the edge layer.}, } @article {pmid34577416, year = {2021}, author = {Chen, X and Xiao, S}, title = {Multi-Objective and Parallel Particle Swarm Optimization Algorithm for Container-Based Microservice Scheduling.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {18}, pages = {}, pmid = {34577416}, issn = {1424-8220}, abstract = {An application based on a microservice architecture with a set of independent, fine-grained modular services is desirable, due to its low management cost, simple deployment, and high portability. This type of container technology has been widely used in cloud computing. Several methods have been applied to container-based microservice scheduling, but they come with significant disadvantages, such as high network transmission overhead, ineffective load balancing, and low service reliability. In order to overcome these disadvantages, in this study, we present a multi-objective optimization problem for container-based microservice scheduling. Our approach is based on the particle swarm optimization algorithm, combined parallel computing, and Pareto-optimal theory. The particle swarm optimization algorithm has fast convergence speed, fewer parameters, and many other advantages. First, we detail the various resources of the physical nodes, cluster, local load balancing, failure rate, and other aspects. Then, we discuss our improvement with respect to the relevant parameters. Second, we create a multi-objective optimization model and use a multi-objective optimization parallel particle swarm optimization algorithm for container-based microservice scheduling (MOPPSO-CMS). This algorithm is based on user needs and can effectively balance the performance of the cluster. 
After comparative experiments, we found that the algorithm can achieve good results, in terms of load balancing, network transmission overhead, and optimization speed.}, } @article {pmid34577258, year = {2021}, author = {Belabed, T and Ramos Gomes da Silva, V and Quenon, A and Valderamma, C and Souani, C}, title = {A Novel Automate Python Edge-to-Edge: From Automated Generation on Cloud to User Application Deployment on Edge of Deep Neural Networks for Low Power IoT Systems FPGA-Based Acceleration.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {18}, pages = {}, pmid = {34577258}, issn = {1424-8220}, mesh = {Acceleration ; Computers ; *Neural Networks, Computer ; *Software ; }, abstract = {Deep Neural Networks (DNNs) deployment for IoT Edge applications requires strong skills in hardware and software. In this paper, a novel design framework fully automated for Edge applications is proposed to perform such a deployment on System-on-Chips. Based on a high-level Python interface that mimics the leading Deep Learning software frameworks, it offers an easy way to implement a hardware-accelerated DNN on an FPGA. To do this, our design methodology covers the three main phases: (a) customization: where the user specifies the optimizations needed on each DNN layer, (b) generation: the framework generates on the Cloud the necessary binaries for both FPGA and software parts, and (c) deployment: the SoC on the Edge receives the resulting files serving to program the FPGA and related Python libraries for user applications. Among the study cases, an optimized DNN for the MNIST database can speed up more than 60× a software version on the ZYNQ 7020 SoC and still consume less than 0.43W. 
A comparison with the state-of-the-art frameworks demonstrates that our methodology offers the best trade-off between throughput, power consumption, and system cost.}, } @article {pmid34577248, year = {2021}, author = {Li, H and An, Z and Zuo, S and Zhu, W and Zhang, Z and Zhang, S and Zhang, C and Song, W and Mao, Q and Mu, Y and Li, E and García, JDP}, title = {Artificial Intelligence-Enabled ECG Algorithm Based on Improved Residual Network for Wearable ECG.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {18}, pages = {}, pmid = {34577248}, issn = {1424-8220}, support = {61675154//National Natural Science Foundation of China/ ; 19YFZCSY00180//Tianjin Key Research and Development Program/ ; 18ZXJMTG00260//Tianjin Major Project for Civil-Military Integration of Science and Technology/ ; 20YDTPJC01380//Tianjin Science and Technology Program/ ; XB202007//Tianjin Municipal Special Foundation for Key Cultivation of China/ ; }, mesh = {Algorithms ; Artificial Intelligence ; *Atrial Fibrillation ; Electrocardiography ; Humans ; *Wearable Electronic Devices ; }, abstract = {Heart disease is the leading cause of death for men and women globally. The residual network (ResNet) evolution of electrocardiogram (ECG) technology has contributed to our understanding of cardiac physiology. We propose an artificial intelligence-enabled ECG algorithm based on an improved ResNet for a wearable ECG. The system hardware consists of a wearable ECG with conductive fabric electrodes, a wireless ECG acquisition module, a mobile terminal App, and a cloud diagnostic platform. The algorithm adopted in this study is based on an improved ResNet for the rapid classification of different types of arrhythmia. First, we visualize ECG data and convert one-dimensional ECG signals into two-dimensional images using Gramian angular fields. Then, we improve the ResNet-50 network model, add multistage shortcut branches to the network, and optimize the residual block. 
The ReLu activation function is replaced by a scaled exponential linear units (SELUs) activation function to improve the expression ability of the model. Finally, the images are input into the improved ResNet network for classification. The average recognition rate of this classification algorithm against seven types of arrhythmia signals (atrial fibrillation, atrial premature beat, ventricular premature beat, normal beat, ventricular tachycardia, atrial tachycardia, and sinus bradycardia) is 98.3%.}, } @article {pmid34574966, year = {2021}, author = {da Fonseca, MH and Kovaleski, F and Picinin, CT and Pedroso, B and Rubbo, P}, title = {E-Health Practices and Technologies: A Systematic Review from 2014 to 2019.}, journal = {Healthcare (Basel, Switzerland)}, volume = {9}, number = {9}, pages = {}, pmid = {34574966}, issn = {2227-9032}, support = {0001//Coordenação de Aperfeiçoamento de Pessoal de Nível Superior/ ; 0001//Fundação Araucária/ ; }, abstract = {E-health can be defined as a set of technologies applied with the help of the internet, in which healthcare services are provided to improve quality of life and facilitate healthcare delivery. As there is a lack of similar studies on the topic, this analysis uses a systematic literature review of articles published from 2014 to 2019 to identify the most common e-health practices used worldwide, as well as the main services provided, diseases treated, and the associated technologies that assist in e-health practices. 
Some of the key results were the identification of the four most common practices used (mhealth or mobile health; telehealth or telemedicine; technology; and others) and the most widely used technologies associated with e-health (IoT, cloud computing, Big Data, security, and systems).}, } @article {pmid34574593, year = {2021}, author = {Oh, SR and Seo, YD and Lee, E and Kim, YG}, title = {A Comprehensive Survey on Security and Privacy for Electronic Health Data.}, journal = {International journal of environmental research and public health}, volume = {18}, number = {18}, pages = {}, pmid = {34574593}, issn = {1660-4601}, mesh = {Cloud Computing ; *Computer Security ; Delivery of Health Care ; Electronic Health Records ; *Privacy ; }, abstract = {Recently, the integration of state-of-the-art technologies, such as modern sensors, networks, and cloud computing, has revolutionized the conventional healthcare system. However, security concerns have increasingly been emerging due to the integration of technologies. Therefore, the security and privacy issues associated with e-health data must be properly explored. In this paper, to investigate the security and privacy of e-health systems, we identified major components of the modern e-health systems (i.e., e-health data, medical devices, medical networks and edge/fog/cloud). Then, we reviewed recent security and privacy studies that focus on each component of the e-health systems. Based on the review, we obtained research taxonomy, security concerns, requirements, solutions, research trends, and open challenges for the components with strengths and weaknesses of the analyzed studies. 
In particular, edge and fog computing studies for e-health security and privacy were reviewed since the studies had mostly not been analyzed in other survey papers.}, } @article {pmid34573662, year = {2021}, author = {Dineva, K and Atanasova, T}, title = {Design of Scalable IoT Architecture Based on AWS for Smart Livestock.}, journal = {Animals : an open access journal from MDPI}, volume = {11}, number = {9}, pages = {}, pmid = {34573662}, issn = {2076-2615}, abstract = {In the ecological future of the planet, intelligent agriculture relies on CPS and IoT to free up human resources and increase production efficiency. Due to the growing number of connected IoT devices, the maximum scalability capacity, and available computing power of the existing architectural frameworks will be reached. This necessitates finding a solution that meets the continuously growing demands in smart farming. Cloud-based IoT solutions are achieving increasingly high popularity. The aim of this study was to design a scalable cloud-based architecture for a smart livestock monitoring system following Agile methodology and featuring environmental monitoring, health, growth, behaviour, reproduction, emotional state, and stress levels of animals. The AWS services used, and their specific tasks related to the proposed architecture are explained in detail. A stress test was performed to prove the data ingesting and processing capability of the proposed architecture. 
Experimental results proved that the proposed architecture using AWS automated scaling mechanisms and IoT devices are fully capable of processing the growing amount of data, which in turn allow for meeting the required needs of the constantly expanding number of CPS systems.}, } @article {pmid34566264, year = {2023}, author = {Kumar, R and Al-Turjman, F and Srinivas, LNB and Braveen, M and Ramakrishnan, J}, title = {ANFIS for prediction of epidemic peak and infected cases for COVID-19 in India.}, journal = {Neural computing & applications}, volume = {35}, number = {10}, pages = {7207-7220}, pmid = {34566264}, issn = {0941-0643}, abstract = {Corona Virus Disease 2019 (COVID-19) is a continuing extensive incident globally affecting several million people's health and sometimes leading to death. The outbreak prediction and making cautious steps is the only way to prevent the spread of COVID-19. This paper presents an Adaptive Neuro-fuzzy Inference System (ANFIS)-based machine learning technique to predict the possible outbreak in India. The proposed ANFIS-based prediction system tracks the growth of epidemic based on the previous data sets fetched from cloud computing. The proposed ANFIS technique predicts the epidemic peak and COVID-19 infected cases through the cloud data sets. The ANFIS is chosen for this study as it has both numerical and linguistic knowledge, and also has ability to classify data and identify patterns. The proposed technique not only predicts the outbreak but also tracks the disease and suggests a measurable policy to manage the COVID-19 epidemic. The obtained prediction shows that the proposed technique very effectively tracks the growth of the COVID-19 epidemic. The result shows the growth of infection rate decreases at end of 2020 and also has delay epidemic peak by 40-60 days. The prediction result using the proposed ANFIS technique shows a low Mean Square Error (MSE) of 1.184 × 10[-3] with an accuracy of 86%. 
The study provides important information for public health providers and the government to control the COVID-19 epidemic.}, } @article {pmid34566262, year = {2021}, author = {Rufino Henrique, PS and Prasad, R}, title = {6G Networks for Next Generation of Digital TV Beyond 2030.}, journal = {Wireless personal communications}, volume = {121}, number = {2}, pages = {1363-1378}, pmid = {34566262}, issn = {0929-6212}, abstract = {This paper proposed a novel 6G QoS over the future 6G wireless architecture to offer excellent Quality of Service (QoS) for the next generation of digital TV beyond 2030. During the last 20 years, the way society used to watch and consume TV and Cinema has changed radically. The creation of the Over The Top content platforms based on Cloud Services followed by its commercial video consumption model, offering flexibility for subscribers such as Video on Demand. Besides the new business model created, the network infrastructure and wireless technologies also permitted the streaming of high-quality TV and film formats such as High Definition, followed by the latest widespread TV standardization Ultra-High-Definition TV. Mobile Broadband services onset the possibility for consumers to watch TV or Video content anywhere at any time. However, the network infrastructure needs continuous improvement, primarily when crises, like the coronavirus disease (COVID-19) and the worldwide pandemic, creates immense network traffic congestions. The outcome of that congestion was the decrease of QoS for such multimedia services, impacting the user's experience. More power-hungry video applications are commencing to test the networks' resilience and future roadmap of 5G and Beyond 5G (B5G). 
For this, 6G architecture planning must be focused on offering the ultimate QoS for prosumers beyond 2030.}, } @article {pmid34563896, year = {2021}, author = {Jennings, MR and Turner, C and Bond, RR and Kennedy, A and Thantilage, R and Kechadi, MT and Le-Khac, NA and McLaughlin, J and Finlay, DD}, title = {Code-free cloud computing service to facilitate rapid biomedical digital signal processing and algorithm development.}, journal = {Computer methods and programs in biomedicine}, volume = {211}, number = {}, pages = {106398}, doi = {10.1016/j.cmpb.2021.106398}, pmid = {34563896}, issn = {1872-7565}, mesh = {Algorithms ; *Cloud Computing ; Programming Languages ; Signal Processing, Computer-Assisted ; *Software ; }, abstract = {BACKGROUND AND OBJECTIVE: Cloud computing has the ability to offload processing tasks to a remote computing resources. Presently, the majority of biomedical digital signal processing involves a ground-up approach by writing code in a variety of languages. This may reduce the time a researcher or health professional has to process data, while increasing the barrier to entry to those with little or no software development experience. In this study, we aim to provide a service capable of handling and processing biomedical data via a code-free interface. Furthermore, our solution should support multiple file formats and processing languages while saving user inputs for repeated use.

METHODS: A web interface via the Python-based Django framework was developed with the potential to shorten the time taken to create an algorithm, encourage code reuse, and democratise digital signal processing tasks for non-technical users using a code-free user interface. A user can upload data, create an algorithm and download the result. Using discrete functions and multi-lingual scripts (e.g. MATLAB or Python), the user can manipulate data rapidly in a repeatable manner. Multiple data file formats are supported by a decision-based file handler and user authentication-based storage allocation method.

RESULTS: The proposed system has been demonstrated as effective in handling multiple input data types in various programming languages, including Python and MATLAB. This, in turn, has the potential to reduce currently experienced bottlenecks in cross-platform development of bio-signal processing algorithms. The source code for this system has been made available to encourage reuse. A cloud service for digital signal processing has the ability to reduce the apparent complexity and abstract the need to understand the intricacies of signal processing.

CONCLUSION: We have introduced a web-based system capable of reducing the barrier to entry for inexperienced programmers. Furthermore, our system is reproducible and scalable for use in a variety of clinical or research fields.}, } @article {pmid34554331, year = {2021}, author = {Setiani, P and Devianto, LA and Ramdani, F}, title = {Rapid estimation of CO2 emissions from forest fire events using cloud-based computation of google earth engine.}, journal = {Environmental monitoring and assessment}, volume = {193}, number = {10}, pages = {669}, pmid = {34554331}, issn = {1573-2959}, mesh = {Carbon Dioxide/analysis ; Cloud Computing ; Environmental Monitoring ; *Fires ; Search Engine ; *Wildfires ; }, abstract = {One of the main sources of greenhouse gases is forest fire, with carbon dioxide as its main constituent. With increasing global surface temperatures, the probability of forest fire events also increases. A method that enables rapid quantification of emissions is even more necessary to estimate the environmental impact. This study introduces the application of the Google Earth Engine platform to monitor burned areas in forest fire events in Mount Arjuno, Indonesia, during the 2016-2019 period, using Landsat-8 and Sentinel-2 satellite imageries. The events particularly affected grassland and tropical forest areas, as well as a fraction of agricultural areas, with a total estimated emission of 2.5 × 10[3] tCO2/km[2] burned area. 
Higher carbon dioxide emissions were also observed, consistent with the higher local surface temperature as well as the CO total column mixing ratio average retrieved from Sentinel-5 p Tropospheric Monitoring Instrument during the period of analysis.}, } @article {pmid34549196, year = {2021}, author = {Alharbi, A and Abdur Rahman, MD}, title = {Review of Recent Technologies for Tackling COVID-19.}, journal = {SN computer science}, volume = {2}, number = {6}, pages = {460}, pmid = {34549196}, issn = {2661-8907}, abstract = {The current pandemic caused by the COVID-19 virus requires more effort, experience, and science-sharing to overcome the damage caused by the pathogen. The fast and wide human-to-human transmission of the COVID-19 virus demands a significant role of the newest technologies in the form of local and global computing and information sharing, data privacy, and accurate tests. The advancements of deep neural networks, cloud computing solutions, blockchain technology, and beyond 5G (B5G) communication have contributed to the better management of the COVID-19 impacts on society. This paper reviews recent attempts to tackle the COVID-19 situation using these technological advancements.}, } @article {pmid34543209, year = {2022}, author = {Biasi, LD and Citarella, AA and Risi, M and Tortora, G}, title = {A Cloud Approach for Melanoma Detection Based on Deep Learning Networks.}, journal = {IEEE journal of biomedical and health informatics}, volume = {26}, number = {3}, pages = {962-972}, doi = {10.1109/JBHI.2021.3113609}, pmid = {34543209}, issn = {2168-2208}, mesh = {*Deep Learning ; Humans ; Machine Learning ; *Melanoma/diagnostic imaging ; Neural Networks, Computer ; }, abstract = {In the era of digitized images, the goal is to extract information from them and create new knowledge thanks to Computer Vision techniques, Machine Learning and Deep Learning. 
This enables the use of images for early diagnosis and subsequent treatment of a wide range of diseases. In the dermatological field, deep neural networks are used to distinguish between melanoma and non-melanoma images. In this paper, we have underlined two essential points of melanoma detection research. The first aspect considered is how even a simple modification of the parameters in the dataset determines a change of the accuracy of classifiers. In this case, we investigated the Transfer Learning issues. Following the results of this first analysis, we suggest that continuous training-test iterations are needed to provide robust prediction models. The second point is the need to have a more flexible system architecture that can handle changes in the training datasets. In this context, we proposed the development and implementation of a hybrid architecture based on Cloud, Fog and Edge Computing to provide a Melanoma Detection service based on clinical and dermoscopic images. At the same time, this architecture must deal with the amount of data to be analyzed by reducing the running time of the continuous retrain. This fact has been highlighted with experiments carried out on a single machine and different distribution systems, showing how a distributed approach guarantees output achievement in a much more sufficient time.}, } @article {pmid34541313, year = {2021}, author = {Qawqzeh, Y and Alharbi, MT and Jaradat, A and Abdul Sattar, KN}, title = {A review of swarm intelligence algorithms deployment for scheduling and optimization in cloud computing environments.}, journal = {PeerJ. Computer science}, volume = {7}, number = {}, pages = {e696}, pmid = {34541313}, issn = {2376-5992}, abstract = {BACKGROUND: This review focuses on reviewing the recent publications of swarm intelligence algorithms (particle swarm optimization (PSO), ant colony optimization (ACO), artificial bee colony (ABC), and the firefly algorithm (FA)) in scheduling and optimization problems. 
Swarm intelligence (SI) can be described as the intelligent behavior of natural living animals, fishes, and insects. In fact, it is based on agent groups or populations in which they have a reliable connection among them and with their environment. Inside such a group or population, each agent (member) performs according to certain rules that make it capable of maximizing the overall utility of that certain group or population. It can be described as a collective intelligence among self-organized members in a certain group or population. In fact, biology inspired many researchers to mimic the behavior of certain natural swarms (birds, animals, or insects) to solve some computational problems effectively.

METHODOLOGY: SI techniques were utilized in a cloud computing environment seeking optimum scheduling strategies. Hence, the most recent publications (2015-2021) that belong to SI algorithms are reviewed and summarized.

RESULTS: It is clear that the number of algorithms for cloud computing optimization is increasing rapidly. The number of PSO, ACO, ABC, and FA related journal papers has visibly increased. However, it is noticeable that many recently emerging algorithms emerged based on amendments to the original SI algorithms, especially the PSO algorithm.

CONCLUSIONS: The major intention of this work is to motivate interested researchers to develop and innovate new SI-based solutions that can handle complex and multi-objective computational problems.}, } @article {pmid34541307, year = {2021}, author = {Ali, O and Ishak, MK and Bhatti, MKL}, title = {Emerging IoT domains, current standings and open research challenges: a review.}, journal = {PeerJ. Computer science}, volume = {7}, number = {}, pages = {e659}, pmid = {34541307}, issn = {2376-5992}, abstract = {Over the last decade, the Internet of Things (IoT) domain has grown dramatically, from ultra-low-power hardware design to cloud-based solutions, and now, with the rise of 5G technology, a new horizon for edge computing on IoT devices will be introduced. A wide range of communication technologies has steadily evolved in recent years, representing a diverse range of domain areas and communication specifications. Because of the heterogeneity of technology and interconnectivity, the true realisation of the IoT ecosystem is currently hampered by multiple dynamic integration challenges. In this context, several emerging IoT domains necessitate a complete re-modeling, design, and standardisation from the ground up in order to achieve seamless IoT ecosystem integration. The Internet of Nano-Things (IoNT), Internet of Space-Things (IoST), Internet of Underwater-Things (IoUT) and Social Internet of Things (SIoT) are investigated in this paper with a broad future scope based on their integration and ability to source other IoT domains by highlighting their application domains, state-of-the-art research, and open challenges. To the best of our knowledge, there is little or no information on the current state of these ecosystems, which is the motivating factor behind this article. 
Finally, the paper summarises the integration of these ecosystems with current IoT domains and suggests future directions for overcoming the challenges.}, } @article {pmid34531717, year = {2021}, author = {Fletcher, MD}, title = {Can Haptic Stimulation Enhance Music Perception in Hearing-Impaired Listeners?.}, journal = {Frontiers in neuroscience}, volume = {15}, number = {}, pages = {723877}, pmid = {34531717}, issn = {1662-4548}, abstract = {Cochlear implants (CIs) have been remarkably successful at restoring hearing in severely-to-profoundly hearing-impaired individuals. However, users often struggle to deconstruct complex auditory scenes with multiple simultaneous sounds, which can result in reduced music enjoyment and impaired speech understanding in background noise. Hearing aid users often have similar issues, though these are typically less acute. Several recent studies have shown that haptic stimulation can enhance CI listening by giving access to sound features that are poorly transmitted through the electrical CI signal. This "electro-haptic stimulation" improves melody recognition and pitch discrimination, as well as speech-in-noise performance and sound localization. The success of this approach suggests it could also enhance auditory perception in hearing-aid users and other hearing-impaired listeners. This review focuses on the use of haptic stimulation to enhance music perception in hearing-impaired listeners. Music is prevalent throughout everyday life, being critical to media such as film and video games, and often being central to events such as weddings and funerals. It represents the biggest challenge for signal processing, as it is typically an extremely complex acoustic signal, containing multiple simultaneous harmonic and inharmonic sounds. 
Signal-processing approaches developed for enhancing music perception could therefore have significant utility for other key issues faced by hearing-impaired listeners, such as understanding speech in noisy environments. This review first discusses the limits of music perception in hearing-impaired listeners and the limits of the tactile system. It then discusses the evidence around integration of audio and haptic stimulation in the brain. Next, the features, suitability, and success of current haptic devices for enhancing music perception are reviewed, as well as the signal-processing approaches that could be deployed in future haptic devices. Finally, the cutting-edge technologies that could be exploited for enhancing music perception with haptics are discussed. These include the latest micro motor and driver technology, low-power wireless technology, machine learning, big data, and cloud computing. New approaches for enhancing music perception in hearing-impaired listeners could substantially improve quality of life. Furthermore, effective haptic techniques for providing complex sound information could offer a non-invasive, affordable means for enhancing listening more broadly in hearing-impaired individuals.}, } @article {pmid34529673, year = {2021}, author = {Andleeb, S and Abbasi, WA and Ghulam Mustafa, R and Islam, GU and Naseer, A and Shafique, I and Parween, A and Shaheen, B and Shafiq, M and Altaf, M and Ali Abbas, S}, title = {ESIDE: A computationally intelligent method to identify earthworm species (E. 
fetida) from digital images: Application in taxonomy.}, journal = {PloS one}, volume = {16}, number = {9}, pages = {e0255674}, pmid = {34529673}, issn = {1932-6203}, mesh = {Animals ; Computer Simulation ; Ecosystem ; Image Processing, Computer-Assisted/*methods ; *Machine Learning ; Oligochaeta/*classification/physiology ; Photography/*instrumentation ; }, abstract = {Earthworms (Crassiclitellata) being ecosystem engineers significantly affect the physical, chemical, and biological properties of the soil by recycling organic material, increasing nutrient availability, and improving soil structure. The efficiency of earthworms in ecology varies along with species. Therefore, the role of taxonomy in earthworm study is significant. The taxonomy of earthworms cannot reliably be established through morphological characteristics because the small and simple body plan of the earthworm does not have anatomical complex and highly specialized structures. Recently, molecular techniques have been adopted to accurately classify the earthworm species but these techniques are time-consuming and costly. To combat this issue, in this study, we propose a machine learning-based earthworm species identification model that uses digital images of earthworms. We performed a stringent performance evaluation not only through 10-fold cross-validation and on an external validation dataset but also in real settings by involving an experienced taxonomist. In all the evaluation settings, our proposed model has given state-of-the-art performance and justified its use to aid earthworm taxonomy studies. 
We made this model openly accessible through a cloud-based webserver and python code available at https://sites.google.com/view/wajidarshad/software and https://github.com/wajidarshad/ESIDE.}, } @article {pmid34525685, year = {2022}, author = {Gxokwe, S and Dube, T and Mazvimavi, D}, title = {Leveraging Google Earth Engine platform to characterize and map small seasonal wetlands in the semi-arid environments of South Africa.}, journal = {The Science of the total environment}, volume = {803}, number = {}, pages = {150139}, doi = {10.1016/j.scitotenv.2021.150139}, pmid = {34525685}, issn = {1879-1026}, mesh = {Bayes Theorem ; Environmental Monitoring ; *Search Engine ; Seasons ; South Africa ; *Wetlands ; }, abstract = {Although significant scientific research strides have been made in mapping the spatial extents and ecohydrological dynamics of wetlands in semi-arid environments, the focus on small wetlands remains a challenge. This is due to the sensing characteristics of remote sensing platforms and lack of robust data processing techniques. Advancements in data analytic tools, such as the introduction of Google Earth Engine (GEE) platform provides unique opportunities for improved assessment of small and scattered wetlands. This study thus assessed the capabilities of GEE cloud-computing platform in characterising small seasonal flooded wetlands, using the new generation Sentinel 2 data from 2016 to 2020. Specifically, the study assessed the spectral separability of different land cover classes for two different wetlands detected, using Sentinel-2 multi-year composite water and vegetation indices and to identify the most suitable GEE machine learning algorithm for accurately detecting and mapping semi-arid seasonal wetlands. This was achieved using the object based Random Forest (RF), Support Vector Machine (SVM), Classification and Regression Tree (CART) and Naïve Bayes (NB) advanced algorithms in GEE. 
The results demonstrated the capabilities of using the GEE platform to characterize wetlands with acceptable accuracy. All algorithms showed superiority, in mapping the two wetlands except for the NB method, which had lowest overall classification accuracy. These findings underscore the relevance of the GEE platform, Sentinel-2 data and advanced algorithms in characterizing small and seasonal semi-arid wetlands.}, } @article {pmid34522068, year = {2023}, author = {Nasser, N and Emad-Ul-Haq, Q and Imran, M and Ali, A and Razzak, I and Al-Helali, A}, title = {A smart healthcare framework for detection and monitoring of COVID-19 using IoT and cloud computing.}, journal = {Neural computing & applications}, volume = {35}, number = {19}, pages = {13775-13789}, pmid = {34522068}, issn = {0941-0643}, abstract = {Coronavirus (COVID-19) is a very contagious infection that has drawn the world's attention. Modeling such diseases can be extremely valuable in predicting their effects. Although classic statistical modeling may provide adequate models, it may also fail to understand the data's intricacy. An automatic COVID-19 detection system based on computed tomography (CT) scan or X-ray images is effective, but a robust system design is challenging. In this study, we propose an intelligent healthcare system that integrates IoT-cloud technologies. This architecture uses smart connectivity sensors and deep learning (DL) for intelligent decision-making from the perspective of the smart city. The intelligent system tracks the status of patients in real time and delivers reliable, timely, and high-quality healthcare facilities at a low cost. COVID-19 detection experiments are performed using DL to test the viability of the proposed system. We use a sensor for recording, transferring, and tracking healthcare data. CT scan images from patients are sent to the cloud by IoT sensors, where the cognitive module is stored. 
The system decides the patient status by examining the images of the CT scan. The DL cognitive module makes the real-time decision on the possible course of action. When information is conveyed to a cognitive module, we use a state-of-the-art classification algorithm based on DL, i.e., ResNet50, to detect and classify whether the patients are normal or infected by COVID-19. We validate the proposed system's robustness and effectiveness using two benchmark publicly available datasets (Covid-Chestxray dataset and Chex-Pert dataset). At first, a dataset of 6000 images is prepared from the above two datasets. The proposed system was trained on the collection of images from 80% of the datasets and tested with 20% of the data. Cross-validation is performed using a tenfold cross-validation technique for performance evaluation. The results indicate that the proposed system gives an accuracy of 98.6%, a sensitivity of 97.3%, a specificity of 98.2%, and an F1-score of 97.87%. Results clearly show that the accuracy, specificity, sensitivity, and F1-score of our proposed method are high. The comparison shows that the proposed system performs better than the existing state-of-the-art systems. The proposed system will be helpful in medical diagnosis research and healthcare systems. It will also support the medical experts for COVID-19 screening and lead to a precious second opinion.}, } @article {pmid34518711, year = {2021}, author = {Sood, SK and Rawat, KS}, title = {A fog assisted intelligent framework based on cyber physical system for safe evacuation in panic situations.}, journal = {Computer communications}, volume = {178}, number = {}, pages = {297-306}, pmid = {34518711}, issn = {0140-3664}, abstract = {In the current scenario of the COVID-19 pandemic and worldwide health emergency, one of the major challenges is to identify and predict the panic health of persons. 
The management of panic health and on-time evacuation prevents COVID-19 infection incidences in educational institutions and public places. Therefore, a system is required to predict the infection and suggests a safe evacuation path to people that control panic scenarios with mortality. In this paper, a fog-assisted cyber physical system is introduced to control panic attacks and COVID-19 infection risk in public places. The proposed model uses the concept of physical and cyber space. The physical space helps in real time data collection and transmission of the alert generation to the stakeholders. Cyberspace consists of two spaces, fog space, and cloud-space. The fog-space facilitates panic health and COVID-19 symptoms determination with alert generation for risk-affected areas. Cloud space monitors and predicts the person's panic health and symptoms using the SARIMA model. Furthermore, it also identifies risk-prone regions in the affected place using Geographical Population Analysis. The performance evaluation acknowledges the efficiency related to panic health determination and prediction based on the SARIMA with risks mapping accuracy. 
The proposed system provides an efficient on time evacuation with priority from risk-affected places that protect people from attacks due to panic and infection caused by COVID-19.}, } @article {pmid34514787, year = {2021}, author = {Lin, Z and Zou, J and Liu, S and Peng, C and Li, Z and Wan, X and Fang, D and Yin, J and Gobbo, G and Chen, Y and Ma, J and Wen, S and Zhang, P and Yang, M}, title = {Correction to "A Cloud Computing Platform for Scalable Relative and Absolute Binding Free Energy Prediction: New Opportunities and Challenges for Drug Discovery".}, journal = {Journal of chemical information and modeling}, volume = {61}, number = {9}, pages = {4819}, doi = {10.1021/acs.jcim.1c00934}, pmid = {34514787}, issn = {1549-960X}, } @article {pmid34514378, year = {2021}, author = {Sang, GM and Xu, L and de Vrieze, P}, title = {A Predictive Maintenance Model for Flexible Manufacturing in the Context of Industry 4.0.}, journal = {Frontiers in big data}, volume = {4}, number = {}, pages = {663466}, pmid = {34514378}, issn = {2624-909X}, abstract = {The Industry 4.0 paradigm is the focus of modern manufacturing system design. The integration of cutting-edge technologies such as the Internet of things, cyber-physical systems, big data analytics, and cloud computing requires a flexible platform supporting the effective optimization of manufacturing-related processes, e.g., predictive maintenance. Existing predictive maintenance studies generally focus on either a predictive model without considering the maintenance decisions or maintenance optimizations based on the degradation models of the known system. To address this, we propose PMMI 4.0, a Predictive Maintenance Model for Industry 4.0, which utilizes a newly proposed solution PMS4MMC for supporting an optimized maintenance schedule plan for multiple machine components driven by a data-driven LSTM model for RUL (remaining useful life) estimation. 
The effectiveness of the proposed solution is demonstrated using a real-world industrial case with related data. The results showed the validity and applicability of this work.}, } @article {pmid34512110, year = {2021}, author = {Ahmadi, Z and Haghi Kashani, M and Nikravan, M and Mahdipour, E}, title = {Fog-based healthcare systems: A systematic review.}, journal = {Multimedia tools and applications}, volume = {80}, number = {30}, pages = {36361-36400}, pmid = {34512110}, issn = {1380-7501}, abstract = {The healthcare system aims to provide a reliable and organized solution to enhance the health of human society. Studying the history of patients can help physicians to consider patients' needs in healthcare system designing and offering service, which leads to an increase in patient satisfaction. Therefore, healthcare is becoming a growing contesting market. With this significant growth in healthcare systems, such challenges as huge data volume, response time, latency, and security vulnerability are raised. Therefore, fog computing, as a well-known distributed architecture, could help to solve such challenges. In fog computing architecture, processing components are placed between the end devices and cloud components, and they execute applications. This architecture is suitable for such applications as healthcare systems that need a real-time response and low latency. In this paper, a systematic review of available approaches in the field of fog-based healthcare systems is proposed; the challenges of its application in healthcare are explored, classified, and discussed. First, the fog computing approaches in healthcare are categorized into three main classes: communication, application, and resource/service. Then, they are discussed and compared based on their tools, evaluation methods, and evaluation metrics. 
Finally, based on observations, some open issues and challenges are highlighted for further studies in fog-based healthcare.}, } @article {pmid34512108, year = {2021}, author = {Kolak, M and Li, X and Lin, Q and Wang, R and Menghaney, M and Yang, S and Anguiano, V}, title = {The US COVID Atlas: A dynamic cyberinfrastructure surveillance system for interactive exploration of the pandemic.}, journal = {Transactions in GIS : TG}, volume = {25}, number = {4}, pages = {1741-1765}, pmid = {34512108}, issn = {1361-1682}, support = {U2C DA050098/DA/NIDA NIH HHS/United States ; }, abstract = {Distributed spatial infrastructures leveraging cloud computing technologies can tackle issues of disparate data sources and address the need for data-driven knowledge discovery and more sophisticated spatial analysis central to the COVID-19 pandemic. We implement a new, open source spatial middleware component (libgeoda) and system design to scale development quickly to effectively meet the need for surveilling county-level metrics in a rapidly changing pandemic landscape. We incorporate, wrangle, and analyze multiple data streams from volunteered and crowdsourced environments to leverage multiple data perspectives. We integrate explorative spatial data analysis (ESDA) and statistical hotspot standards to detect infectious disease clusters in real time, building on decades of research in GIScience and spatial statistics. We scale the computational infrastructure to provide equitable access to data and insights across the entire USA, demanding a basic but high-quality standard of ESDA techniques. 
Finally, we engage a research coalition and incorporate principles of user-centered design to ground the direction and design of Atlas application development.}, } @article {pmid34511519, year = {2022}, author = {Gómez, D and Romero, J and López, P and Vázquez, J and Cappo, C and Pinto, D and Villalba, C}, title = {Cloud architecture for electronic health record systems interoperability.}, journal = {Technology and health care : official journal of the European Society for Engineering and Medicine}, volume = {30}, number = {3}, pages = {551-564}, doi = {10.3233/THC-212806}, pmid = {34511519}, issn = {1878-7401}, mesh = {*Electronic Health Records ; Humans ; *Software ; }, abstract = {BACKGROUND: Current Electronic Health Record (EHR) systems are built using different data representation and information models, which makes difficult achieving information exchange.

OBJECTIVE: Our aim was to propose a scalable architecture that allows the integration of information from different EHR systems.

METHODS: A cloud-based EHR interoperable architecture is proposed through the standardization and integration of patient electronic health records. The data is stored in a cloud repository with high availability features. Stakeholders can retrieve the patient EHR by requesting only to the integrated data repository. The OpenEHR two-level approach is applied according to the HL7-FHIR standards. We validated our architecture by comparing it with 5 different works (CHISTAR, ARIEN, DIRAYA, LLPHR and INEHRIS) using a set of selected axes and a scoring method.

RESULTS: The problem was reduced to a single point of communication between each EHR system and the integrated data repository. By combining cloud computing paradigm with selected health informatics standards, we obtained a generic and scalable architecture that complies 100% with interoperability requisites according to the evaluation framework applied.

CONCLUSIONS: The architecture allowed the integration of several EHR systems, adapting them with the use of standards and ensuring the availability thanks to cloud computing features.}, } @article {pmid34510300, year = {2021}, author = {Pang, J and Bachmatiuk, A and Yang, F and Liu, H and Zhou, W and Rümmeli, MH and Cuniberti, G}, title = {Applications of Carbon Nanotubes in the Internet of Things Era.}, journal = {Nano-micro letters}, volume = {13}, number = {1}, pages = {191}, pmid = {34510300}, issn = {2150-5551}, abstract = {The post-Moore's era has boosted the progress in carbon nanotube-based transistors. Indeed, the 5G communication and cloud computing stimulate the research in applications of carbon nanotubes in electronic devices. In this perspective, we deliver the readers with the latest trends in carbon nanotube research, including high-frequency transistors, biomedical sensors and actuators, brain-machine interfaces, and flexible logic devices and energy storages. Future opportunities are given for calling on scientists and engineers into the emerging topics.}, } @article {pmid34505137, year = {2022}, author = {Grzesik, P and Augustyn, DR and Wyciślik, Ł and Mrozek, D}, title = {Serverless computing in omics data analysis and integration.}, journal = {Briefings in bioinformatics}, volume = {23}, number = {1}, pages = {}, pmid = {34505137}, issn = {1477-4054}, support = {02/100/RGJ21/0009//Silesian University of Technology/ ; }, mesh = {COVID-19/epidemiology/*genetics/*metabolism ; *Cloud Computing ; *Computational Biology ; *Genomics ; Humans ; *Pandemics ; *SARS-CoV-2/genetics/metabolism ; *Software ; }, abstract = {A comprehensive analysis of omics data can require vast computational resources and access to varied data sources that must be integrated into complex, multi-step analysis pipelines. 
Execution of many such analyses can be accelerated by applying the cloud computing paradigm, which provides scalable resources for storing data of different types and parallelizing data analysis computations. Moreover, these resources can be reused for different multi-omics analysis scenarios. Traditionally, developers are required to manage a cloud platform's underlying infrastructure, configuration, maintenance and capacity planning. The serverless computing paradigm simplifies these operations by automatically allocating and maintaining both servers and virtual machines, as required for analysis tasks. This paradigm offers highly parallel execution and high scalability without manual management of the underlying infrastructure, freeing developers to focus on operational logic. This paper reviews serverless solutions in bioinformatics and evaluates their usage in omics data analysis and integration. We start by reviewing the application of the cloud computing model to a multi-omics data analysis and exposing some shortcomings of the early approaches. We then introduce the serverless computing paradigm and show its applicability for performing an integrative analysis of multiple omics data sources in the context of the COVID-19 pandemic.}, } @article {pmid34502840, year = {2021}, author = {Mateo-Fornés, J and Pagès-Bernaus, A and Plà-Aragonés, LM and Castells-Gasia, JP and Babot-Gaspa, D}, title = {An Internet of Things Platform Based on Microservices and Cloud Paradigms for Livestock.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {17}, pages = {}, pmid = {34502840}, issn = {1424-8220}, mesh = {Agriculture ; Animals ; Farms ; *Internet of Things ; Livestock ; Software ; Swine ; }, abstract = {With the growing adoption of the Internet of Things (IoT) technology in the agricultural sector, smart devices are becoming more prevalent. 
The availability of new, timely, and precise data offers a great opportunity to develop advanced analytical models. Therefore, the platform used to deliver new developments to the final user is a key enabler for adopting IoT technology. This work presents a generic design of a software platform based on the cloud and implemented using microservices to facilitate the use of predictive or prescriptive analytics under different IoT scenarios. Several technologies are combined to comply with the essential features-scalability, portability, interoperability, and usability-that the platform must consider to assist decision-making in agricultural 4.0 contexts. The platform is prepared to integrate new sensor devices, perform data operations, integrate several data sources, transfer complex statistical model developments seamlessly, and provide a user-friendly graphical interface. The proposed software architecture is implemented with open-source technologies and validated in a smart farming scenario. The growth of a batch of pigs at the fattening stage is estimated from the data provided by a level sensor installed in the silo that stores the feed from which the animals are fed. With this application, we demonstrate how farmers can monitor the weight distribution and receive alarms when high deviations happen.}, } @article {pmid34502813, year = {2021}, author = {Kalyani, Y and Collier, R}, title = {A Systematic Survey on the Role of Cloud, Fog, and Edge Computing Combination in Smart Agriculture.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {17}, pages = {}, pmid = {34502813}, issn = {1424-8220}, support = {16/SPP/3296./SFI_/Science Foundation Ireland/Ireland ; }, mesh = {*Agriculture ; *Cloud Computing ; }, abstract = {Cloud Computing is a well-established paradigm for building service-centric systems. 
However, ultra-low latency, high bandwidth, security, and real-time analytics are limitations in Cloud Computing when analysing and providing results for a large amount of data. Fog and Edge Computing offer solutions to the limitations of Cloud Computing. The number of agricultural domain applications that use the combination of Cloud, Fog, and Edge is increasing in the last few decades. This article aims to provide a systematic literature review of current works that have been done in Cloud, Fog, and Edge Computing applications in the smart agriculture domain between 2015 and up-to-date. The key objective of this review is to identify all relevant research on new computing paradigms with smart agriculture and propose a new architecture model with the combinations of Cloud-Fog-Edge. Furthermore, it also analyses and examines the agricultural application domains, research approaches, and the application of used combinations. Moreover, this survey discusses the components used in the architecture models and briefly explores the communication protocols used to interact from one layer to another. Finally, the challenges of smart agriculture and future research directions are briefly pointed out in this article.}, } @article {pmid34502795, year = {2021}, author = {Stan, RG and Băjenaru, L and Negru, C and Pop, F}, title = {Evaluation of Task Scheduling Algorithms in Heterogeneous Computing Environments.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {17}, pages = {}, pmid = {34502795}, issn = {1424-8220}, support = {PubArt//University Politehnica of Bucharest/ ; }, mesh = {*Algorithms ; Cloud Computing ; Computer Simulation ; *Ecosystem ; Workload ; }, abstract = {This work establishes a set of methodologies to evaluate the performance of any task scheduling policy in heterogeneous computing contexts. We formally state a scheduling model for hybrid edge-cloud computing ecosystems and conduct simulation-based experiments on large workloads. 
In addition to the conventional cloud datacenters, we consider edge datacenters comprising smartphone and Raspberry Pi edge devices, which are battery powered. We define realistic capacities of the computational resources. Once a schedule is found, the various task demands can or cannot be fulfilled by the resource capacities. We build a scheduling and evaluation framework and measure typical scheduling metrics such as mean waiting time, mean turnaround time, makespan, throughput on the Round-Robin, Shortest Job First, Min-Min and Max-Min scheduling schemes. Our analysis and results show that the state-of-the-art independent task scheduling algorithms suffer from performance degradation in terms of significant task failures and nonoptimal resource utilization of datacenters in heterogeneous edge-cloud mediums in comparison to cloud-only mediums. In particular, for large sets of tasks, due to low battery or limited memory, more than 25% of tasks fail to execute for each scheduling scheme.}, } @article {pmid34502696, year = {2021}, author = {Resende, JS and Magalhães, L and Brandão, A and Martins, R and Antunes, L}, title = {Towards a Modular On-Premise Approach for Data Sharing.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {17}, pages = {}, pmid = {34502696}, issn = {1424-8220}, support = {PD/BD/128149/2016//Faculdade de Ciências e Tecnologia, Universidade Nova de Lisboa/ ; 830929//H2020-SUICT-03-2018/ ; }, mesh = {Algorithms ; Artificial Intelligence ; *Computer Security ; *Information Dissemination ; Privacy ; }, abstract = {The growing demand for everyday data insights drives the pursuit of more sophisticated infrastructures and artificial intelligence algorithms. When combined with the growing number of interconnected devices, this originates concerns about scalability and privacy. The main problem is that devices can detect the environment and generate large volumes of possibly identifiable data. 
Public cloud-based technologies have been proposed as a solution, due to their high availability and low entry costs. However, there are growing concerns regarding data privacy, especially with the introduction of the new General Data Protection Regulation, due to the inherent lack of control caused by using off-premise computational resources on which public cloud belongs. Users have no control over the data uploaded to such services as the cloud, which increases the uncontrolled distribution of information to third parties. This work aims to provide a modular approach that uses cloud-of-clouds to store persistent data and reduce upfront costs while allowing information to remain private and under users' control. In addition to storage, this work also extends focus on usability modules that enable data sharing. Any user can securely share and analyze/compute the uploaded data using private computing without revealing private data. This private computation can be training machine learning (ML) models. To achieve this, we use a combination of state-of-the-art technologies, such as MultiParty Computation (MPC) and K-anonymization to produce a complete system with intrinsic privacy properties.}, } @article {pmid34502688, year = {2021}, author = {Mutichiro, B and Tran, MN and Kim, YH}, title = {QoS-Based Service-Time Scheduling in the IoT-Edge Cloud.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {17}, pages = {}, pmid = {34502688}, issn = {1424-8220}, support = {2020-0-00946//Institute of Information & communications Technology Planning & Evaluation (IITP) , Korea government (MSIT)/ ; }, mesh = {Algorithms ; *Internet of Things ; Software ; Workload ; }, abstract = {In edge computing, scheduling heterogeneous workloads with diverse resource requirements is challenging. Besides limited resources, the servers may be overwhelmed with computational tasks, resulting in lengthy task queues and congestion occasioned by unusual network traffic patterns. 
Additionally, Internet of Things (IoT)/Edge applications have different characteristics coupled with performance requirements, which become determinants if most edge applications can both satisfy deadlines and each user's QoS requirements. This study aims to address these restrictions by proposing a mechanism that improves the cluster resource utilization and Quality of Service (QoS) in an edge cloud cluster in terms of service time. Containerization can provide a way to improve the performance of the IoT-Edge cloud by factoring in task dependencies and heterogeneous application resource demands. In this paper, we propose STaSA, a service time aware scheduler for the edge environment. The algorithm automatically assigns requests onto different processing nodes and then schedules their execution under real-time constraints, thus minimizing the number of QoS violations. The effectiveness of our scheduling model is demonstrated through implementation on KubeEdge, a container orchestration platform based on Kubernetes. Experimental results show significantly fewer violations in QoS during scheduling and improved performance compared to the state of the art.}, } @article {pmid34498660, year = {2021}, author = {Camargo, MD and Silveira, DT and Lazzari, DD and Rodrigues, AFV and Moraes, KB and Duarte, ERM}, title = {Nursing Activities Score: trajectory of the instrument from paper to cloud in a university hospital.}, journal = {Revista da Escola de Enfermagem da U S P}, volume = {55}, number = {}, pages = {e20200233}, doi = {10.1590/1980-220X-REEUSP-2020-0233}, pmid = {34498660}, issn = {1980-220X}, mesh = {*Computer Security ; Health Insurance Portability and Accountability Act ; Hospitals, University ; Humans ; *Nursing Care ; Software ; United States ; }, abstract = {OBJECTIVE: To report the process of organization and construction of an information technology structure named Nursing Activities Score (NAS) Cloud Technology®.

METHOD: This project was based on the life cycle theory and has enabled the development of technological production through software engineering.

RESULTS: The NAS Cloud Technology® was developed for remote and collaborative access on a website hosted by Google Sites® and protected in a business environment by the certified security and data protection devices Health Insurance Portability and Accountability Act (HIPAA). In 2015, this system received more than 10,000 submissions/month, totaling 12 care units for critical patients covered by the information technology structure, circa 200 nurses per day involved in the collection and hundreds of daily submissions, integrating the complete transition from paper to cloud.

CONCLUSION: The development of NAS Cloud Technology® system has enabled the use of technology as a facilitating means for the use of Nursing care data, providing tools for decision-making on the nursing personnel sizing required for the care demands in the inpatient care units. The potential of cloud structures stands out due to their possibility of innovation, as well as low-cost access and high replicability of the information system.}, } @article {pmid34497501, year = {2021}, author = {Luo, X and Feng, L and Xun, H and Zhang, Y and Li, Y and Yin, L}, title = {Rinegan: A Scalable Image Processing Architecture for Large Scale Surveillance Applications.}, journal = {Frontiers in neurorobotics}, volume = {15}, number = {}, pages = {648101}, pmid = {34497501}, issn = {1662-5218}, abstract = {Image processing is widely used in intelligent robots, significantly improving the surveillance capabilities of smart buildings, industrial parks, and border ports. However, relying on the camera installed in a single robot is not enough since it only provides a narrow field of view as well as limited processing performance. Specially, a target person such as the suspect may appear anywhere and tracking the suspect in such a large-scale scene requires cooperation between fixed cameras and patrol robots. This induces a significant surge in demand for data, computing resources, as well as networking infrastructures. In this work, we develop a scalable architecture to optimize image processing efficacy and response rate for visual ability. In this architecture, the lightweight pre-process and object detection functions are deployed on the gateway-side to minimize the bandwidth consumption. Cloud-side servers receive solely the recognized data rather than entire image or video streams to identify specific suspect. Then the cloud-side sends the information to the robot, and the robot completes the corresponding tracking task. 
All these functions are implemented and orchestrated based on micro-service architecture to improve the flexibility. We implement a prototype system, called Rinegan, and evaluate it in an in-lab testing environment. The result shows that Rinegan is able to improve the effectiveness and efficacy of image processing.}, } @article {pmid34489813, year = {2021}, author = {Shan, B and Pu, Y and Chen, B and Lu, S}, title = {New Technologies' Commercialization: The Roles of the Leader's Emotion and Incubation Support.}, journal = {Frontiers in psychology}, volume = {12}, number = {}, pages = {710122}, pmid = {34489813}, issn = {1664-1078}, abstract = {New technologies, such as brain-computer interfaces technology, advanced artificial intelligence, cloud computing, and virtual reality technology, have a strong influence on our daily activities. The application and commercialization of these technologies are prevailing globally, such as distance education, health monitoring, smart home devices, and robots. However, we still know little about the roles of individual emotion and the external environment on the commercialization of these new technologies. Therefore, we focus on the emotional factor of the leader, which is their passion for work, and discuss its effect on technology commercialization. We also analyzed the moderating role of incubation support in the relationship between the leader's emotion and technology commercialization. 
The results contribute to the application of emotion in improving the commercialization of new technologies.}, } @article {pmid34479966, year = {2021}, author = {Govind, D and Becker, JU and Miecznikowski, J and Rosenberg, AZ and Dang, J and Tharaux, PL and Yacoub, R and Thaiss, F and Hoyer, PF and Manthey, D and Lutnick, B and Worral, AM and Mohammad, I and Walavalkar, V and Tomaszewski, JE and Jen, KY and Sarder, P}, title = {PodoSighter: A Cloud-Based Tool for Label-Free Podocyte Detection in Kidney Whole-Slide Images.}, journal = {Journal of the American Society of Nephrology : JASN}, volume = {32}, number = {11}, pages = {2795-2813}, pmid = {34479966}, issn = {1533-3450}, support = {R01 DK114485/DK/NIDDK NIH HHS/United States ; U2C DK114886/DK/NIDDK NIH HHS/United States ; UL1 TR001412/TR/NCATS NIH HHS/United States ; U01 DK103225/DK/NIDDK NIH HHS/United States ; }, mesh = {Animals ; Automation ; Cell Count ; Cell Nucleus/ultrastructure ; *Cloud Computing ; Datasets as Topic ; Deep Learning ; Diabetic Nephropathies/chemically induced/pathology ; Disease Models, Animal ; Humans ; Image Processing, Computer-Assisted/*methods ; Kidney Diseases/*pathology ; Kidney Glomerulus/*cytology ; Mice ; Mice, Inbred C57BL ; Microscopy ; Periodic Acid-Schiff Reaction ; Podocytes/*ultrastructure ; Rats ; Species Specificity ; }, abstract = {BACKGROUND: Podocyte depletion precedes progressive glomerular damage in several kidney diseases. However, the current standard of visual detection and quantification of podocyte nuclei from brightfield microscopy images is laborious and imprecise.

METHODS: We have developed PodoSighter, an online cloud-based tool, to automatically identify and quantify podocyte nuclei from giga-pixel brightfield whole-slide images (WSIs) using deep learning. Ground-truth to train the tool used immunohistochemically or immunofluorescence-labeled images from a multi-institutional cohort of 122 histologic sections from mouse, rat, and human kidneys. To demonstrate the generalizability of our tool in investigating podocyte loss in clinically relevant samples, we tested it in rodent models of glomerular diseases, including diabetic kidney disease, crescentic GN, and dose-dependent direct podocyte toxicity and depletion, and in human biopsies from steroid-resistant nephrotic syndrome and from human autopsy tissues.

RESULTS: The optimal model yielded high sensitivity/specificity of 0.80/0.80, 0.81/0.86, and 0.80/0.91, in mouse, rat, and human images, respectively, from periodic acid-Schiff-stained WSIs. Furthermore, the podocyte nuclear morphometrics extracted using PodoSighter were informative in identifying diseased glomeruli. We have made PodoSighter freely available to the general public as turnkey plugins in a cloud-based web application for end users.

CONCLUSIONS: Our study demonstrates an automated computational approach to detect and quantify podocyte nuclei in standard histologically stained WSIs, facilitating podocyte research, and enabling possible future clinical applications.}, } @article {pmid34461487, year = {2021}, author = {Wang, C and Qin, J and Qu, C and Ran, X and Liu, C and Chen, B}, title = {A smart municipal waste management system based on deep-learning and Internet of Things.}, journal = {Waste management (New York, N.Y.)}, volume = {135}, number = {}, pages = {20-29}, doi = {10.1016/j.wasman.2021.08.028}, pmid = {34461487}, issn = {1879-2456}, mesh = {*Deep Learning ; *Garbage ; *Internet of Things ; *Refuse Disposal ; *Waste Management ; }, abstract = {A proof-of-concept municipal waste management system was proposed to reduce the cost of waste classification, monitoring and collection. In this system, we utilize the deep learning-based classifier and cloud computing technique to realize high accuracy waste classification at the beginning of garbage collection. To facilitate the subsequent waste disposal, we subdivide recyclable waste into plastic, glass, paper or cardboard, metal, fabric and the other recyclable waste, a total of six categories. Deep-learning convolution neural networks (CNN) were applied to realize the garbage classification task. Here, we investigate seven state-of-the-art CNNs and data pre-processing methods for waste classification, whose accuracies of nine categories range from 91.9 to 94.6% in the validation set. Among these networks, MobileNetV3 has a high classification accuracy (94.26%), a small storage size (49.5 MB) and the shortest running time (261.7 ms). Moreover, the Internet of Things (IoT) devices which implement information exchange between waste containers and waste management center are designed to monitor the overall amount of waste produced in this area and the operating state of any waste container via a set of sensors. 
According to monitoring information, the waste management center can schedule adaptive equipment deployment and maintenance, waste collection and vehicle routing plans, which serves as an essential part of a successful municipal waste management system.}, } @article {pmid34458659, year = {2021}, author = {Bellal, Z and Nour, B and Mastorakis, S}, title = {CoxNet: A Computation Reuse Architecture at the Edge.}, journal = {IEEE transactions on green communications and networking}, volume = {5}, number = {2}, pages = {765-777}, pmid = {34458659}, issn = {2473-2400}, abstract = {In recent years, edge computing has emerged as an effective solution to extend cloud computing and satisfy the demand of applications for low latency. However, with today's explosion of innovative applications (e.g., augmented reality, natural language processing, virtual reality), processing services for mobile and smart devices have become computation-intensive, consisting of multiple interconnected computations. This coupled with the need for delay-sensitivity and high quality of service put massive pressure on edge servers. Meanwhile, tasks invoking these services may involve similar inputs that could lead to the same output. In this paper, we present CoxNet, an efficient computation reuse architecture for edge computing. CoxNet enables edge servers to reuse previous computations while scheduling dependent incoming computations. We provide an analytical model for computation reuse joined with dependent task offloading and design a novel computing offloading scheduling scheme. We also evaluate the efficiency and effectiveness of CoxNet via synthetic and real-world datasets. 
Our results show that CoxNet is able to reduce the task execution time up to 66% based on a synthetic dataset and up to 50% based on a real-world dataset.}, } @article {pmid34458569, year = {2021}, author = {Peechara, RR and V, S}, title = {A chaos theory inspired, asynchronous two-way encryption mechanism for cloud computing.}, journal = {PeerJ. Computer science}, volume = {7}, number = {}, pages = {e628}, pmid = {34458569}, issn = {2376-5992}, abstract = {Data exchange over the Internet and other access channels is on the rise, leads to the insecurity of consequences. Many experiments have been conducted to investigate time-efficient and high-randomized encryption methods for the data. The latest studies, however, have still been debated because of different factors. The study outcomes do not yield completely random keys for encryption methods that are longer than this. Prominent repetition makes the processes predictable and susceptible to assaults. Furthermore, recently generated keys need recent algorithms to run at a high volume of transactional data successfully. In this article, the proposed solutions to these two critical issues are presented. In the beginning, one must use the chaotic series of events for generating keys is sufficient to obtain a high degree of randomness. Moreover, this work also proposes a novel and non-traditional validation test to determine the true randomness of the keys produced from a correlation algorithm. An approximate 100% probability of the vital phase over almost infinitely long-time intervals minimizes the algorithms' complexity for the higher volume of data security. It is suggested that these algorithms are mainly intended for cloud-based transactions. Data volume is potentially higher and extremely changeable 3% to 4% of the improvement in data transmission time with suggested algorithms. 
This research has the potential to improve communication systems over ten years by unblocking decades-long bottlenecks.}, } @article {pmid34456613, year = {2021}, author = {Duan, L and Da Xu, L}, title = {Data Analytics in Industry 4.0: A Survey.}, journal = {Information systems frontiers : a journal of research and innovation}, volume = {}, number = {}, pages = {1-17}, pmid = {34456613}, issn = {1387-3326}, abstract = {Industry 4.0 is the fourth industrial revolution for decentralized production through shared facilities to achieve on-demand manufacturing and resource efficiency. It evolves from Industry 3.0 which focuses on routine operation. Data analytics is the set of techniques focus on gain actionable insight to make smart decisions from a massive amount of data. As the performance of routine operation can be improved by smart decisions and smart decisions need the support from routine operation to collect relevant data, there is an increasing amount of research effort in the merge between Industry 4.0 and data analytics. To better understand current research efforts, hot topics, and tending topics on this critical intersection, the basic concepts in Industry 4.0 and data analytics are introduced first. Then the merge between them is decomposed into three components: industry sectors, cyber-physical systems, and analytic methods. Joint research efforts on different intersections with different components are studied and discussed. 
Finally, a systematic literature review on the interaction between Industry 4.0 and data analytics is conducted to understand the existing research focus and trend.}, } @article {pmid34450978, year = {2021}, author = {Rodero, C and Olmedo, E and Bardaji, R and Piera, J}, title = {New Radiometric Approaches to Compute Underwater Irradiances: Potential Applications for High-Resolution and Citizen Science-Based Water Quality Monitoring Programs.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {16}, pages = {}, pmid = {34450978}, issn = {1424-8220}, support = {776480//Horizon 2020 Framework Programme/ ; }, mesh = {*Citizen Science ; Environmental Monitoring ; Water ; *Water Quality ; }, abstract = {Measuring the diffuse attenuation coefficient (Kd) allows for monitoring the water body's environmental status. This parameter is of particular interest in water quality monitoring programs because it quantifies the presence of light and the euphotic zone's depth. Citizen scientists can meaningfully contribute by monitoring water quality, complementing traditional methods by reducing monitoring costs and significantly improving data coverage, empowering and supporting decision-making. However, the quality of the acquisition of in situ underwater irradiance measurements has some limitations, especially in areas where stratification phenomena occur in the first meters of depth. This vertical layering introduces a gradient of properties in the vertical direction, affecting the associated Kd. To detect and characterize these variations of Kd in the water column, it needs a system of optical sensors, ideally placed in a range of a few cm, improving the low vertical accuracy. Despite that, the problem of self-shading on the instrumentation becomes critical. Here, we introduce a new concept that aims to improve the vertical accuracy of the irradiance measurements: the underwater annular irradiance (Ea). 
This new concept consists of measuring the irradiance in an annular-shaped distribution. We first compute the optimal annular angle that avoids self-shading and maximizes the light captured by the sensors. Second, we use different scenarios of water types, solar zenith angle, and cloud coverage to assess the robustness of the corresponding diffuse attenuation coefficient, Ka. Finally, we derive empirical functions for computing Kd from Ka. This new concept opens the possibility to a new generation of optical sensors in an annular-shaped distribution which is expected to (a) increase the vertical resolution of the irradiance measurements and (b) be easy to deploy and maintain and thus to be more suitable for citizen scientists.}, } @article {pmid34450973, year = {2021}, author = {Lopez-Arevalo, I and Gonzalez-Compean, JL and Hinojosa-Tijerina, M and Martinez-Rendon, C and Montella, R and Martinez-Rodriguez, JL}, title = {A WoT-Based Method for Creating Digital Sentinel Twins of IoT Devices.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {16}, pages = {}, pmid = {34450973}, issn = {1424-8220}, support = {41756//FORDECYT-CONACYT/ ; }, abstract = {The data produced by sensors of IoT devices are becoming keystones for organizations to conduct critical decision-making processes. However, delivering information to these processes in real-time represents two challenges for the organizations: the first one is achieving a constant dataflow from IoT to the cloud and the second one is enabling decision-making processes to retrieve data from dataflows in real-time. 
This paper presents a cloud-based Web of Things method for creating digital twins of IoT devices (named sentinels). The novelty of the proposed approach is that sentinels create an abstract window for decision-making processes to: (a) find data (e.g., properties, events, and data from sensors of IoT devices) or (b) invoke functions (e.g., actions and tasks) from physical devices (PD), as well as from virtual devices (VD). In this approach, the applications and services of decision-making processes deal with sentinels instead of managing complex details associated with the PDs, VDs, and cloud computing infrastructures. A prototype based on the proposed method was implemented to conduct a case study based on a blockchain system for verifying contract violation in sensors used in product transportation logistics. The evaluation showed the effectiveness of sentinels enabling organizations to attain data from IoT sensors and the dataflows used by decision-making processes to convert these data into useful information.}, } @article {pmid34450960, year = {2021}, author = {Schackart, KE and Yoon, JY}, title = {Machine Learning Enhances the Performance of Bioreceptor-Free Biosensors.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {16}, pages = {}, pmid = {34450960}, issn = {1424-8220}, support = {P30 ES006694/ES/NIEHS NIH HHS/United States ; T32GM132008/NH/NIH HHS/United States ; }, mesh = {*Biosensing Techniques ; Machine Learning ; Neural Networks, Computer ; Spectrum Analysis, Raman ; Support Vector Machine ; }, abstract = {Since their inception, biosensors have frequently employed simple regression models to calculate analyte composition based on the biosensor's signal magnitude. Traditionally, bioreceptors provide excellent sensitivity and specificity to the biosensor. Increasingly, however, bioreceptor-free biosensors have been developed for a wide range of applications. 
Without a bioreceptor, maintaining strong specificity and a low limit of detection have become the major challenge. Machine learning (ML) has been introduced to improve the performance of these biosensors, effectively replacing the bioreceptor with modeling to gain specificity. Here, we present how ML has been used to enhance the performance of these bioreceptor-free biosensors. Particularly, we discuss how ML has been used for imaging, Enose and Etongue, and surface-enhanced Raman spectroscopy (SERS) biosensors. Notably, principal component analysis (PCA) combined with support vector machine (SVM) and various artificial neural network (ANN) algorithms have shown outstanding performance in a variety of tasks. We anticipate that ML will continue to improve the performance of bioreceptor-free biosensors, especially with the prospects of sharing trained models and cloud computing for mobile computation. To facilitate this, the biosensing community would benefit from increased contributions to open-access data repositories for biosensor data.}, } @article {pmid34450933, year = {2021}, author = {Gupta, D and Rani, S and Ahmed, SH and Verma, S and Ijaz, MF and Shafi, J}, title = {Edge Caching Based on Collaborative Filtering for Heterogeneous ICN-IoT Applications.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {16}, pages = {}, pmid = {34450933}, issn = {1424-8220}, abstract = {The substantial advancements offered by the edge computing has indicated serious evolutionary improvements for the internet of things (IoT) technology. The rigid design philosophy of the traditional network architecture limits its scope to meet future demands. However, information centric networking (ICN) is envisioned as a promising architecture to bridge the huge gaps and maintain IoT networks, mostly referred as ICN-IoT. The edge-enabled ICN-IoT architecture always demands efficient in-network caching techniques for supporting better user's quality of experience (QoE). 
In this paper, we propose an enhanced ICN-IoT content caching strategy by enabling artificial intelligence (AI)-based collaborative filtering within the edge cloud to support heterogeneous IoT architecture. This collaborative filtering-based content caching strategy would intelligently cache content on edge nodes for traffic management at cloud databases. The evaluations has been conducted to check the performance of the proposed strategy over various benchmark strategies, such as LCE, LCD, CL4M, and ProbCache. The analytical results demonstrate the better performance of our proposed strategy with average gain of 15% for cache hit ratio, 12% reduction in content retrieval delay, and 28% reduced average hop count in comparison to best considered LCD. We believe that the proposed strategy will contribute an effective solution to the related studies in this domain.}, } @article {pmid34450808, year = {2021}, author = {Wang, Q and Mu, H}, title = {Privacy-Preserving and Lightweight Selective Aggregation with Fault-Tolerance for Edge Computing-Enhanced IoT.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {16}, pages = {}, pmid = {34450808}, issn = {1424-8220}, mesh = {Algorithms ; Computer Security ; Confidentiality ; *Internet of Things ; *Privacy ; }, abstract = {Edge computing has been introduced to the Internet of Things (IoT) to meet the requirements of IoT applications. At the same time, data aggregation is widely used in data processing to reduce the communication overhead and energy consumption in IoT. Most existing schemes aggregate the overall data without filtering. In addition, aggregation schemes also face huge challenges, such as the privacy of the individual IoT device's data or the fault-tolerant and lightweight requirements of the schemes. In this paper, we present a privacy-preserving and lightweight selective aggregation scheme with fault tolerance (PLSA-FT) for edge computing-enhanced IoT. 
In PLSA-FT, selective aggregation can be achieved by constructing Boolean responses and numerical responses according to specific query conditions of the cloud center. Furthermore, we modified the basic Paillier homomorphic encryption to guarantee data privacy and support fault tolerance of IoT devices' malfunctions. An online/offline signature mechanism is utilized to reduce computation costs. The system characteristic analyses prove that the PLSA-FT scheme achieves confidentiality, privacy preservation, source authentication, integrity verification, fault tolerance, and dynamic membership management. Moreover, performance evaluation results show that PLSA-FT is lightweight with low computation costs and communication overheads.}, } @article {pmid34450797, year = {2021}, author = {Liu, Y and Ni, Z and Karlsson, M and Gong, S}, title = {Methodology for Digital Transformation with Internet of Things and Cloud Computing: A Practical Guideline for Innovation in Small- and Medium-Sized Enterprises.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {16}, pages = {}, pmid = {34450797}, issn = {1424-8220}, mesh = {Cloud Computing ; Industry ; *Internet of Things ; Technology ; }, abstract = {Researches on the Internet of Things (IoT) and cloud computing have been pervasive in both the academic and industrial world. IoT and cloud computing are seen as cornerstones to digital transformation in the industry. However, restricted by limited resources and the lack of expertise in information and communication technologies, small- and medium-sized enterprises (SMEs) have difficulty in achieving digitalization of their business. In this paper, we propose a reference framework for SMEs to follow as a guideline in the journey of digital transformation. The framework features a three-stage procedure that covers business, technology, and innovation, which can be iterated to drive product and business development. 
A case study about digital transformation taking place in the vertical plant wall industry is detailed. Furthermore, some solution design principles that are concluded from real industrial practice are presented. This paper reviews the digital transformation practice in the vertical plant wall industry and aims to accelerate the pace of SMEs in the journey of digital transformation.}, } @article {pmid34450717, year = {2021}, author = {Pérez-Pons, ME and Alonso, RS and García, O and Marreiros, G and Corchado, JM}, title = {Deep Q-Learning and Preference Based Multi-Agent System for Sustainable Agricultural Market.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {16}, pages = {}, pmid = {34450717}, issn = {1424-8220}, support = {RTC-2017-6536-7//European Regional Development Fund/ ; 0677\_DISRUPTIVE\_2\_E//European Regional Development Fund/ ; }, mesh = {*Agriculture ; *Climate Change ; }, abstract = {Yearly population growth will lead to a significant increase in agricultural production in the coming years. Twenty-first century agricultural producers will be facing the challenge of achieving food security and efficiency. This must be achieved while ensuring sustainable agricultural systems and overcoming the problems posed by climate change, depletion of water resources, and the potential for increased erosion and loss of productivity due to extreme weather conditions. Those environmental consequences will directly affect the price setting process. In view of the price oscillations and the lack of transparent information for buyers, a multi-agent system (MAS) is presented in this article. It supports the making of decisions in the purchase of sustainable agricultural products. The proposed MAS consists of a system that supports decision-making when choosing a supplier on the basis of certain preference-based parameters aimed at measuring the sustainability of a supplier and a deep Q-learning agent for agricultural future market price forecast. 
Therefore, different agri-environmental indicators (AEIs) have been considered, as well as the use of edge computing technologies to reduce costs of data transfer to the cloud. The presented MAS combines price setting optimizations and user preferences in regards to accessing, filtering, and integrating information. The agents filter and fuse information relevant to a user according to supplier attributes and a dynamic environment. The results presented in this paper allow a user to choose the supplier that best suits their preferences as well as to gain insight on agricultural future markets price oscillations through a deep Q-learning agent.}, } @article {pmid34450715, year = {2021}, author = {Ni, Z and Liu, Y and Karlsson, M and Gong, S}, title = {A Sensing System Based on Public Cloud to Monitor Indoor Environment of Historic Buildings.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {16}, pages = {}, pmid = {34450715}, issn = {1424-8220}, support = {DNR:2019-023737//Energimyndigheten/ ; }, mesh = {*Artificial Intelligence ; *Cloud Computing ; Electrocardiography ; Monitoring, Physiologic ; }, abstract = {Monitoring the indoor environment of historic buildings helps to identify potential risks, provide guidelines for improving regular maintenance, and preserve cultural artifacts. However, most of the existing monitoring systems proposed for historic buildings are not for general digitization purposes that provide data for smart services employing, e.g., artificial intelligence with machine learning. In addition, considering that preserving historic buildings is a long-term process that demands preventive maintenance, a monitoring system requires stable and scalable storage and computing resources. In this paper, a digitalization framework is proposed for smart preservation of historic buildings. 
A sensing system following the architecture of this framework is implemented by integrating various advanced digitalization techniques, such as Internet of Things, Edge computing, and Cloud computing. The sensing system realizes remote data collection, enables viewing real-time and historical data, and provides the capability for performing real-time analysis to achieve preventive maintenance of historic buildings in future research. Field testing results show that the implemented sensing system has a 2% end-to-end loss rate for collecting data samples and the loss rate can be decreased to 0.3%. The low loss rate indicates that the proposed sensing system has high stability and meets the requirements for long-term monitoring of historic buildings.}, } @article {pmid34445517, year = {2021}, author = {Bussola, N and Papa, B and Melaiu, O and Castellano, A and Fruci, D and Jurman, G}, title = {Quantification of the Immune Content in Neuroblastoma: Deep Learning and Topological Data Analysis in Digital Pathology.}, journal = {International journal of molecular sciences}, volume = {22}, number = {16}, pages = {}, pmid = {34445517}, issn = {1422-0067}, mesh = {Cloud Computing ; Deep Learning ; Female ; Humans ; Image Interpretation, Computer-Assisted/*methods ; Lymphocytes/metabolism ; Male ; Neural Networks, Computer ; Neuroblastoma/diagnostic imaging/*immunology ; }, abstract = {We introduce here a novel machine learning (ML) framework to address the issue of the quantitative assessment of the immune content in neuroblastoma (NB) specimens. First, the EUNet, a U-Net with an EfficientNet encoder, is trained to detect lymphocytes on tissue digital slides stained with the CD3 T-cell marker. The training set consists of 3782 images extracted from an original collection of 54 whole slide images (WSIs), manually annotated for a total of 73,751 lymphocytes. 
Resampling strategies, data augmentation, and transfer learning approaches are adopted to warrant reproducibility and to reduce the risk of overfitting and selection bias. Topological data analysis (TDA) is then used to define activation maps from different layers of the neural network at different stages of the training process, described by persistence diagrams (PD) and Betti curves. TDA is further integrated with the uniform manifold approximation and projection (UMAP) dimensionality reduction and the hierarchical density-based spatial clustering of applications with noise (HDBSCAN) algorithm for clustering, by the deep features, the relevant subgroups and structures, across different levels of the neural network. Finally, the recent TwoNN approach is leveraged to study the variation of the intrinsic dimensionality of the U-Net model. As the main task, the proposed pipeline is employed to evaluate the density of lymphocytes over the whole tissue area of the WSIs. The model achieves good results with mean absolute error 3.1 on test set, showing significant agreement between densities estimated by our EUNet model and by trained pathologists, thus indicating the potentialities of a promising new strategy in the quantification of the immune content in NB specimens. Moreover, the UMAP algorithm unveiled interesting patterns compatible with pathological characteristics, also highlighting novel insights into the dynamics of the intrinsic dataset dimensionality at different stages of the training process. 
All the experiments were run on the Microsoft Azure cloud platform.}, } @article {pmid34444132, year = {2021}, author = {Cai, X and Xu, D}, title = {Application of Edge Computing Technology in Hydrological Spatial Analysis and Ecological Planning.}, journal = {International journal of environmental research and public health}, volume = {18}, number = {16}, pages = {}, pmid = {34444132}, issn = {1660-4601}, mesh = {China ; *Ecosystem ; *Hydrology ; Spatial Analysis ; Technology ; Urbanization ; }, abstract = {The process of rapid urbanization causes so many water security issues such as urban waterlogging, environmental water pollution, water shortages, etc. It is, therefore, necessary for us to integrate a variety of theories, methods, measures, and means to conduct ecological problem diagnosis, ecological function demand assessment, and ecological security pattern planning. Here, EC (Edge Computing) technology is applied to analyze the hydrological spatial structure characteristics and ecological planning method of waterfront green space. First, various information is collected and scientifically analyzed around the core element of ecological planning: water. Then, in-depth research is conducted on the previous hydrological spatial analysis methods to identify their defects. Subsequently, given these defects, the EC technology is introduced to design a bottom-up overall architecture of intelligent ecological planning gateway, which can be divided into field devices, EC intelligent planning gateway, transmission system, and cloud processing platform. Finally, the performance of the overall architecture of the intelligent ecological planning gateway is tested. The study aims to optimize the performance of the hydrological spatial analysis method and ecological planning method in Xianglan town of Jiamusi city. The results show that the system can detect the flood control safety system planning, analysis of water source pollution. 
Additionally, the system also can use the EC technology, depending on the types, hydrological characteristics, pollutants to predict treatment sludge need to put in the pollutant treatment medicament composition and dosage, protection of water source nearby residents public health security. Compared with previous hydrological spatial analysis and ecological planning methods, the system is more scientific, efficient, and expandable. The results provide a technical basis for the research in related fields.}, } @article {pmid34442097, year = {2021}, author = {Spangler, HD and Simancas-Pallares, MA and Ginnis, J and Ferreira Zandoná, AG and Roach, J and Divaris, K}, title = {A Web-Based Rendering Application for Communicating Dental Conditions.}, journal = {Healthcare (Basel, Switzerland)}, volume = {9}, number = {8}, pages = {}, pmid = {34442097}, issn = {2227-9032}, support = {U01DE025046/DE/NIDCR NIH HHS/United States ; Grover C. Hunter Research Fund//Dental Foundation of North Carolina/ ; Viviana R. Duce Fellowship in Pediatric Dentistry//Dental Foundation of North Carolina/ ; }, abstract = {The importance of visual aids in communicating clinical examination findings or proposed treatments in dentistry cannot be overstated. Similarly, communicating dental research results with tooth surface-level precision is impractical without visual representations. Here, we present the development, deployment, and two real-life applications of a web-based data visualization informatics pipeline that converts tooth surface-level information to colorized, three-dimensional renderings. The core of the informatics pipeline focuses on texture (UV) mapping of a pre-existing model of the human primary dentition. The 88 individually segmented tooth surfaces receive independent inputs that are represented in colors and textures according to customizable user specifications. 
The web implementation SculptorHD, deployed on the Google Cloud Platform, can accommodate manually entered or spreadsheet-formatted tooth surface data and allows the customization of color palettes and thresholds, as well as surface textures (e.g., condition-free, caries lesions, stainless steel, or ceramic crowns). Its current implementation enabled the visualization and interpretation of clinical early childhood caries (ECC) subtypes using latent class analysis-derived caries experience summary data. As a demonstration of its potential clinical utility, the tool was also used to simulate the restorative treatment presentation of a severe ECC case, including the use of stainless steel and ceramic crowns. We expect that this publicly available web-based tool can aid clinicians and investigators deliver precise, visual presentations of dental conditions and proposed treatments. The creation of rapidly adjustable lifelike dental models, integrated to existing electronic health records and responsive to new clinical findings or planned for future work, is likely to boost two-way communication between clinicians and their patients.}, } @article {pmid34435200, year = {2022}, author = {Lacey, JV and Benbow, JL}, title = {Invited Commentary: Standards, Inputs, and Outputs-Strategies for Improving Data-Sharing and Consortia-Based Epidemiologic Research.}, journal = {American journal of epidemiology}, volume = {191}, number = {1}, pages = {159-162}, doi = {10.1093/aje/kwab217}, pmid = {34435200}, issn = {1476-6256}, mesh = {Epidemiologic Studies ; Humans ; *Information Dissemination ; *Metabolomics ; Reference Standards ; }, abstract = {Data-sharing improves epidemiologic research, but the sharing of data frustrates epidemiologic researchers. The inefficiencies of current methods and options for data-sharing are increasingly documented and easily understood by any study group that has shared its data and any researcher who has received shared data. 
In this issue of the Journal, Temprosa et al. (Am J Epidemiol. 2021;191(1):147-158) describe how the Consortium of Metabolomics Studies (COMETS) developed and deployed a flexible analytical platform to eliminate key pain points in large-scale metabolomics research. COMETS Analytics includes an online tool, but its cloud computing and technology are the supporting rather than the leading actors in this script. The COMETS team identified the need to standardize diverse and inconsistent metabolomics and covariate data and models across its many participating cohort studies, and then developed a flexible tool that gave its member studies choices about how they wanted to meet the consortium's analytical requirements. Different specialties will have different specific research needs and will probably continue to use and develop an array of diverse analytical and technical solutions for their projects. COMETS Analytics shows how important-and enabling-the upstream attention to data standards and data consistency is to producing high-quality metabolomics, consortia-based, and large-scale epidemiology research.}, } @article {pmid34435101, year = {2021}, author = {Edu, AS and Agoyi, M and Agozie, D}, title = {Digital security vulnerabilities and threats implications for financial institutions deploying digital technology platforms and application: FMEA and FTOPSIS analysis.}, journal = {PeerJ. Computer science}, volume = {7}, number = {}, pages = {e658}, pmid = {34435101}, issn = {2376-5992}, abstract = {Digital disruptions have led to the integration of applications, platforms, and infrastructure. They assist in business operations, promoting open digital collaborations, and perhaps even the integration of the Internet of Things (IoTs), Big Data Analytics, and Cloud Computing to support data sourcing, data analytics, and storage synchronously on a single platform. 
Notwithstanding the benefits derived from digital technology integration (including IoTs, Big Data Analytics, and Cloud Computing), digital vulnerabilities and threats have become a more significant concern for users. We addressed these challenges from an information systems perspective and have noted that more research is needed identifying potential vulnerabilities and threats affecting the integration of IoTs, BDA and CC for data management. We conducted a step-by-step analysis of the potential vulnerabilities and threats affecting the integration of IoTs, Big Data Analytics, and Cloud Computing for data management. We combined multi-dimensional analysis, Failure Mode Effect Analysis, and Fuzzy Technique for Order of Preference by Similarity for Ideal Solution to evaluate and rank the potential vulnerabilities and threats. We surveyed 234 security experts from the banking industry with adequate knowledge in IoTs, Big Data Analytics, and Cloud Computing. Based on the closeness of the coefficients, we determined that insufficient use of backup electric generators, firewall protection failures, and no information security audits are high-ranking vulnerabilities and threats affecting integration. This study is an extension of discussions on the integration of digital applications and platforms for data management and the pervasive vulnerabilities and threats arising from that. 
A detailed review and classification of these threats and vulnerabilities are vital for sustaining businesses' digital integration.}, } @article {pmid34432855, year = {2021}, author = {Mohd Romlay, MR and Mohd Ibrahim, A and Toha, SF and De Wilde, P and Venkat, I}, title = {Novel CE-CBCE feature extraction method for object classification using a low-density LiDAR point cloud.}, journal = {PloS one}, volume = {16}, number = {8}, pages = {e0256665}, pmid = {34432855}, issn = {1932-6203}, mesh = {*Algorithms ; Cluster Analysis ; Humans ; *Lasers ; Robotics ; Software ; }, abstract = {Low-end LiDAR sensor provides an alternative for depth measurement and object recognition for lightweight devices. However due to low computing capacity, complicated algorithms are incompatible to be performed on the device, with sparse information further limits the feature available for extraction. Therefore, a classification method which could receive sparse input, while providing ample leverage for the classification process to accurately differentiate objects within limited computing capability is required. To achieve reliable feature extraction from a sparse LiDAR point cloud, this paper proposes a novel Clustered Extraction and Centroid Based Clustered Extraction Method (CE-CBCE) method for feature extraction followed by a convolutional neural network (CNN) object classifier. The integration of the CE-CBCE and CNN methods enable us to utilize lightweight actuated LiDAR input and provides low computing means of classification while maintaining accurate detection. 
Based on genuine LiDAR data, the final result shows reliable accuracy of 97% through the method proposed.}, } @article {pmid34430081, year = {2021}, author = {Zhao, J and Yu, L and Liu, H and Huang, H and Wang, J and Gong, P}, title = {Towards an open and synergistic framework for mapping global land cover.}, journal = {PeerJ}, volume = {9}, number = {}, pages = {e11877}, pmid = {34430081}, issn = {2167-8359}, abstract = {Global land-cover datasets are key sources of information for understanding the complex inter-actions between human activities and global change. They are also among the most critical variables for climate change studies. Over time, the spatial resolution of land cover maps has increased from the kilometer scale to 10-m scale. Single-type historical land cover datasets, including for forests, water, and impervious surfaces, have also been developed in recent years. In this study, we present an open and synergy framework to produce a global land cover dataset that combines supervised land cover classification and aggregation of existing multiple thematic land cover maps with the Google Earth Engine (GEE) cloud computing platform. On the basis of this method of classification and mosaicking, we derived a global land cover dataset for 6 years over a time span of 25 years. The overall accuracies of the six maps were around 75% and the accuracy for change area detection was over 70%. 
Our product also showed good similarity with the FAO and existing land cover maps.}, } @article {pmid34425749, year = {2021}, author = {Reddy, S and Hung, LH and Sala-Torra, O and Radich, JP and Yeung, CC and Yeung, KY}, title = {A graphical, interactive and GPU-enabled workflow to process long-read sequencing data.}, journal = {BMC genomics}, volume = {22}, number = {1}, pages = {626}, pmid = {34425749}, issn = {1471-2164}, support = {UG1 CA233338/NH/NIH HHS/United States ; R01 GM126019/GM/NIGMS NIH HHS/United States ; Hyundai Hope on Wheel Scholars Hope Grant//Hyuandai/ ; Young Investigator Award//Natioonal Comprehensive Cancer Network/ ; R01 CA175008/NH/NIH HHS/United States ; R01GM126019/NH/NIH HHS/United States ; }, mesh = {*Computational Biology ; Reproducibility of Results ; Sequence Analysis ; *Software ; Workflow ; }, abstract = {BACKGROUND: Long-read sequencing has great promise in enabling portable, rapid molecular-assisted cancer diagnoses. A key challenge in democratizing long-read sequencing technology in the biomedical and clinical community is the lack of graphical bioinformatics software tools which can efficiently process the raw nanopore reads, support graphical output and interactive visualizations for interpretations of results. Another obstacle is that high performance software tools for long-read sequencing data analyses often leverage graphics processing units (GPU), which is challenging and time-consuming to configure, especially on the cloud.

RESULTS: We present a graphical cloud-enabled workflow for fast, interactive analysis of nanopore sequencing data using GPUs. Users customize parameters, monitor execution and visualize results through an accessible graphical interface. The workflow and its components are completely containerized to ensure reproducibility and facilitate installation of the GPU-enabled software. We also provide an Amazon Machine Image (AMI) with all software and drivers pre-installed for GPU computing on the cloud. Most importantly, we demonstrate the potential of applying our software tools to reduce the turnaround time of cancer diagnostics by generating blood cancer (NB4, K562, ME1, 238 MV4;11) cell line Nanopore data using the Flongle adapter. We observe a 29x speedup and a 93x reduction in costs for the rate-limiting basecalling step in the analysis of blood cancer cell line data.

CONCLUSIONS: Our interactive and efficient software tools will make analyses of Nanopore data using GPU and cloud computing accessible to biomedical and clinical scientists, thus facilitating the adoption of cost effective, fast, portable and real-time long-read sequencing.}, } @article {pmid34424931, year = {2021}, author = {Zhao, Y and Sazlina, SG and Rokhani, FZ and Su, J and Chew, BH}, title = {The expectations and acceptability of a smart nursing home model among Chinese elderly people: A mixed methods study protocol.}, journal = {PloS one}, volume = {16}, number = {8}, pages = {e0255865}, pmid = {34424931}, issn = {1932-6203}, mesh = {Aged ; China ; Family/psychology ; Female ; Humans ; Interviews as Topic ; Male ; Middle Aged ; *Models, Nursing ; *Nursing Homes/standards ; Surveys and Questionnaires ; }, abstract = {Nursing homes integrated with smart information such as the Internet of Things, cloud computing, artificial intelligence, and digital health could improve not only the quality of care but also benefit the residents and health professionals by providing effective care and efficient medical services. However, a clear concept of a smart nursing home, the expectations and acceptability from the perspectives of the elderly people and their family members are still unclear. In addition, instruments to measure the expectations and acceptability of a smart nursing home are also lacking. The study aims to explore and determine the levels of these expectations, acceptability and the associated sociodemographic factors. This exploratory sequential mixed methods study comprises a qualitative study which will be conducted through a semi-structured interview to explore the expectations and acceptability of a smart nursing home among Chinese elderly people and their family members (Phase I). 
Next, a questionnaire will be developed and validated based on the results of a qualitative study in Phase I and a preceding scoping review on smart nursing homes by the same authors (Phase II). Lastly, a nationwide survey will be carried out to examine the levels of expectations and acceptability, and the associated sociodemographic factors with the different categories of expectations and acceptability (Phase III). With a better understanding of the Chinese elderly people's expectations and acceptability of smart technologies in nursing homes, a feasible smart nursing home model that incorporates appropriate technologies, integrates needed medical services and business concepts could be formulated and tested as a solution for the rapidly ageing societies in many developed and developing countries.}, } @article {pmid34416827, year = {2021}, author = {Tahmasebi, A and Qu, E and Sevrukov, A and Liu, JB and Wang, S and Lyshchik, A and Yu, J and Eisenbrey, JR}, title = {Assessment of Axillary Lymph Nodes for Metastasis on Ultrasound Using Artificial Intelligence.}, journal = {Ultrasonic imaging}, volume = {43}, number = {6}, pages = {329-336}, doi = {10.1177/01617346211035315}, pmid = {34416827}, issn = {1096-0910}, mesh = {*Artificial Intelligence ; Axilla ; *Breast Neoplasms/diagnostic imaging ; Female ; Humans ; Lymph Nodes/diagnostic imaging ; Lymphatic Metastasis ; Sensitivity and Specificity ; }, abstract = {The purpose of this study was to evaluate an artificial intelligence (AI) system for the classification of axillary lymph nodes on ultrasound compared to radiologists. Ultrasound images of 317 axillary lymph nodes from patients referred for ultrasound guided fine needle aspiration or core needle biopsy and corresponding pathology findings were collected. Lymph nodes were classified into benign and malignant groups with histopathological result serving as the reference. Google Cloud AutoML Vision (Mountain View, CA) was used for AI image classification. 
Three experienced radiologists also classified the images and gave a level of suspicion score (1-5). To test the accuracy of AI, an external testing dataset of 64 images from 64 independent patients was evaluated by three AI models and the three readers. The diagnostic performance of AI and the humans were then quantified using receiver operating characteristics curves. In the complete set of 317 images, AutoML achieved a sensitivity of 77.1%, positive predictive value (PPV) of 77.1%, and an area under the precision recall curve of 0.78, while the three radiologists showed a sensitivity of 87.8% ± 8.5%, specificity of 50.3% ± 16.4%, PPV of 61.1% ± 5.4%, negative predictive value (NPV) of 84.1% ± 6.6%, and accuracy of 67.7% ± 5.7%. In the three external independent test sets, AI and human readers achieved sensitivity of 74.0% ± 0.14% versus 89.9% ± 0.06% (p = .25), specificity of 64.4% ± 0.11% versus 50.1% ± 0.20% (p = .22), PPV of 68.3% ± 0.04% versus 65.4% ± 0.07% (p = .50), NPV of 72.6% ± 0.11% versus 82.1% ± 0.08% (p = .33), and accuracy of 69.5% ± 0.06% versus 70.1% ± 0.07% (p = .90), respectively. These preliminary results indicate AI has comparable performance to trained radiologists and could be used to predict the presence of metastasis in ultrasound images of axillary lymph nodes.}, } @article {pmid34411131, year = {2021}, author = {Khashan, E and Eldesouky, A and Elghamrawy, S}, title = {An adaptive spark-based framework for querying large-scale NoSQL and relational databases.}, journal = {PloS one}, volume = {16}, number = {8}, pages = {e0255562}, pmid = {34411131}, issn = {1932-6203}, mesh = {*Algorithms ; Cloud Computing/*statistics & numerical data ; Data Management/*methods ; Database Management Systems/*standards ; *Databases, Factual ; Information Storage and Retrieval/*statistics & numerical data ; *Software ; }, abstract = {The growing popularity of big data analysis and cloud computing has created new big data management standards. 
Sometimes, programmers may interact with a number of heterogeneous data stores depending on the information they are responsible for: SQL and NoSQL data stores. Interacting with heterogeneous data models via numerous APIs and query languages imposes challenging tasks on multi-data processing developers. Indeed, complex queries concerning homogenous data structures cannot currently be performed in a declarative manner when found in single data storage applications and therefore require additional development efforts. Many models were presented in order to address complex queries via multistore applications. Some of these models implemented a complex unified and fast model, while others' efficiency is not good enough to solve this type of complex database queries. This paper provides an automated, fast and easy unified architecture to solve simple and complex SQL and NoSQL queries over heterogeneous data stores (CQNS). This proposed framework can be used in cloud environments or for any big data application to automatically help developers to manage basic and complicated database queries. CQNS consists of three layers: matching selector layer, processing layer, and query execution layer. The matching selector layer is the heart of this architecture in which five of the user queries are examined if they are matched with another five queries stored in a single engine stored in the architecture library. This is achieved through a proposed algorithm that directs the query to the right SQL or NoSQL database engine. Furthermore, CQNS deal with many NoSQL Databases like MongoDB, Cassandra, Riak, CouchDB, and Neo4j databases. This paper presents a spark framework that can handle both SQL and NoSQL Databases. Four scenarios' benchmarks datasets are used to evaluate the proposed CQNS for querying different NoSQL Databases in terms of optimization process performance and query execution time. 
The results show that, the CQNS achieves best latency and throughput in less time among the compared systems.}, } @article {pmid34409117, year = {2021}, author = {Miao, Y and Hao, Y and Chen, M and Gharavi, H and Hwang, K}, title = {Intelligent Task Caching in Edge Cloud via Bandit Learning.}, journal = {IEEE transactions on network science and engineering}, volume = {8}, number = {1}, pages = {}, doi = {10.1109/tnse.2020.3047417}, pmid = {34409117}, issn = {2327-4697}, support = {9999-NIST/ImNIST/Intramural NIST DOC/United States ; }, abstract = {Task caching, based on edge cloud, aims to meet the latency requirements of computation-intensive and data-intensive tasks (such as augmented reality). However, current task caching strategies are generally based on the unrealistic assumption of knowing the pattern of user task requests and ignoring the fact that a task request pattern is more user specific (e.g., the mobility and personalized task demand). Moreover, it disregards the impact of task size and computing amount on the caching strategy. To investigate these issues, in this paper, we first formalize the task caching problem as a non-linear integer programming problem to minimize task latency. We then design a novel intelligent task caching algorithm based on a multiarmed bandit algorithm, called M-adaptive upper confidence bound (M-AUCB). The proposed caching strategy cannot only learn the task patterns of mobile device requests online, but can also dynamically adjust the caching strategy to incorporate the size and computing amount of each task. Moreover, we prove that the M-AUCB algorithm achieves a sublinear regret bound. 
The results show that, compared with other task caching schemes, the M-AUCB algorithm reduces the average task latency by at least 14.8%.}, } @article {pmid34407387, year = {2021}, author = {Fox, CB and Israelsen-Augenstein, M and Jones, S and Gillam, SL}, title = {An Evaluation of Expedited Transcription Methods for School-Age Children's Narrative Language: Automatic Speech Recognition and Real-Time Transcription.}, journal = {Journal of speech, language, and hearing research : JSLHR}, volume = {64}, number = {9}, pages = {3533-3548}, doi = {10.1044/2021_JSLHR-21-00096}, pmid = {34407387}, issn = {1558-9102}, mesh = {Child ; Humans ; Reproducibility of Results ; Schools ; Speech ; *Speech Perception ; *Speech-Language Pathology/education ; }, abstract = {Purpose This study examined the accuracy and potential clinical utility of two expedited transcription methods for narrative language samples elicited from school-age children (7;5-11;10 [years;months]) with developmental language disorder. Transcription methods included real-time transcription produced by speech-language pathologists (SLPs) and trained transcribers (TTs) as well as Google Cloud Speech automatic speech recognition. Method The accuracy of each transcription method was evaluated against a gold-standard reference corpus. Clinical utility was examined by determining the reliability of scores calculated from the transcripts produced by each method on several language sample analysis (LSA) measures. Participants included seven certified SLPs and seven TTs. Each participant was asked to produce a set of six transcripts in real time, out of a total 42 language samples. The same 42 samples were transcribed using Google Cloud Speech. Transcription accuracy was evaluated through word error rate. Reliability of LSA scores was determined using correlation analysis. 
Results Results indicated that Google Cloud Speech was significantly more accurate than real-time transcription in transcribing narrative samples and was not impacted by speech rate of the narrator. In contrast, SLP and TT transcription accuracy decreased as a function of increasing speech rate. LSA metrics generated from Google Cloud Speech transcripts were also more reliably calculated. Conclusions Automatic speech recognition showed greater accuracy and clinical utility as an expedited transcription method than real-time transcription. Though there is room for improvement in the accuracy of speech recognition for the purpose of clinical transcription, it produced highly reliable scores on several commonly used LSA metrics. Supplemental Material https://doi.org/10.23641/asha.15167355.}, } @article {pmid34407145, year = {2021}, author = {Edwards, T and Jones, CB and Perkins, SE and Corcoran, P}, title = {Passive citizen science: The role of social media in wildlife observations.}, journal = {PloS one}, volume = {16}, number = {8}, pages = {e0255416}, pmid = {34407145}, issn = {1932-6203}, mesh = {Animals ; *Animals, Wild ; Biodiversity ; *Citizen Science ; Social Media ; }, abstract = {Citizen science plays an important role in observing the natural environment. While conventional citizen science consists of organized campaigns to observe a particular phenomenon or species there are also many ad hoc observations of the environment in social media. These data constitute a valuable resource for 'passive citizen science'-the use of social media that are unconnected to any particular citizen science program, but represent an untapped dataset of ecological value. We explore the value of passive citizen science, by evaluating species distributions using the photo sharing site Flickr. The data are evaluated relative to those submitted to the National Biodiversity Network (NBN) Atlas, the largest collection of species distribution data in the UK. 
Our study focuses on the 1500 best represented species on NBN, and common invasive species within UK, and compares the spatial and temporal distribution with NBN data. We also introduce an innovative image verification technique that uses the Google Cloud Vision API in combination with species taxonomic data to determine the likelihood that a mention of a species on Flickr represents a given species. The spatial and temporal analyses for our case studies suggest that the Flickr dataset best reflects the NBN dataset when considering a purely spatial distribution with no time constraints. The best represented species on Flickr in comparison to NBN are diurnal garden birds as around 70% of the Flickr posts for them are valid observations relative to the NBN. Passive citizen science could offer a rich source of observation data for certain taxonomic groups, and/or as a repository for dedicated projects. Our novel method of validating Flickr records is suited to verifying more extensive collections, including less well-known species, and when used in combination with citizen science projects could offer a platform for accurate identification of species and their location.}, } @article {pmid34403339, year = {2021}, author = {Lv, C and Lin, W and Zhao, B}, title = {Approximate Intrinsic Voxel Structure for Point Cloud Simplification.}, journal = {IEEE transactions on image processing : a publication of the IEEE Signal Processing Society}, volume = {30}, number = {}, pages = {7241-7255}, doi = {10.1109/TIP.2021.3104174}, pmid = {34403339}, issn = {1941-0042}, abstract = {A point cloud as an information-intensive 3D representation usually requires a large amount of transmission, storage and computing resources, which seriously hinder its usage in many emerging fields. In this paper, we propose a novel point cloud simplification method, Approximate Intrinsic Voxel Structure (AIVS), to meet the diverse demands in real-world application scenarios. 
The method includes point cloud pre-processing (denoising and down-sampling), AIVS-based realization for isotropic simplification and flexible simplification with intrinsic control of point distance. To demonstrate the effectiveness of the proposed AIVS-based method, we conducted extensive experiments by comparing it with several relevant point cloud simplification methods on three public datasets, including Stanford, SHREC, and RGB-D scene models. The experimental results indicate that AIVS has great advantages over peers in terms of moving least squares (MLS) surface approximation quality, curvature-sensitive sampling, sharp-feature keeping and processing speed. The source code of the proposed method is publicly available. (https://github.com/vvvwo/AIVS-project).}, } @article {pmid34401476, year = {2021}, author = {Markus, A and Biro, M and Kecskemeti, G and Kertesz, A}, title = {Actuator behaviour modelling in IoT-Fog-Cloud simulation.}, journal = {PeerJ. Computer science}, volume = {7}, number = {}, pages = {e651}, pmid = {34401476}, issn = {2376-5992}, abstract = {The inevitable evolution of information technology has led to the creation of IoT-Fog-Cloud systems, which combine the Internet of Things (IoT), Cloud Computing and Fog Computing. IoT systems are composed of possibly up to billions of smart devices, sensors and actuators connected through the Internet, and these components continuously generate large amounts of data. Cloud and fog services assist the data processing and storage needs of IoT devices. The behaviour of these devices can change dynamically (e.g. properties of data generation or device states). We refer to systems allowing behavioural changes in physical position (i.e. geolocation), as the Internet of Mobile Things (IoMT). The investigation and detailed analysis of such complex systems can be fostered by simulation solutions. 
The currently available, related simulation tools are lacking a generic actuator model including mobility management. In this paper, we present an extension of the DISSECT-CF-Fog simulator to support the analysis of arbitrary actuator events and mobility capabilities of IoT devices in IoT-Fog-Cloud systems. The main contributions of our work are: (i) a generic actuator model and its implementation in DISSECT-CF-Fog, and (ii) the evaluation of its use through logistics and healthcare scenarios. Our results show that we can successfully model IoMT systems and behavioural changes of actuators in IoT-Fog-Cloud systems in general, and analyse their management issues in terms of usage cost and execution time.}, } @article {pmid34401472, year = {2021}, author = {M, VK and Venkatachalam, K and P, P and Almutairi, A and Abouhawwash, M}, title = {Secure biometric authentication with de-duplication on distributed cloud storage.}, journal = {PeerJ. Computer science}, volume = {7}, number = {}, pages = {e569}, pmid = {34401472}, issn = {2376-5992}, abstract = {Cloud computing is one of the evolving fields of technology, which allows storage, access of data, programs, and their execution over the internet with offering a variety of information related services. With cloud information services, it is essential for information to be saved securely and to be distributed safely across numerous users. Cloud information storage has suffered from issues related to information integrity, data security, and information access by unauthenticated users. The distribution and storage of data among several users are highly scalable and cost-efficient but results in data redundancy and security issues. In this article, a biometric authentication scheme is proposed for the requested users to give access permission in a cloud-distributed environment and, at the same time, alleviate data redundancy. 
To achieve this, a cryptographic technique is used by service providers to generate the bio-key for authentication, which will be accessible only to authenticated users. A Gabor filter with distributed security and encryption using XOR operations is used to generate the proposed bio-key (biometric generated key) and avoid data deduplication in the cloud, ensuring avoidance of data redundancy and security. The proposed method is compared with existing algorithms, such as convergent encryption (CE), leakage resilient (LR), randomized convergent encryption (RCE), secure de-duplication scheme (SDS), to evaluate the de-duplication performance. Our comparative analysis shows that our proposed scheme results in smaller computation and communication costs than existing schemes.}, } @article {pmid34398234, year = {2021}, author = {Bloom, JD}, title = {Recovery of Deleted Deep Sequencing Data Sheds More Light on the Early Wuhan SARS-CoV-2 Epidemic.}, journal = {Molecular biology and evolution}, volume = {38}, number = {12}, pages = {5211-5224}, pmid = {34398234}, issn = {1537-1719}, support = {S10 OD028685/OD/NIH HHS/United States ; S10OD028685/GF/NIH HHS/United States ; /HHMI/Howard Hughes Medical Institute/United States ; }, mesh = {*COVID-19/virology ; China ; *High-Throughput Nucleotide Sequencing ; Humans ; Phylogeny ; *SARS-CoV-2/genetics ; }, abstract = {The origin and early spread of SARS-CoV-2 remains shrouded in mystery. Here, I identify a data set containing SARS-CoV-2 sequences from early in the Wuhan epidemic that has been deleted from the NIH's Sequence Read Archive. I recover the deleted files from the Google Cloud and reconstruct partial sequences of 13 early epidemic viruses. Phylogenetic analysis of these sequences in the context of carefully annotated existing data further supports the idea that the Huanan Seafood Market sequences are not fully representative of the viruses in Wuhan early in the epidemic. 
Instead, the progenitor of currently known SARS-CoV-2 sequences likely contained three mutations relative to the market viruses that made it more similar to SARS-CoV-2's bat coronavirus relatives.}, } @article {pmid34395534, year = {2021}, author = {Honorato, RV and Koukos, PI and Jiménez-García, B and Tsaregorodtsev, A and Verlato, M and Giachetti, A and Rosato, A and Bonvin, AMJJ}, title = {Structural Biology in the Clouds: The WeNMR-EOSC Ecosystem.}, journal = {Frontiers in molecular biosciences}, volume = {8}, number = {}, pages = {729513}, pmid = {34395534}, issn = {2296-889X}, abstract = {Structural biology aims at characterizing the structural and dynamic properties of biological macromolecules at atomic details. Gaining insight into three dimensional structures of biomolecules and their interactions is critical for understanding the vast majority of cellular processes, with direct applications in health and food sciences. Since 2010, the WeNMR project (www.wenmr.eu) has implemented numerous web-based services to facilitate the use of advanced computational tools by researchers in the field, using the high throughput computing infrastructure provided by EGI. These services have been further developed in subsequent initiatives under H2020 projects and are now operating as Thematic Services in the European Open Science Cloud portal (www.eosc-portal.eu), sending >12 millions of jobs and using around 4,000 CPU-years per year. Here we review 10 years of successful e-infrastructure solutions serving a large worldwide community of over 23,000 users to date, providing them with user-friendly, web-based solutions that run complex workflows in structural biology. 
The current set of active WeNMR portals are described, together with the complex backend machinery that allows distributed computing resources to be harvested efficiently.}, } @article {pmid34393952, year = {2021}, author = {Aguirre Montero, A and López-Sánchez, JA}, title = {Intersection of Data Science and Smart Destinations: A Systematic Review.}, journal = {Frontiers in psychology}, volume = {12}, number = {}, pages = {712610}, pmid = {34393952}, issn = {1664-1078}, abstract = {This systematic review adopts a formal and structured approach to review the intersection of data science and smart tourism destinations in terms of components found in previous research. The study period corresponds to 1995-2021 focusing the analysis mainly on the last years (2015-2021), identifying and characterizing the current trends on this research topic. The review comprises documentary research based on bibliometric and conceptual analysis, using the VOSviewer and SciMAT software to analyze articles from the Web of Science database. There is growing interest in this research topic, with more than 300 articles published annually. Data science technologies on which current smart destinations research is based include big data, smart data, data analytics, social media, cloud computing, the internet of things (IoT), smart card data, geographic information system (GIS) technologies, open data, artificial intelligence, and machine learning. Critical research areas for data science techniques and technologies in smart destinations are public tourism marketing, mobility-accessibility, and sustainability. 
Data analysis techniques and technologies face unprecedented challenges and opportunities post-coronavirus disease-2019 (COVID-19) to build on the huge amount of data and a new tourism model that is more sustainable, smarter, and safer than those previously implemented.}, } @article {pmid34393357, year = {2020}, author = {Nour, B and Mastorakis, S and Mtibaa, A}, title = {Compute-Less Networking: Perspectives, Challenges, and Opportunities.}, journal = {IEEE network}, volume = {34}, number = {6}, pages = {259-265}, doi = {10.1109/mnet.011.2000180}, pmid = {34393357}, issn = {0890-8044}, support = {P20 GM109090/GM/NIGMS NIH HHS/United States ; }, abstract = {Delay-sensitive applications have been driving the move away from cloud computing, which cannot meet their low-latency requirements. Edge computing and programmable switches have been among the first steps toward pushing computation closer to end-users in order to reduce cost, latency, and overall resource utilization. This article presents the "compute-less" paradigm, which builds on top of the well known edge computing paradigm through a set of communication and computation optimization mechanisms (e.g., in-network computing, task clustering and aggregation, computation reuse). The main objective of the compute-less paradigm is to reduce the migration of computation and the usage of network and computing resources, while maintaining high Quality of Experience for end-users. 
We discuss the new perspectives, challenges, limitations, and opportunities of this compute-less paradigm.}, } @article {pmid34389135, year = {2021}, author = {Szamosfalvi, B and Heung, M and Yessayan, L}, title = {Technology Innovations in Continuous Kidney Replacement Therapy: The Clinician's Perspective.}, journal = {Advances in chronic kidney disease}, volume = {28}, number = {1}, pages = {3-12}, doi = {10.1053/j.ackd.2021.03.021}, pmid = {34389135}, issn = {1548-5609}, mesh = {*Acute Kidney Injury ; Anticoagulants ; *Continuous Renal Replacement Therapy ; Humans ; Intensive Care Units ; Renal Dialysis ; Technology ; }, abstract = {Continuous kidney replacement therapy (CKRT) has improved remarkably since its first implementation as continuous arteriovenous hemofiltration in the 1970s. However, when looking at the latest generation of CKRT machines, one could argue that clinical deployment of breakthrough innovations by device manufacturers has slowed in the last decade. Simultaneously, there has been a steady accumulation of clinical knowledge using CKRT as well as a multitude of therapeutic and diagnostic innovations in the dialysis and broader intensive care unit technology fields adaptable to CKRT. These include multiple different anticlotting measures; cloud-computing for optimized treatment prescribing and delivered therapy data collection and analysis; novel blood purification techniques aimed at improving the severe multiorgan dysfunction syndrome; and real-time sensing of blood and/or filter effluent composition. 
The authors present a view of how CKRT devices and programs could be reimagined incorporating these innovations to achieve specific measurable clinical outcomes with personalized care and improved simplicity, safety, and efficacy of CKRT therapy.}, } @article {pmid34383582, year = {2021}, author = {Ronquillo, JG and Lester, WT}, title = {Practical Aspects of Implementing and Applying Health Care Cloud Computing Services and Informatics to Cancer Clinical Trial Data.}, journal = {JCO clinical cancer informatics}, volume = {5}, number = {}, pages = {826-832}, pmid = {34383582}, issn = {2473-4276}, mesh = {*Cloud Computing ; Delivery of Health Care ; Ecosystem ; Humans ; Informatics ; *Neoplasms/diagnosis/epidemiology/therapy ; Precision Medicine ; }, abstract = {PURPOSE: Cloud computing has led to dramatic growth in the volume, variety, and velocity of cancer data. However, cloud platforms and services present new challenges for cancer research, particularly in understanding the practical tradeoffs between cloud performance, cost, and complexity. The goal of this study was to describe the practical challenges when using a cloud-based service to improve the cancer clinical trial matching process.

METHODS: We collected information for all interventional cancer clinical trials from ClinicalTrials.gov and used the Google Cloud Healthcare Natural Language Application Programming Interface (API) to analyze clinical trial Title and Eligibility Criteria text. An informatics pipeline leveraging interoperability standards summarized the distribution of cancer clinical trials, genes, laboratory tests, and medications extracted from cloud-based entity analysis.

RESULTS: There were a total of 38,851 cancer-related clinical trials found in this study, with the distribution of cancer categories extracted from Title text significantly different than in ClinicalTrials.gov (P < .001). Cloud-based entity analysis of clinical trial criteria identified a total of 949 genes, 1,782 laboratory tests, 2,086 medications, and 4,902 National Cancer Institute Thesaurus terms, with estimated detection accuracies ranging from 12.8% to 89.9%. A total of 77,702 API calls processed an estimated 167,179 text records, which took a total of 1,979 processing-minutes (33.0 processing-hours), or approximately 1.5 seconds per API call.

CONCLUSION: Current general-purpose cloud health care tools-like the Google service in this study-should not be used for automated clinical trial matching unless they can perform effective extraction and classification of the clinical, genetic, and medication concepts central to precision oncology research. A strong understanding of the practical aspects of cloud computing will help researchers effectively navigate the vast data ecosystems in cancer research.}, } @article {pmid34380380, year = {2021}, author = {Paul, G and Abele, ND and Kluth, K}, title = {A Review and Qualitative Meta-Analysis of Digital Human Modeling and Cyber-Physical-Systems in Ergonomics 4.0.}, journal = {IISE transactions on occupational ergonomics and human factors}, volume = {9}, number = {3-4}, pages = {111-123}, pmid = {34380380}, issn = {2472-5846}, mesh = {*Ergonomics ; Humans ; *Industry ; }, abstract = {Occupational ApplicationsFounded in an empirical case study and theoretical work, this paper reviews the scientific literature to define the role of Digital Human Modeling (DHM), Digital Twin (DT), and Cyber-Physical Systems (CPS) to inform the emerging concept of Ergonomics 4.0. We find that DHM evolved into DT is a core element in Ergonomics 4.0. A solid understanding and agreement on the nature of Ergonomics 4.0 is essential for the inclusion of ergonomic values and considerations in the larger conceptual framework of Industry 4.0. 
In this context, we invite Ergonomists from various disciplines to broaden their understanding and application of DHM and DT.}, } @article {pmid34376975, year = {2021}, author = {Koppad, S and B, A and Gkoutos, GV and Acharjee, A}, title = {Cloud Computing Enabled Big Multi-Omics Data Analytics.}, journal = {Bioinformatics and biology insights}, volume = {15}, number = {}, pages = {11779322211035921}, pmid = {34376975}, issn = {1177-9322}, abstract = {High-throughput experiments enable researchers to explore complex multifactorial diseases through large-scale analysis of omics data. Challenges for such high-dimensional data sets include storage, analyses, and sharing. Recent innovations in computational technologies and approaches, especially in cloud computing, offer a promising, low-cost, and highly flexible solution in the bioinformatics domain. Cloud computing is rapidly proving increasingly useful in molecular modeling, omics data analytics (eg, RNA sequencing, metabolomics, or proteomics data sets), and for the integration, analysis, and interpretation of phenotypic data. 
We review the adoption of advanced cloud-based and big data technologies for processing and analyzing omics data and provide insights into state-of-the-art cloud bioinformatics applications.}, } @article {pmid34372809, year = {2021}, author = {Chaudhuri, S and Han, H and Monaghan, C and Larkin, J and Waguespack, P and Shulman, B and Kuang, Z and Bellamkonda, S and Brzozowski, J and Hymes, J and Black, M and Kotanko, P and Kooman, JP and Maddux, FW and Usvyat, L}, title = {Real-time prediction of intradialytic relative blood volume: a proof-of-concept for integrated cloud computing infrastructure.}, journal = {BMC nephrology}, volume = {22}, number = {1}, pages = {274}, pmid = {34372809}, issn = {1471-2369}, mesh = {Blood Volume/*physiology ; *Body Fluid Compartments ; Cloud Computing ; Early Diagnosis ; Female ; Humans ; *Hypotension/diagnosis/etiology/prevention & control ; *Kidney Failure, Chronic/physiopathology/therapy ; *Machine Learning ; Male ; Middle Aged ; *Muscle Cramp/diagnosis/etiology/prevention & control ; Prognosis ; Proof of Concept Study ; *Renal Dialysis/adverse effects/methods ; *Vomiting/diagnosis/etiology/prevention & control ; }, abstract = {BACKGROUND: Inadequate refilling from extravascular compartments during hemodialysis can lead to intradialytic symptoms, such as hypotension, nausea, vomiting, and cramping/myalgia. Relative blood volume (RBV) plays an important role in adapting the ultrafiltration rate which in turn has a positive effect on intradialytic symptoms. It has been clinically challenging to identify changes in RBV in real time to proactively intervene and reduce potential negative consequences of volume depletion. Leveraging advanced technologies to process large volumes of dialysis and machine data in real time and developing prediction models using machine learning (ML) is critical in identifying these signals.

METHOD: We conducted a proof-of-concept analysis to retrospectively assess near real-time dialysis treatment data from in-center patients in six clinics using Optical Sensing Device (OSD), during December 2018 to August 2019. The goal of this analysis was to use real-time OSD data to predict if a patient's relative blood volume (RBV) decreases at a rate of at least - 6.5 % per hour within the next 15 min during a dialysis treatment, based on 10-second windows of data in the previous 15 min. A dashboard application was constructed to demonstrate how reporting structures may be developed to alert clinicians in real time of at-risk cases. Data was derived from three sources: (1) OSDs, (2) hemodialysis machines, and (3) patient electronic health records.

RESULTS: Treatment data from 616 in-center dialysis patients in the six clinics was curated into a big data store and fed into a Machine Learning (ML) model developed and deployed within the cloud. The threshold for classifying observations as positive or negative was set at 0.08. Precision for the model at this threshold was 0.33 and recall was 0.94. The area under the receiver operating curve (AUROC) for the ML model was 0.89 using test data.

CONCLUSIONS: The findings from our proof-of-concept analysis demonstrate the design of a cloud-based framework that can be used for making real-time predictions of events during dialysis treatments. Making real-time predictions has the potential to assist clinicians at the point of care during hemodialysis.}, } @article {pmid34372471, year = {2021}, author = {Ismail, L and Materwala, H}, title = {ESCOVE: Energy-SLA-Aware Edge-Cloud Computation Offloading in Vehicular Networks.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {15}, pages = {}, pmid = {34372471}, issn = {1424-8220}, support = {31R215//National Water and Energy Center, United Arab Emirates University, United Arab Emirates/ ; }, abstract = {The vehicular network is an emerging technology in the Intelligent Smart Transportation era. The network provides mechanisms for running different applications, such as accident prevention, publishing and consuming services, and traffic flow management. In such scenarios, edge and cloud computing come into the picture to offload computation from vehicles that have limited processing capabilities. Optimizing the energy consumption of the edge and cloud servers becomes crucial. However, existing research efforts focus on either vehicle or edge energy optimization, and do not account for vehicular applications' quality of services. In this paper, we address this void by proposing a novel offloading algorithm, ESCOVE, which optimizes the energy of the edge-cloud computing platform. The proposed algorithm respects the Service level agreement (SLA) in terms of latency, processing and total execution times. 
The experimental results show that ESCOVE is a promising approach in energy savings while preserving SLAs compared to the state-of-the-art approach.}, } @article {pmid34370407, year = {2021}, author = {Gahm, NA and Rueden, CT and Evans, EL and Selzer, G and Hiner, MC and Chacko, JV and Gao, D and Sherer, NM and Eliceiri, KW}, title = {New Extensibility and Scripting Tools in the ImageJ Ecosystem.}, journal = {Current protocols}, volume = {1}, number = {8}, pages = {e204}, pmid = {34370407}, issn = {2691-1299}, support = {P41 GM135019/GM/NIGMS NIH HHS/United States ; T15 LM007359/LM/NLM NIH HHS/United States ; T32 CA009135/CA/NCI NIH HHS/United States ; }, mesh = {Algorithms ; *Ecosystem ; Humans ; *Image Processing, Computer-Assisted ; Microscopy, Fluorescence ; Software ; }, abstract = {ImageJ provides a framework for image processing across scientific domains while being fully open source. Over the years ImageJ has been substantially extended to support novel applications in scientific imaging as they emerge, particularly in the area of biological microscopy, with functionality made more accessible via the Fiji distribution of ImageJ. Within this software ecosystem, work has been done to extend the accessibility of ImageJ to utilize scripting, macros, and plugins in a variety of programming scenarios, e.g., from Groovy and Python and in Jupyter notebooks and cloud computing. We provide five protocols that demonstrate the extensibility of ImageJ for various workflows in image processing. We focus first on Fluorescence Lifetime Imaging Microscopy (FLIM) data, since this requires significant processing to provide quantitative insights into the microenvironments of cells. 
Second, we show how ImageJ can now be utilized for common image processing techniques, specifically image deconvolution and inversion, while highlighting the new, built-in features of ImageJ-particularly its capacity to run completely headless and the Ops matching feature that selects the optimal algorithm for a given function and data input, thereby enabling processing speedup. Collectively, these protocols can be used as a basis for automating biological image processing workflows. © 2021 Wiley Periodicals LLC. Basic Protocol 1: Using PyImageJ for FLIM data processing Alternate Protocol: Groovy FLIMJ in Jupyter Notebooks Basic Protocol 2: Using ImageJ Ops for image deconvolution Support Protocol 1: Using ImageJ Ops matching feature for image inversion Support Protocol 2: Headless ImageJ deconvolution.}, } @article {pmid34366566, year = {2022}, author = {Su, P and Chen, Y and Lu, M}, title = {Smart city information processing under internet of things and cloud computing.}, journal = {The Journal of supercomputing}, volume = {78}, number = {3}, pages = {3676-3695}, pmid = {34366566}, issn = {0920-8542}, abstract = {This study is to explore the smart city information (SCI) processing technology based on the Internet of Things (IoT) and cloud computing, promoting the construction of smart cities in the direction of effective sharing and interconnection. In this study, a SCI system is constructed based on the information islands in the smart construction of various fields in smart cities. The smart environment monitoring, smart transportation, and smart epidemic prevention at the application layer of the SCI system are designed separately. A multi-objective optimization algorithm for cloud computing virtual machine resource allocation method (CC-VMRA method) is proposed, and the application of the IoT and cloud computing technology in the smart city information system is further analysed and simulated for the performance verification. 
The results show that the multi-objective optimization algorithm in the CC-VMRA method can greatly reduce the number of physical servers in the SCI system (less than 20), and the variance is not higher than 0.0024, which can enable the server cluster to achieve better load balancing effects. In addition, the packet loss rate of the Zigbee protocol used by the IoT gateway in the SCI system is far below the 0.1% indicator, and the delay is less than 10 ms. Therefore, the SCI system constructed by this study shows low latency and high utilization rate, which can provide experimental reference for the later construction of smart city.}, } @article {pmid34359654, year = {2021}, author = {Prakash, A and Mahoney, KE and Orsburn, BC}, title = {Cloud Computing Based Immunopeptidomics Utilizing Community Curated Variant Libraries Simplifies and Improves Neo-Antigen Discovery in Metastatic Melanoma.}, journal = {Cancers}, volume = {13}, number = {15}, pages = {}, pmid = {34359654}, issn = {2072-6694}, abstract = {Unique peptide neo-antigens presented on the cell surface are attractive targets for researchers in nearly all areas of personalized medicine. Cells presenting peptides with mutated or other non-canonical sequences can be utilized for both targeted therapies and diagnostics. Today's state-of-the-art pipelines utilize complementary proteogenomic approaches where RNA or ribosomal sequencing data helps to create libraries from which tandem mass spectrometry data can be compared. In this study, we present an alternative approach whereby cloud computing is utilized to power neo-antigen searches against community curated databases containing more than 7 million human sequence variants. Using these expansive databases of high-quality sequences as a reference, we reanalyze the original data from two previously reported studies to identify neo-antigen targets in metastatic melanoma. 
Using our approach, we identify 79 percent of the non-canonical peptides reported by previous genomic analyses of these files. Furthermore, we report 18-fold more non-canonical peptides than previously reported. The novel neo-antigens we report herein can be corroborated by secondary analyses such as high predicted binding affinity, when analyzed by well-established tools such as NetMHC. Finally, we report 738 non-canonical peptides shared by at least five patient samples, and 3258 shared across the two studies. This illustrates the depth of data that is present, but typically missed by lower statistical power proteogenomic approaches. This large list of shared peptides across the two studies, their annotation, non-canonical origin, as well as MS/MS spectra from the two studies are made available on a web portal for community analysis.}, } @article {pmid34345198, year = {2022}, author = {Narayanan, KL and Krishnan, RS and Son, LH and Tung, NT and Julie, EG and Robinson, YH and Kumar, R and Gerogiannis, VC}, title = {Fuzzy Guided Autonomous Nursing Robot through Wireless Beacon Network.}, journal = {Multimedia tools and applications}, volume = {81}, number = {3}, pages = {3297-3325}, pmid = {34345198}, issn = {1380-7501}, abstract = {Robotics is one of the most emerging technologies today, and are used in a variety of applications, ranging from complex rocket technology to monitoring of crops in agriculture. Robots can be exceptionally useful in a smart hospital environment provided that they are equipped with improved vision capabilities for detection and avoidance of obstacles present in their path, thus allowing robots to perform their tasks without any disturbance. 
In the particular case of Autonomous Nursing Robots, major essential issues are effective robot path planning for the delivery of medicines to patients, measuring the patient body parameters through sensors, interacting with and informing the patient, by means of voice-based modules, about the doctor's visiting schedule, his/her body parameter details, etc. This paper presents an approach of a complete Autonomous Nursing Robot which supports all the aforementioned tasks. In this paper, we present a new Autonomous Nursing Robot system capable of operating in a smart hospital environment area. The objective of the system is to identify the patient room, perform robot path planning for the delivery of medicines to a patient, and measure the patient body parameters, through a wireless BLE (Bluetooth Low Energy) beacon receiver and the BLE beacon transmitter at the respective patient rooms. Assuming that a wireless beacon is kept at the patient room, the robot follows the beacon's signal, identifies the respective room and delivers the needed medicine to the patient. A new fuzzy controller system which consists of three ultrasonic sensors and one camera is developed to detect the optimal robot path and to avoid the robot collision with stable and moving obstacles. The fuzzy controller effectively detects obstacles in the robot's vicinity and makes proper decisions for avoiding them. The navigation of the robot is implemented on a BLE tag module by using the AOA (Angle of Arrival) method. The robot uses sensors to measure the patient body parameters and updates these data to the hospital patient database system in a private cloud mode. It also makes use of a Google assistant to interact with the patients. The robotic system was implemented on the Raspberry Pi using Matlab 2018b. The system performance was evaluated on a PC with an Intel Core i5 processor, while the solar power was used to power the system. 
Several sensors, namely HC-SR04 ultrasonic sensor, Logitech HD 720p image sensor, a temperature sensor and a heart rate sensor are used together with a camera to generate datasets for testing the proposed system. In particular, the system was tested on operations taking place in the context of a private hospital in Tirunelveli, Tamilnadu, India. A detailed comparison is performed, through some performance metrics, such as Correlation, Root Mean Square Error (RMSE), and Mean Absolute Percentage Error (MAPE), against the related works of Deepu et al., Huh and Seo, Chinmayi et al., Alli et al., Xu, Ran et al., and Lee et al. The experimental system validation showed that the fuzzy controller achieves very high accuracy in obstacle detection and avoidance, with a very low computational time for taking directional decisions. Moreover, the experimental results demonstrated that the robotic system achieves superior accuracy in detecting/avoiding obstacles compared to other systems of similar purposes presented in the related works.}, } @article {pmid34344669, year = {2023}, author = {Antaki, F and Coussa, RG and Kahwati, G and Hammamji, K and Sebag, M and Duval, R}, title = {Accuracy of automated machine learning in classifying retinal pathologies from ultra-widefield pseudocolour fundus images.}, journal = {The British journal of ophthalmology}, volume = {107}, number = {1}, pages = {90-95}, doi = {10.1136/bjophthalmol-2021-319030}, pmid = {34344669}, issn = {1468-2079}, mesh = {Humans ; *Artificial Intelligence ; ROC Curve ; Fundus Oculi ; Machine Learning ; Retina ; *Retinal Vein Occlusion ; }, abstract = {AIMS: Automated machine learning (AutoML) is a novel tool in artificial intelligence (AI). This study assessed the discriminative performance of AutoML in differentiating retinal vein occlusion (RVO), retinitis pigmentosa (RP) and retinal detachment (RD) from normal fundi using ultra-widefield (UWF) pseudocolour fundus images.

METHODS: Two ophthalmologists without coding experience carried out AutoML model design using a publicly available image data set (2137 labelled images). The data set was reviewed for low-quality and mislabeled images and then uploaded to the Google Cloud AutoML Vision platform for training and testing. We designed multiple binary models to differentiate RVO, RP and RD from normal fundi and compared them to bespoke models obtained from the literature. We then devised a multiclass model to detect RVO, RP and RD. Saliency maps were generated to assess the interpretability of the model.

RESULTS: The AutoML models demonstrated high diagnostic properties in the binary classification tasks that were generally comparable to bespoke deep-learning models (area under the precision-recall curve (AUPRC) 0.921-1, sensitivity 84.91%-89.77%, specificity 78.72%-100%). The multiclass AutoML model had an AUPRC of 0.876, a sensitivity of 77.93% and a positive predictive value of 82.59%. The per-label sensitivity and specificity, respectively, were normal fundi (91.49%, 86.75%), RVO (83.02%, 92.50%), RP (72.00%, 100%) and RD (79.55%, 96.80%).

CONCLUSION: AutoML models created by ophthalmologists without coding experience can detect RVO, RP and RD in UWF images with very good diagnostic accuracy. The performance was comparable to bespoke deep-learning models derived by AI experts for RVO and RP but not for RD.}, } @article {pmid34343101, year = {2022}, author = {Tajalli, SZ and Kavousi-Fard, A and Mardaneh, M and Khosravi, A and Razavi-Far, R}, title = {Uncertainty-Aware Management of Smart Grids Using Cloud-Based LSTM-Prediction Interval.}, journal = {IEEE transactions on cybernetics}, volume = {52}, number = {10}, pages = {9964-9977}, doi = {10.1109/TCYB.2021.3089634}, pmid = {34343101}, issn = {2168-2275}, abstract = {This article introduces an uncertainty-aware cloud-fog-based framework for power management of smart grids using a multiagent-based system. The power management is a social welfare optimization problem. A multiagent-based algorithm is suggested to solve this problem, in which agents are defined as volunteering consumers and dispatchable generators. In the proposed method, every consumer can voluntarily put a price on its power demand at each interval of operation to benefit from the equal opportunity of contributing to the power management process provided for all generation and consumption units. In addition, the uncertainty analysis using a deep learning method is also applied in a distributive way with the local calculation of prediction intervals for sources with stochastic nature in the system, such as loads, small wind turbines (WTs), and rooftop photovoltaics (PVs). Using the predicted ranges of load demand and stochastic generation outputs, a range for power consumption/generation is also provided for each agent called "preparation range" to demonstrate the predicted boundary, where the accepted power consumption/generation of an agent might occur, considering the uncertain sources. 
Besides, fog computing is deployed as a critical infrastructure for fast calculation and providing local storage for reasonable pricing. Cloud services are also proposed for virtual applications as efficient databases and computation units. The performance of the proposed framework is examined on two smart grid test systems and compared with other well-known methods. The results prove the capability of the proposed method to obtain the optimal outcomes in a short time for any scale of grid.}, } @article {pmid34342466, year = {2021}, author = {Marques, G and Leswing, K and Robertson, T and Giesen, D and Halls, MD and Goldberg, A and Marshall, K and Staker, J and Morisato, T and Maeshima, H and Arai, H and Sasago, M and Fujii, E and Matsuzawa, NN}, title = {De Novo Design of Molecules with Low Hole Reorganization Energy Based on a Quarter-Million Molecule DFT Screen.}, journal = {The journal of physical chemistry. A}, volume = {125}, number = {33}, pages = {7331-7343}, doi = {10.1021/acs.jpca.1c04587}, pmid = {34342466}, issn = {1520-5215}, abstract = {Materials exhibiting higher mobilities than conventional organic semiconducting materials such as fullerenes and fused thiophenes are in high demand for applications in printed electronics. To discover new molecules in the heteroacene family that might show improved hole mobility, three de novo design methods were applied. Machine learning (ML) models were generated based on previously calculated hole reorganization energies of a quarter million examples of heteroacenes, where the energies were calculated by applying density functional theory (DFT) and a massive cloud computing environment. 
The three generative methods applied were (1) the continuous space method, where molecular structures are converted into continuous variables by applying the variational autoencoder/decoder technique; (2) the method based on reinforcement learning of SMILES strings (the REINVENT method); and (3) the junction tree variational autoencoder method that directly generates molecular graphs. Among the three methods, the second and third methods succeeded in obtaining chemical structures whose DFT-calculated hole reorganization energy was lower than the lowest energy in the training dataset. This suggests that an extrapolative materials design protocol can be developed by applying generative modeling to a quantitative structure-property relationship (QSPR) utility function.}, } @article {pmid34335730, year = {2021}, author = {Du, Z and Miao, H}, title = {Research on Edge Service Composition Method Based on BAS Algorithm.}, journal = {Computational intelligence and neuroscience}, volume = {2021}, number = {}, pages = {9931689}, pmid = {34335730}, issn = {1687-5273}, mesh = {*Algorithms ; *Cloud Computing ; Computers ; Heuristics ; }, abstract = {Edge services are transferred data processing, application running, and implementation of some functional services from cloud central server to network edge server to provide services. Combined edge service can effectively reduce task computation in the cloud, shorten transmission distance of processing data, quickly decompose task of service request, and select the optimal edge service combination to provide service for users. BAS is an efficient intelligent optimization algorithm, which can achieve efficient optimization and neither need to know the specific form of function nor need gradient information. This paper designs an edge service composition model based on edge computing and proposes a method about edge service composition by BAS optimization algorithm. 
Our proposed method has obvious advantages in service composition efficiency compared with service composition method based on PSO or WPA heuristic algorithm. Compared with cloud service composition method, our proposed method has advantages of shorter service response time, low cost, and high quality of user experience.}, } @article {pmid34328586, year = {2021}, author = {Wang, Y and Murlidaran, S and Pearlman, DA}, title = {Quantum simulations of SARS-CoV-2 main protease M[pro] enable high-quality scoring of diverse ligands.}, journal = {Journal of computer-aided molecular design}, volume = {35}, number = {9}, pages = {963-971}, pmid = {34328586}, issn = {1573-4951}, support = {R43 GM140578/GM/NIGMS NIH HHS/United States ; }, mesh = {Antiviral Agents/*chemistry/metabolism ; Atazanavir Sulfate/chemistry/metabolism ; Binding Sites ; Cloud Computing ; Coronavirus 3C Proteases/*chemistry/*metabolism ; Density Functional Theory ; Hydrogen Bonding ; Ligands ; Molecular Docking Simulation ; Protein Conformation ; Quantum Theory ; }, abstract = {The COVID-19 pandemic has led to unprecedented efforts to identify drugs that can reduce its associated morbidity/mortality rate. Computational chemistry approaches hold the potential for triaging potential candidates far more quickly than their experimental counterparts. These methods have been widely used to search for small molecules that can inhibit critical proteins involved in the SARS-CoV-2 replication cycle. An important target is the SARS-CoV-2 main protease Mpro, an enzyme that cleaves the viral polyproteins into individual proteins required for viral replication and transcription. Unfortunately, standard computational screening methods face difficulties in ranking diverse ligands to a receptor due to disparate ligand scaffolds and varying charge states. Here, we describe full density functional quantum mechanical (DFT) simulations of Mpro in complex with various ligands to obtain absolute ligand binding energies. 
Our calculations are enabled by a new cloud-native parallel DFT implementation running on computational resources from Amazon Web Services (AWS). The results we obtain are promising: the approach is quite capable of scoring a very diverse set of existing drug compounds for their affinities to Mpro and suggests the DFT approach is potentially more broadly applicable to repurpose screening against this target.
This article clarifies the networking structure and networking objects of the rehabilitation system Internet of Things, clarifies the functions of each part, and establishes an overall system architecture based on smart medical care; the design and optimization of the mechanical part of the stroke rehabilitation robot are carried out, as well as kinematics and dynamic analysis. According to the functions of different types of stroke rehabilitation robots, strategies are given for the use of lower limb rehabilitation robots; standardized codes are used to identify system objects, and RFID technology is used to automatically identify users and devices. Combined with the use of the Internet and GSM mobile communication network, construct a network database of system networking objects and, on this basis, establish information management software based on a smart medical rehabilitation system that takes care of both doctors and patients to realize the system's Internet of Things architecture. In addition, this article also gives the recovery strategy generation in the system with the design method of resource scheduling method and the theoretical algorithm of rehabilitation strategy generation is given and verified. 
This research summarizes the application background, advantages, and past practice of the Internet of Things in stroke medical care, develops and applies a medical collaborative cloud computing system for systematic intervention of stroke, and realizes the module functions such as information sharing, regional monitoring, and collaborative consultation within the base.}, } @article {pmid34326863, year = {2021}, author = {Mrozek, D and Stępień, K and Grzesik, P and Małysiak-Mrozek, B}, title = {A Large-Scale and Serverless Computational Approach for Improving Quality of NGS Data Supporting Big Multi-Omics Data Analyses.}, journal = {Frontiers in genetics}, volume = {12}, number = {}, pages = {699280}, pmid = {34326863}, issn = {1664-8021}, abstract = {Various types of analyses performed over multi-omics data are driven today by next-generation sequencing (NGS) techniques that produce large volumes of DNA/RNA sequences. Although many tools allow for parallel processing of NGS data in a Big Data distributed environment, they do not facilitate the improvement of the quality of NGS data for a large scale in a simple declarative manner. Meanwhile, large sequencing projects and routine DNA/RNA sequencing associated with molecular profiling of diseases for personalized treatment require both good quality data and appropriate infrastructure for efficient storing and processing of the data. To solve the problems, we adapt the concept of Data Lake for storing and processing big NGS data. We also propose a dedicated library that allows cleaning the DNA/RNA sequences obtained with single-read and paired-end sequencing techniques. To accommodate the growth of NGS data, our solution is largely scalable on the Cloud and may rapidly and flexibly adjust to the amount of data that should be processed. 
Moreover, to simplify the utilization of the data cleaning methods and implementation of other phases of data analysis workflows, our library extends the declarative U-SQL query language providing a set of capabilities for data extraction, processing, and storing. The results of our experiments prove that the whole solution supports requirements for ample storage and highly parallel, scalable processing that accompanies NGS-based multi-omics data analyses.}, } @article {pmid34319675, year = {2021}, author = {Ashammakhi, N and Unluturk, BD and Kaarela, O and Akyildiz, IF}, title = {The Cells and the Implant Interact With the Biological System Via the Internet and Cloud Computing as the New Mediator.}, journal = {The Journal of craniofacial surgery}, volume = {32}, number = {5}, pages = {1655-1657}, pmid = {34319675}, issn = {1536-3732}, support = {UG3 TR003148/TR/NCATS NIH HHS/United States ; }, mesh = {*Cloud Computing ; *Dental Implants ; Internet ; Software ; }, } @article {pmid34314431, year = {2021}, author = {Niemann, M and Lachmann, N and Geneugelijk, K and Spierings, E}, title = {Computational Eurotransplant kidney allocation simulations demonstrate the feasibility and benefit of T-cell epitope matching.}, journal = {PLoS computational biology}, volume = {17}, number = {7}, pages = {e1009248}, pmid = {34314431}, issn = {1553-7358}, mesh = {Algorithms ; Cloud Computing ; Computational Biology ; Computer Simulation ; Epitopes, T-Lymphocyte/*immunology ; Europe ; Feasibility Studies ; Graft Survival/immunology ; Histocompatibility Testing/*methods/statistics & numerical data ; Humans ; Kidney Transplantation/*methods/statistics & numerical data ; Markov Chains ; Monte Carlo Method ; Time Factors ; Tissue and Organ Procurement/*methods/statistics & numerical data ; User-Computer Interface ; Waiting Lists ; }, abstract = {The EuroTransplant Kidney Allocation System (ETKAS) aims at allocating organs to patients on the waiting list fairly whilst optimizing HLA 
match grades. ETKAS currently considers the number of HLA-A, -B, -DR mismatches. Evidently, epitope matching is biologically and clinically more relevant. We here executed ETKAS-based computer simulations to evaluate the impact of epitope matching on allocation and compared the strategies. A virtual population of 400,000 individuals was generated using the National Marrow Donor Program (NMDP) haplotype frequency dataset of 2011. Using this population, a waiting list of 10,400 patients was constructed and maintained during simulation, matching the 2015 Eurotransplant Annual Report characteristics. Unacceptable antigens were assigned randomly relative to their frequency using HLAMatchmaker. Over 22,600 kidneys were allocated in 10 years in triplicate using Markov Chain Monte Carlo simulations on 32-CPU-core cloud-computing instances. T-cell epitopes were calculated using the www.pirche.com portal. Waiting list effects were evaluated against ETKAS for five epitope matching scenarios. Baseline simulations of ETKAS slightly overestimated reported average HLA match grades. The best balanced scenario maintained prioritisation of HLA A-B-DR fully matched donors while replacing the HLA match grade by PIRCHE-II score and exchanging the HLA mismatch probability (MMP) by epitope MMP. This setup showed no considerable impact on kidney exchange rates and waiting time. PIRCHE-II scores improved, whereas the average HLA match grade diminishes slightly, yet leading to an improved estimated graft survival. 
We conclude that epitope-based matching in deceased donor kidney allocation is feasible while maintaining equal balances on the waiting list.}, } @article {pmid34312582, year = {2021}, author = {Aslam, B and Javed, AR and Chakraborty, C and Nebhen, J and Raqib, S and Rizwan, M}, title = {Blockchain and ANFIS empowered IoMT application for privacy preserved contact tracing in COVID-19 pandemic.}, journal = {Personal and ubiquitous computing}, volume = {}, number = {}, pages = {1-17}, pmid = {34312582}, issn = {1617-4909}, abstract = {Life-threatening novel severe acute respiratory syndrome coronavirus (SARS-CoV-2), also known as COVID-19, has engulfed the world and caused health and economic challenges. To control the spread of COVID-19, a mechanism is required to enforce physical distancing between people. This paper proposes a Blockchain-based framework that preserves patients' anonymity while tracing their contacts with the help of Bluetooth-enabled smartphones. We use a smartphone application to interact with the proposed blockchain framework for contact tracing of the general public using Bluetooth and to store the obtained data over the cloud, which is accessible to health departments and government agencies to perform necessary and timely actions (e.g., like quarantine the infected people moving around). Thus, the proposed framework helps people perform their regular business and day-to-day activities with a controlled mechanism that keeps them safe from infected and exposed people. The smartphone application is capable enough to check their COVID status after analyzing the symptoms quickly and observes (based on given symptoms) either this person is infected or not. 
As a result, the proposed Adaptive Neuro-Fuzzy Inference System (ANFIS) system predicts the COVID status, and K-Nearest Neighbor (KNN) enhances the accuracy rate to 95.9% compared to state-of-the-art results.
FReeP is based on Machine Learning techniques, particularly in Preference Learning. FReeP is composed of three algorithms, where two of them aim at recommending the value for one parameter at a time, and the third makes recommendations for n parameters at once. The experimental results obtained with provenance data from two broadly used workflows showed FReeP usefulness in the recommendation of values for one parameter. Furthermore, the results indicate the potential of FReeP to recommend values for n parameters in scientific workflows.}, } @article {pmid34307857, year = {2021}, author = {Skarlat, O and Schulte, S}, title = {FogFrame: a framework for IoT application execution in the fog.}, journal = {PeerJ. Computer science}, volume = {7}, number = {}, pages = {e588}, pmid = {34307857}, issn = {2376-5992}, abstract = {Recently, a multitude of conceptual architectures and theoretical foundations for fog computing have been proposed. Despite this, there is still a lack of concrete frameworks to setup real-world fog landscapes. In this work, we design and implement the fog computing framework FogFrame-a system able to manage and monitor edge and cloud resources in fog landscapes and to execute Internet of Things (IoT) applications. FogFrame provides communication and interaction as well as application management within a fog landscape, namely, decentralized service placement, deployment and execution. For service placement, we formalize a system model, define an objective function and constraints, and solve the problem implementing a greedy algorithm and a genetic algorithm. The framework is evaluated with regard to Quality of Service parameters of IoT applications and the utilization of fog resources using a real-world operational testbed. The evaluation shows that the service placement is adapted according to the demand and the available resources in the fog landscape. 
The greedy placement leads to the maximum utilization of edge devices keeping at the edge as many services as possible, while the placement based on the genetic algorithm keeps devices from overloads by balancing between the cloud and edge. When comparing edge and cloud deployment, the service deployment time at the edge takes 14% of the deployment time in the cloud. If fog resources are utilized at maximum capacity, and a new application request arrives with the need of certain sensor equipment, service deployment becomes impossible, and the application needs to be delegated to other fog resources. The genetic algorithm allows to better accommodate new applications and keep the utilization of edge devices at about 50% CPU. During the experiments, the framework successfully reacts to runtime events: (i) services are recovered when devices disappear from the fog landscape; (ii) cloud resources and highly utilized devices are released by migrating services to new devices; (iii) and in case of overloads, services are migrated in order to release resources.}, } @article {pmid34306054, year = {2021}, author = {Sauber, AM and Awad, A and Shawish, AF and El-Kafrawy, PM}, title = {A Novel Hadoop Security Model for Addressing Malicious Collusive Workers.}, journal = {Computational intelligence and neuroscience}, volume = {2021}, number = {}, pages = {5753948}, pmid = {34306054}, issn = {1687-5273}, mesh = {*Algorithms ; Big Data ; *Computer Communication Networks ; Humans ; }, abstract = {With the daily increase of data production and collection, Hadoop is a platform for processing big data on a distributed system. A master node globally manages running jobs, whereas worker nodes process partitions of the data locally. Hadoop uses MapReduce as an effective computing model. However, Hadoop experiences a high level of security vulnerability over hybrid and public clouds. Specially, several workers can fake results without actually processing their portions of the data. 
Several redundancy-based approaches have been proposed to counteract this risk. A replication mechanism is used to duplicate all or some of the tasks over multiple workers (nodes). A drawback of such approaches is that they generate a high overhead over the cluster. Additionally, malicious workers can behave well for a long period of time and attack later. This paper presents a novel model to enhance the security of the cloud environment against untrusted workers. A new component called malicious workers' trap (MWT) is developed to run on the master node to detect malicious (noncollusive and collusive) workers as they convert and attack the system. An implementation to test the proposed model and to analyze the performance of the system shows that the proposed model can accurately detect malicious workers with minor processing overhead compared to vanilla MapReduce and Verifiable MapReduce (V-MR) model [1]. In addition, MWT maintains a balance between the security and usability of the Hadoop cluster.}, } @article {pmid34305744, year = {2021}, author = {Tariq, MU and Poulin, M and Abonamah, AA}, title = {Achieving Operational Excellence Through Artificial Intelligence: Driving Forces and Barriers.}, journal = {Frontiers in psychology}, volume = {12}, number = {}, pages = {686624}, pmid = {34305744}, issn = {1664-1078}, abstract = {This paper presents an in-depth literature review on the driving forces and barriers for achieving operational excellence through artificial intelligence (AI). Artificial intelligence is a technological concept spanning operational management, philosophy, humanities, statistics, mathematics, computer sciences, and social sciences. AI refers to machines mimicking human behavior in terms of cognitive functions. 
The evolution of new technological procedures and advancements in producing intelligence for machines creates a positive impact on decisions, operations, strategies, and management incorporated in the production process of goods and services. Businesses develop various methods and solutions to extract meaningful information, such as big data, automatic production capabilities, and systematization for business improvement. The progress in organizational competitiveness is apparent through improvements in firm's decisions, resulting in increased operational efficiencies. Innovation with AI has enabled small businesses to reduce operating expenses and increase revenues. The focused literature review reveals the driving forces for achieving operational excellence through AI are improvement in computing abilities of machines, development of data-based AI, advancements in deep learning, cloud computing, data management, and integration of AI in operations. The barriers are mainly cultural constraints, fear of the unknown, lack of employee skills, and strategic planning for adopting AI. The current paper presents an analysis of articles focused on AI adoption in production and operations. We selected articles published between 2015 and 2020. Our study contributes to the literature reviews on operational excellence, artificial intelligence, driving forces for AI, and AI barriers in achieving operational excellence.}, } @article {pmid34305445, year = {2021}, author = {Sharma, SK and Ahmed, SS}, title = {IoT-based analysis for controlling & spreading prediction of COVID-19 in Saudi Arabia.}, journal = {Soft computing}, volume = {25}, number = {18}, pages = {12551-12563}, pmid = {34305445}, issn = {1432-7643}, abstract = {Presently, novel coronavirus outbreak 2019 (COVID-19) is a major threat to public health. Mathematical epidemic models can be utilized to forecast the course of an epidemic and cultivate approaches for controlling it. 
This paper utilizes the real data of spreading COVID-19 in Saudi Arabia for mathematical modeling and complex analyses. This paper introduces the Susceptible, Exposed, Infectious, Recovered, Undetectable, and Deceased (SEIRUD) and Machine learning algorithm to predict and control COVID-19 in Saudi Arabia. This COVID-19 has initiated many methods, such as cloud computing, edge-computing, IoT, and artificial intelligence.
The simulation findings have been executed, and the suggested model enhances the accuracy ratio of 89.3%, prediction ratio of 88.7%, the precision ratio of 87.7%, recall ratio of 86.4%, and F1 score of 90.9% compared to other existing methods.}, } @article {pmid34300686, year = {2021}, author = {Huč, A and Šalej, J and Trebar, M}, title = {Analysis of Machine Learning Algorithms for Anomaly Detection on Edge Devices.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {14}, pages = {}, pmid = {34300686}, issn = {1424-8220}, mesh = {Algorithms ; Benchmarking ; *Internet of Things ; *Machine Learning ; }, abstract = {The Internet of Things (IoT) consists of small devices or a network of sensors, which permanently generate huge amounts of data. Usually, they have limited resources, either computing power or memory, which means that raw data are transferred to central systems or the cloud for analysis. Lately, the idea of moving intelligence to the IoT is becoming feasible, with machine learning (ML) moved to edge devices. The aim of this study is to provide an experimental analysis of processing a large imbalanced dataset (DS2OS), split into a training dataset (80%) and a test dataset (20%). The training dataset was reduced by randomly selecting a smaller number of samples to create new datasets Di (i = 1, 2, 5, 10, 15, 20, 40, 60, 80%). Afterwards, they were used with several machine learning algorithms to identify the size at which the performance metrics show saturation and classification results stop improving with an F1 score equal to 0.95 or higher, which happened at 20% of the training dataset. Further on, two solutions for the reduction of the number of samples to provide a balanced dataset are given. In the first, datasets DRi consist of all anomalous samples in seven classes and a reduced majority class ('NL') with i = 0.1, 0.2, 0.5, 1, 2, 5, 10, 15, 20 percent of randomly selected samples. 
In the second, datasets DCi are generated from the representative samples determined with clustering from the training dataset. All three dataset reduction methods showed comparable performance results. Further evaluation of training times and memory usage on Raspberry Pi 4 shows a possibility to run ML algorithms with limited sized datasets on edge devices.}, } @article {pmid34300671, year = {2021}, author = {Yar, H and Imran, AS and Khan, ZA and Sajjad, M and Kastrati, Z}, title = {Towards Smart Home Automation Using IoT-Enabled Edge-Computing Paradigm.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {14}, pages = {}, pmid = {34300671}, issn = {1424-8220}, mesh = {Automation ; *Delivery of Health Care ; *Privacy ; }, abstract = {Smart home applications are ubiquitous and have gained popularity due to the overwhelming use of Internet of Things (IoT)-based technology. The revolution in technologies has made homes more convenient, efficient, and even more secure. The need for advancement in smart home technology is necessary due to the scarcity of intelligent home applications that cater to several aspects of the home simultaneously, i.e., automation, security, safety, and reducing energy consumption using less bandwidth, computation, and cost. Our research work provides a solution to these problems by deploying a smart home automation system with the applications mentioned above over a resource-constrained Raspberry Pi (RPI) device. The RPI is used as a central controlling unit, which provides a cost-effective platform for interconnecting a variety of devices and various sensors in a home via the Internet. We propose a cost-effective integrated system for smart home based on IoT and Edge-Computing paradigm. The proposed system provides remote and automatic control to home appliances, ensuring security and safety. 
Additionally, the proposed solution uses the edge-computing paradigm to store sensitive data in a local cloud to preserve the customer's privacy. Moreover, visual and scalar sensor-generated data are processed and held over edge device (RPI) to reduce bandwidth, computation, and storage cost. In the comparison with state-of-the-art solutions, the proposed system is 5% faster in detecting motion, and 5 ms and 4 ms in switching relay on and off, respectively. It is also 6% more efficient than the existing solutions with respect to energy consumption.}, } @article {pmid34300531, year = {2021}, author = {Kosasih, DI and Lee, BG and Lim, H and Atiquzzaman, M}, title = {An Unsupervised Learning-Based Spatial Co-Location Detection System from Low-Power Consumption Sensor.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {14}, pages = {}, pmid = {34300531}, issn = {1424-8220}, support = {2020R1A2C1008589//National Research Foundation of Korea/ ; }, mesh = {*Algorithms ; Computers, Handheld ; Humans ; Smartphone ; *Unsupervised Machine Learning ; }, abstract = {Spatial co-location detection is the task of inferring the co-location of two or more objects in the geographic space. Mobile devices, especially a smartphone, are commonly employed to accomplish this task with the human object. Previous work focused on analyzing mobile GPS data to accomplish this task. While this approach may guarantee high accuracy from the perspective of the data, it is considered inefficient since knowing the object's absolute geographic location is not required to accomplish this task. This work proposed the implementation of the unsupervised learning-based algorithm, namely convolutional autoencoder, to infer the co-location of people from a low-power consumption sensor data-magnetometer readings. 
The idea is that if the trained model can also reconstruct the other data with the structural similarity (SSIM) index being above 0.5, we can then conclude that the observed individuals were co-located. The evaluation of our system has indicated that the proposed approach could recognize the spatial co-location of people from magnetometer readings.}, } @article {pmid34300497, year = {2021}, author = {Alhasnawi, BN and Jasim, BH and Rahman, ZSA and Siano, P}, title = {A Novel Robust Smart Energy Management and Demand Reduction for Smart Homes Based on Internet of Energy.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {14}, pages = {}, pmid = {34300497}, issn = {1424-8220}, mesh = {*Algorithms ; *Cloud Computing ; Computers ; Internet ; }, abstract = {In residential energy management (REM), Time of Use (ToU) of devices scheduling based on user-defined preferences is an essential task performed by the home energy management controller. This paper devised a robust REM technique capable of monitoring and controlling residential loads within a smart home. In this paper, a new distributed multi-agent framework based on the cloud layer computing architecture is developed for real-time microgrid economic dispatch and monitoring. In this paper the grey wolf optimizer (GWO), artificial bee colony (ABC) optimization algorithm-based Time of Use (ToU) pricing model is proposed to define the rates for shoulder-peak and on-peak hours. The results illustrate the effectiveness of the proposed the grey wolf optimizer (GWO), artificial bee colony (ABC) optimization algorithm based ToU pricing scheme. A Raspberry Pi3 based model of a well-known test grid topology is modified to support real-time communication with open-source IoE platform Node-Red used for cloud computing. Two levels communication system connects microgrid system, implemented in Raspberry Pi3, to cloud server. 
The local communication level utilizes TCP/IP, and MQTT is used as the protocol for the global communication level. The results demonstrate and validate the effectiveness of the proposed technique, as well as the capability to track the changes of load with the interactions in real-time and the fast convergence rate.}, } @article {pmid34300454, year = {2021}, author = {Stan, OP and Enyedi, S and Corches, C and Flonta, S and Stefan, I and Gota, D and Miclea, L}, title = {Method to Increase Dependability in a Cloud-Fog-Edge Environment.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {14}, pages = {}, pmid = {34300454}, issn = {1424-8220}, mesh = {*Algorithms ; *Cloud Computing ; }, abstract = {Robots can be very different, from humanoids to intelligent self-driving cars or just IoT systems that collect and process local sensors' information. This paper presents a way to increase dependability for information exchange and processing in systems with Cloud-Fog-Edge architectures. In an ideal interconnected world, the recognized and registered robots must be able to communicate with each other if they are close enough, or through the Fog access points without overloading the Cloud. In essence, the presented work addresses the Edge area and how the devices can communicate in a safe and secure environment using cryptographic methods for structured systems. The presented work emphasizes the importance of security in a system's dependability and offers a communication mechanism for several robots without overburdening the Cloud. This solution is ideal to be used where various monitoring and control aspects demand extra degrees of safety. 
The extra private keys employed by this procedure further enhance algorithm complexity, limiting the probability that the method may be broken by brute force or systemic attacks.}, } @article {pmid34300439, year = {2021}, author = {Brescia, E and Costantino, D and Marzo, F and Massenio, PR and Cascella, GL and Naso, D}, title = {Automated Multistep Parameter Identification of SPMSMs in Large-Scale Applications Using Cloud Computing Resources.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {14}, pages = {}, pmid = {34300439}, issn = {1424-8220}, mesh = {*Cloud Computing ; *Computers ; Humans ; }, abstract = {Parameter identification of permanent magnet synchronous machines (PMSMs) represents a well-established research area. However, parameter estimation of multiple running machines in large-scale applications has not yet been investigated. In this context, a flexible and automated approach is required to minimize complexity, costs, and human interventions without requiring machine information. This paper proposes a novel identification strategy for surface PMSMs (SPMSMs), highly suitable for large-scale systems. A novel multistep approach using measurement data at different operating conditions of the SPMSM is proposed to perform the parameter identification without requiring signal injection, extra sensors, machine information, and human interventions. Thus, the proposed method overcomes numerous issues of the existing parameter identification schemes. An IoT/cloud architecture is designed to implement the proposed multistep procedure and massively perform SPMSM parameter identifications. 
Finally, hardware-in-the-loop results show the effectiveness of the proposed approach.}, } @article {pmid34283824, year = {2021}, author = {Hanussek, M and Bartusch, F and Krüger, J}, title = {Performance and scaling behavior of bioinformatic applications in virtualization environments to create awareness for the efficient use of compute resources.}, journal = {PLoS computational biology}, volume = {17}, number = {7}, pages = {e1009244}, pmid = {34283824}, issn = {1553-7358}, mesh = {Algorithms ; Benchmarking ; Cloud Computing ; Computational Biology/*methods/standards/statistics & numerical data ; Computers ; Computing Methodologies ; Data Interpretation, Statistical ; Databases, Factual/statistics & numerical data ; High-Throughput Nucleotide Sequencing ; Humans ; Image Interpretation, Computer-Assisted ; Machine Learning ; Sequence Alignment ; Software ; User-Computer Interface ; }, abstract = {The large amount of biological data available in the current times, makes it necessary to use tools and applications based on sophisticated and efficient algorithms, developed in the area of bioinformatics. Further, access to high performance computing resources is necessary, to achieve results in reasonable time. To speed up applications and utilize available compute resources as efficient as possible, software developers make use of parallelization mechanisms, like multithreading. Many of the available tools in bioinformatics offer multithreading capabilities, but more compute power is not always helpful. In this study we investigated the behavior of well-known applications in bioinformatics, regarding their performance in the terms of scaling, different virtual environments and different datasets with our benchmarking tool suite BOOTABLE. The tool suite includes the tools BBMap, Bowtie2, BWA, Velvet, IDBA, SPAdes, Clustal Omega, MAFFT, SINA and GROMACS. In addition we added an application using the machine learning framework TensorFlow. 
Machine learning is not directly part of bioinformatics but applied to many biological problems, especially in the context of medical images (X-ray photographs). The mentioned tools have been analyzed in two different virtual environments, a virtual machine environment based on the OpenStack cloud software and in a Docker environment. The gained performance values were compared to a bare-metal setup and among each other. The study reveals, that the used virtual environments produce an overhead in the range of seven to twenty-five percent compared to the bare-metal environment. The scaling measurements showed, that some of the analyzed tools do not benefit from using larger amounts of computing resources, whereas others showed an almost linear scaling behavior. The findings of this study have been generalized as far as possible and should help users to find the best amount of resources for their analysis. Further, the results provide valuable information for resource providers to handle their resources as efficiently as possible and raise the user community's awareness of the efficient usage of computing resources.}, } @article {pmid34283149, year = {2021}, author = {Zeng, X and Zhang, X and Yang, S and Shi, Z and Chi, C}, title = {Gait-Based Implicit Authentication Using Edge Computing and Deep Learning for Mobile Devices.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {13}, pages = {}, pmid = {34283149}, issn = {1424-8220}, support = {61802252//National Natural Science Foundation of China/ ; }, mesh = {*Biometric Identification ; Computers, Handheld ; *Deep Learning ; Gait ; Privacy ; }, abstract = {Implicit authentication mechanisms are expected to prevent security and privacy threats for mobile devices using behavior modeling. However, recently, researchers have demonstrated that the performance of behavioral biometrics is insufficiently accurate. 
Furthermore, the unique characteristics of mobile devices, such as limited storage and energy, make it subject to constrained capacity of data collection and processing. In this paper, we propose an implicit authentication architecture based on edge computing, coined Edge computing-based mobile Device Implicit Authentication (EDIA), which exploits edge-based gait biometric identification using a deep learning model to authenticate users. The gait data captured by a device's accelerometer and gyroscope sensors is utilized as the input of our optimized model, which consists of a CNN and a LSTM in tandem. Especially, we deal with extracting the features of gait signal in a two-dimensional domain through converting the original signal into an image, and then input it into our network. In addition, to reduce computation overhead of mobile devices, the model for implicit authentication is generated on the cloud server, and the user authentication process also takes place on the edge devices. We evaluate the performance of EDIA under different scenarios where the results show that i) we achieve a true positive rate of 97.77% and also a 2% false positive rate; and ii) EDIA still reaches high accuracy with limited dataset size.}, } @article {pmid34283139, year = {2021}, author = {Alwateer, M and Almars, AM and Areed, KN and Elhosseini, MA and Haikal, AY and Badawy, M}, title = {Ambient Healthcare Approach with Hybrid Whale Optimization Algorithm and Naïve Bayes Classifier.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {13}, pages = {}, pmid = {34283139}, issn = {1424-8220}, mesh = {*Algorithms ; Animals ; Bayes Theorem ; Big Data ; Delivery of Health Care ; *Whales ; }, abstract = {There is a crucial need to process patient's data immediately to make a sound decision rapidly; this data has a very large size and excessive features. Recently, many cloud-based IoT healthcare systems are proposed in the literature. 
However, there are still several challenges associated with the processing time and overall system efficiency concerning big healthcare data. This paper introduces a novel approach for processing healthcare data and predicts useful information with minimal computational cost. The main objective is to accept several types of data and improve accuracy and reduce the processing time. The proposed approach uses a hybrid algorithm consisting of two phases. The first phase aims to minimize the number of features for big data by using the Whale Optimization Algorithm as a feature selection technique. After that, the second phase performs real-time data classification by using the Naïve Bayes Classifier. The proposed approach is based on fog computing for better business agility, better security, deeper insights with privacy, and reduced operation cost. The experimental results demonstrate that the proposed approach can reduce the number of dataset features, improve the accuracy, and reduce the processing time. Accuracy enhanced by average rate: 3.6% (3.34 for Diabetes, 2.94 for Heart disease, 3.77 for Heart attack prediction, and 4.15 for Sonar). 
Besides, it enhances the processing speed by reducing the processing time by an average rate: 8.7% (28.96 for Diabetes, 1.07 for Heart disease, 3.31 for Heart attack prediction, and 1.4 for Sonar).}, } @article {pmid34283112, year = {2021}, author = {Agapiou, A and Lysandrou, V}, title = {Observing Thermal Conditions of Historic Buildings through Earth Observation Data and Big Data Engine.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {13}, pages = {}, pmid = {34283112}, issn = {1424-8220}, support = {INTEGRATED/0918/0034//This research was undertaken under the PERIsCOPE INTEGRATED/0918/0034 (Portal for heritage buildings integration into the contemporary built environment) is co-financed by the European Regional Development Fund and the Republic of Cyprus through the Resea/ ; }, abstract = {This study combines satellite observation, cloud platforms, and geographical information systems (GIS) to investigate at a macro-scale level of observation the thermal conditions of two historic clusters in Cyprus, namely in Limassol and Strovolos municipalities. The two case studies share different environmental and climatic conditions. The former site is coastal, the last a hinterland, and they both contain historic buildings with similar building materials and techniques. For the needs of the study, more than 140 Landsat 7 ETM+ and 8 LDCM images were processed at the Google Earth Engine big data cloud platform to investigate the thermal conditions of the two historic clusters over the period 2013-2020. The multi-temporal thermal analysis included the calibration of all images to provide land surface temperature (LST) products at a 100 m spatial resolution. Moreover, to investigate anomalies related to possible land cover changes of the area, two indices were extracted from the satellite images, the normalised difference vegetation index (NDVI) and the normalised difference build index (NDBI). 
Anticipated results include the macro-scale identification of multi-temporal changes, diachronic changes, the establishment of change patterns based on seasonality and location, occurring in large clusters of historic buildings.}, } @article {pmid34283102, year = {2021}, author = {Moon, J and Yang, M and Jeong, J}, title = {A Novel Approach to the Job Shop Scheduling Problem Based on the Deep Q-Network in a Cooperative Multi-Access Edge Computing Ecosystem.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {13}, pages = {}, pmid = {34283102}, issn = {1424-8220}, support = {IITP-2021-2020-0-01821//Ministry of Science and ICT, South Korea/ ; }, mesh = {*Cloud Computing ; *Ecosystem ; }, abstract = {In this study, based on multi-access edge computing (MEC), we provided the possibility of cooperating manufacturing processes. We tried to solve the job shop scheduling problem by applying DQN (deep Q-network), a reinforcement learning model, to this method. Here, to alleviate the overload of computing resources, an efficient DQN was used for the experiments using transfer learning data. Additionally, we conducted scheduling studies in the edge computing ecosystem of our manufacturing processes without the help of cloud centers. Cloud computing, an environment in which scheduling processing is performed, has issues sensitive to the manufacturing process in general, such as security issues and communication delay time, and research is being conducted in various fields, such as the introduction of an edge computing system that can replace them. We proposed a method of independently performing scheduling at the edge of the network through cooperative scheduling between edge devices within a multi-access edge computing structure. 
The proposed framework was evaluated, analyzed, and compared with existing frameworks in terms of providing solutions and services.}, } @article {pmid34283100, year = {2021}, author = {Chen, L and Grimstead, I and Bell, D and Karanka, J and Dimond, L and James, P and Smith, L and Edwardes, A}, title = {Estimating Vehicle and Pedestrian Activity from Town and City Traffic Cameras.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {13}, pages = {}, pmid = {34283100}, issn = {1424-8220}, support = {EP/P016782/1//Engineering and Physical Sciences Research Council (EPSRC) UK/ ; EP/R013411/1//Engineering and Physical Sciences Research Council (EPSRC) UK/ ; NE/P017134/1//Natural Environment Research Council (NERC)/ ; }, mesh = {Accidents, Traffic/prevention & control ; *COVID-19 ; Cities ; Humans ; *Pedestrians ; Reproducibility of Results ; SARS-CoV-2 ; Safety ; }, abstract = {Traffic cameras are a widely available source of open data that offer tremendous value to public authorities by providing real-time statistics to understand and monitor the activity levels of local populations and their responses to policy interventions such as those seen during the COrona VIrus Disease 2019 (COVID-19) pandemic. This paper presents an end-to-end solution based on the Google Cloud Platform with scalable processing capability to deal with large volumes of traffic camera data across the UK in a cost-efficient manner. It describes a deep learning pipeline to detect pedestrians and vehicles and to generate mobility statistics from these. It includes novel methods for data cleaning and post-processing using a Structure SImilarity Measure (SSIM)-based static mask that improves reliability and accuracy in classifying people and vehicles from traffic camera images. The solution resulted in statistics describing trends in the 'busyness' of various towns and cities in the UK. 
We validated time series against Automatic Number Plate Recognition (ANPR) cameras across North East England, showing a close correlation between our statistical output and the ANPR source. Trends were also favorably compared against traffic flow statistics from the UK's Department of Transport. The results of this work have been adopted as an experimental faster indicator of the impact of COVID-19 on the UK economy and society by the Office for National Statistics (ONS).}, } @article {pmid34282786, year = {2021}, author = {Ali, A and Iqbal, MM and Jamil, H and Qayyum, F and Jabbar, S and Cheikhrouhou, O and Baz, M and Jamil, F}, title = {An Efficient Dynamic-Decision Based Task Scheduler for Task Offloading Optimization and Energy Management in Mobile Cloud Computing.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {13}, pages = {}, pmid = {34282786}, issn = {1424-8220}, mesh = {*Algorithms ; *Cloud Computing ; Computers ; Computers, Handheld ; }, abstract = {Restricted abilities of mobile devices in terms of storage, computation, time, energy supply, and transmission causes issues related to energy optimization and time management while processing tasks on mobile phones. This issue pertains to multifarious mobile device-related dimensions, including mobile cloud computing, fog computing, and edge computing. On the contrary, mobile devices' dearth of storage and processing power originates several issues for optimal energy and time management. These problems intensify the process of task retaining and offloading on mobile devices. This paper presents a novel task scheduling algorithm that addresses energy consumption and time execution by proposing an energy-efficient dynamic decision-based method. The proposed model quickly adapts to the cloud computing tasks and energy and time computation of mobile devices. 
Furthermore, we present a novel task scheduling server that performs the offloading computation process on the cloud, enhancing the mobile device's decision-making ability and computational performance during task offloading. The process of task scheduling harnesses the proposed empirical algorithm. The outcomes of this study enable effective task scheduling wherein energy consumption and task scheduling reduces significantly.}, } @article {pmid34276264, year = {2021}, author = {Risco, S and Moltó, G and Naranjo, DM and Blanquer, I}, title = {Serverless Workflows for Containerised Applications in the Cloud Continuum.}, journal = {Journal of grid computing}, volume = {19}, number = {3}, pages = {30}, pmid = {34276264}, issn = {1572-9184}, abstract = {This paper introduces an open-source platform to support serverless computing for scientific data-processing workflow-based applications across the Cloud continuum (i.e. simultaneously involving both on-premises and public Cloud platforms to process data captured at the edge). This is achieved via dynamic resource provisioning for FaaS platforms compatible with scale-to-zero approaches that minimise resource usage and cost for dynamic workloads with different elasticity requirements. The platform combines the usage of dynamically deployed auto-scaled Kubernetes clusters on on-premises Clouds and automated Cloud bursting into AWS Lambda to achieve higher levels of elasticity. A use case in public health for smart cities is used to assess the platform, in charge of detecting people not wearing face masks from captured videos. Faces are blurred for enhanced anonymity in the on-premises Cloud and detection via Deep Learning models is performed in AWS Lambda for this data-driven containerised workflow. 
The results indicate that hybrid workflows across the Cloud continuum can efficiently perform local data processing for enhanced regulations compliance and perform Cloud bursting for increased levels of elasticity.}, } @article {pmid34261111, year = {2021}, author = {Worrell, GA}, title = {Electrical Brain Stimulation for Epilepsy and Emerging Applications.}, journal = {Journal of clinical neurophysiology : official publication of the American Electroencephalographic Society}, volume = {38}, number = {6}, pages = {471-477}, doi = {10.1097/WNP.0000000000000819}, pmid = {34261111}, issn = {1537-1603}, mesh = {Brain ; *Deep Brain Stimulation ; *Epilepsy/therapy ; Humans ; *Mental Disorders/therapy ; Stereotaxic Techniques ; }, abstract = {Electrical brain stimulation is an established therapy for movement disorders, epilepsy, obsessive compulsive disorder, and a potential therapy for many other neurologic and psychiatric disorders. Despite significant progress and FDA approvals, there remain significant clinical gaps that can be addressed with next generation systems. Integrating wearable sensors and implantable brain devices with off-the-body computing resources (smart phones and cloud resources) opens a new vista for dense behavioral and physiological signal tracking coupled with adaptive stimulation therapy that should have applications for a range of brain and mind disorders. 
Here, we briefly review some history and current electrical brain stimulation applications for epilepsy, deep brain stimulation and responsive neurostimulation, and emerging applications for next generation devices and systems.}, } @article {pmid34257851, year = {2021}, author = {Guo, B and Ma, Y and Yang, J and Wang, Z}, title = {Smart Healthcare System Based on Cloud-Internet of Things and Deep Learning.}, journal = {Journal of healthcare engineering}, volume = {2021}, number = {}, pages = {4109102}, pmid = {34257851}, issn = {2040-2309}, mesh = {*Deep Learning ; Delivery of Health Care ; Humans ; *Internet of Things ; Neural Networks, Computer ; *Wearable Electronic Devices ; }, abstract = {INTRODUCTION: Health monitoring and remote diagnosis can be realized through Smart Healthcare. In view of the existing problems such as simple measurement parameters of wearable devices, huge computing pressure of cloud servers, and lack of individualization of diagnosis, a novel Cloud-Internet of Things (C-IOT) framework for medical monitoring is put forward.

METHODS: Smart phones are adopted as gateway devices to achieve data standardization and preprocess to generate health gray-scale map uploaded to the cloud server. The cloud server realizes the business logic processing and uses the deep learning model to carry out the gray-scale map calculation of health parameters. A deep learning model based on the convolution neural network (CNN) is constructed, in which six volunteers are selected to participate in the experiment, and their health data are marked by private doctors to generate initial data set.

RESULTS: Experimental results show the feasibility of the proposed framework. The test data set is used to test the CNN model after training; the forecast accuracy is over 77.6%.

CONCLUSION: The CNN model performs well in the recognition of health status. Collectively, this Smart Healthcare System is expected to assist doctors by improving the diagnosis of health status in clinical practice.}, } @article {pmid34256646, year = {2021}, author = {Morales-Botello, ML and Gachet, D and de Buenaga, M and Aparicio, F and Busto, MJ and Ascanio, JR}, title = {Chronic patient remote monitoring through the application of big data and internet of things.}, journal = {Health informatics journal}, volume = {27}, number = {3}, pages = {14604582211030956}, doi = {10.1177/14604582211030956}, pmid = {34256646}, issn = {1741-2811}, mesh = {Big Data ; Cloud Computing ; Humans ; Internet ; *Internet of Things ; Monitoring, Physiologic ; *Telemedicine ; }, abstract = {Chronic patients could benefit from the technological advances, but the clinical approaches for this kind of patients are still limited. This paper describes a system for chronic patients monitoring both, in home and external environments. For this purpose, we used novel technologies as big data, cloud computing and internet of things (IoT). Additionally, the system has been validated for three use cases: cardiovascular disease (CVD), hypertension (HPN) and chronic obstructive pulmonary disease (COPD), which were selected for their incidence in the population. This system is innovative within e-health, mainly due to the use of a big data architecture based on open-source components, also it provides a scalable and distributed environment for storage and processing of biomedical sensor data. 
The proposed system enables the incorporation of non-medical data sources in order to improve the self-management of chronic diseases and to develop better strategies for health interventions for chronic and dependents patients.}, } @article {pmid34250607, year = {2021}, author = {Miras Del Río, H and Ortiz Lora, A and Bertolet Reina, A and Terrón León, JA}, title = {A Monte Carlo dose calculation system for ophthalmic brachytherapy based on a realistic eye model.}, journal = {Medical physics}, volume = {48}, number = {8}, pages = {4542-4559}, doi = {10.1002/mp.15045}, pmid = {34250607}, issn = {2473-4209}, mesh = {*Brachytherapy ; *Eye Neoplasms/radiotherapy ; Humans ; Monte Carlo Method ; Phantoms, Imaging ; Radiotherapy Dosage ; Radiotherapy Planning, Computer-Assisted ; }, abstract = {PURPOSE: There is a growing trend towards the adoption of model-based calculation algorithms (MBDCAs) for brachytherapy dose calculations which can properly handle media and source/applicator heterogeneities. However, most of dose calculations in ocular plaque therapy are based on homogeneous water media and standard in-silico ocular phantoms, ignoring non-water equivalency of the anatomic tissues and heterogeneities in applicators and patient anatomy. In this work, we introduce EyeMC, a Monte Carlo (MC) model-based calculation algorithm for ophthalmic plaque brachytherapy using realistic and adaptable patient-specific eye geometries and materials.

METHODS: We used the MC code PENELOPE in EyeMC to model Bebig IsoSeed I25.S16 seeds in COMS plaques and [106] Ru/[106] Rh applicators that are coupled onto a customizable eye model with realistic geometry and composition. To significantly reduce calculation times, we integrated EyeMC with CloudMC, a cloud computing platform for radiation therapy calculations. EyeMC is equipped with an evaluation module that allows the generation of isodose distributions, dose-volume histograms, and comparisons with Plaque Simulator three-dimensional dose distribution. We selected a sample of patients treated with [125] I and [106] Ru isotopes in our institution, covering a variety of different type of plaques, tumor sizes, and locations. Results from EyeMC were compared to the original plan calculated by the TPS Plaque Simulation, studying the influence of heterogeneous media composition as well.

RESULTS: EyeMC calculations for Ru plaques agreed well with manufacturer's reference data and data of MC simulations from Hermida et al. (2013). Significant deviations, up to 20%, were only found in lateral profiles for notched plaques. As expected, media composition significantly affected estimated doses to different eye structures, especially in the [125] I cases evaluated. Dose to sclera and lens were found to be about 12% lower when considering real media, while average dose to tumor was 9% higher. [106] Ru cases presented a 1%-3% dose reduction in all structures using real media for calculation, except for the lens, which showed an average dose 7.6% lower than water-based calculations. Comparisons with Plaque Simulator calculations showed large differences in dose to critical structures for [106] Ru notched plaques. [125] I cases presented significant and systematic dose deviations when using the default calculation parameters from Plaque Simulator version 5.3.8., which were corrected when using calculation parameters from a custom physics model for carrier-attenuation and air-interface correction functions.

CONCLUSIONS: EyeMC is a MC calculation system for ophthalmic brachytherapy based on a realistic and customizable eye-tumor model which includes the main eye structures with their real composition. Integrating this tool into a cloud computing environment allows to perform high-precision MC calculations of ocular plaque treatments in short times. The observed variability in eye anatomy among the selected cases justifies the use of patient-specific models.}, } @article {pmid34249300, year = {2021}, author = {Zhou, C and Hu, J and Chen, N}, title = {Remote Care Assistance in Emergency Department Based on Smart Medical.}, journal = {Journal of healthcare engineering}, volume = {2021}, number = {}, pages = {9971960}, pmid = {34249300}, issn = {2040-2309}, mesh = {*Artificial Intelligence ; Big Data ; *Cloud Computing ; Emergency Service, Hospital ; Humans ; Remote Sensing Technology ; }, abstract = {Smart medical care is user-centric, medical information is the main line, and big data, Internet of Things, cloud computing, artificial intelligence, and other technologies are used to establish scientific and accurate information, as well as an efficient and reasonable medical service system. Smart medical plays an important role in alleviating doctor-patient conflicts caused by information asymmetry, regional health differences caused by irrational allocation of medical resources, and improving medical service levels. This article mainly introduces the remote care assistance system of emergency department based on smart medical and intends to provide some ideas and directions for the technical research of patients in emergency department receiving remote care. 
This paper proposes a research method for remote care assistance in emergency departments based on smart medical, including an overview of remote care based on smart medical, remote care sensor real-time monitoring algorithms based on smart medical, signal detection algorithms, and signal clustering algorithms for smart medical. Remote care in the emergency department assisted in research experiments. The experimental results show that 86.0% of patients like the remote care system based on smart medical studied in this paper.}, } @article {pmid34249298, year = {2021}, author = {Zhao, X and Liu, J and Ji, B and Wang, L}, title = {Service Migration Policy Optimization considering User Mobility for E-Healthcare Applications.}, journal = {Journal of healthcare engineering}, volume = {2021}, number = {}, pages = {9922876}, pmid = {34249298}, issn = {2040-2309}, mesh = {Algorithms ; *Cloud Computing ; Humans ; Public Policy ; *Telemedicine ; }, abstract = {Mobile edge computing (MEC) is an emerging technology that provides cloud services at the edge of network to enable latency-critical and resource-intensive E-healthcare applications. User mobility is common in MEC. User mobility can result in an interruption of ongoing edge services and a dramatic drop in quality of service. Service migration has a great potential to address the issues and brings inevitable cost for the system. In this paper, we propose a service migration solution based on migration zone and formulate service migration cost with a comprehensive model that captures the key challenges. Then, we formulate service migration problem into Markov decision process to obtain optimal service migration policies that decide where to migrate in a limited area. We propose three algorithms to resolve the optimization problem given by the formulated model. Finally, we demonstrate the performance of our proposed algorithms by carrying out extensive experiments. 
We show that the proposed service migration approach reduces the total cost by up to 3 times compared to no migration and outperforms the general solution in terms of the total expected reward.}, } @article {pmid34228752, year = {2021}, author = {Qu, N and You, W}, title = {Design and fault diagnosis of DCS sintering furnace's temperature control system for edge computing.}, journal = {PloS one}, volume = {16}, number = {7}, pages = {e0253246}, pmid = {34228752}, issn = {1932-6203}, abstract = {Under the background of modern industrial processing and production, the sintering furnace's temperature control system is researched to achieve intelligent smelting and reduce energy consumption. First, the specific application and implementation of edge computing in industrial processing and production are analyzed. The industrial processing and production intelligent equipment based on edge computing includes the equipment layer, the edge layer, and the cloud platform layer. This architecture improves the operating efficiency of the intelligent control system. Then, the sintering furnace in the metallurgical industry is taken as an example. The sintering furnace connects powder material particles at high temperatures; thus, the core temperature control system is investigated. Under the actual sintering furnace engineering design, the Distributed Control System (DCS) is used as the basis of sintering furnace temperature control, and the Programmable Logic Controller (PLC) is adopted to reduce the electrical wiring and switch contacts. The hardware circuit of DCS is designed; on this basis, an embedded operating system with excellent performance is transplanted according to functional requirements. The final DCS-based temperature control system is applied to actual monitoring. The real-time temperature of the upper, middle, and lower currents of 1# sintering furnace at a particular point is measured to be 56.95°C, 56.58°C, and 57.2°C, respectively. 
The real-time temperature of the upper, middle, and lower currents of 2# sintering furnaces at a particular point is measured to be 144.7°C, 143.8°C, and 144.0°C, respectively. Overall, the temperature control deviation of the three currents of the two sintering furnaces stays in the controllable range. An expert system based on fuzzy logic in the fault diagnosis system can comprehensively predict the situation of the sintering furnaces. The prediction results of the sintering furnace's faults are closer to the actual situation compared with the fault diagnosis method based on the Backpropagation (BP) neural network. The designed system makes up for the shortcomings of the sintering furnace's traditional temperature control systems and can control the temperature of the sintering furnace intelligently and scientifically. Besides, it can diagnose equipment faults timely and efficiently, thereby improving the sintering efficiency.}, } @article {pmid34227850, year = {2021}, author = {Qin, J and Mei, G and Ma, Z and Piccialli, F}, title = {General Paradigm of Edge-Based Internet of Things Data Mining for Geohazard Prevention.}, journal = {Big data}, volume = {9}, number = {5}, pages = {373-389}, doi = {10.1089/big.2020.0392}, pmid = {34227850}, issn = {2167-647X}, mesh = {Cloud Computing ; Data Mining ; Humans ; *Internet of Things ; }, abstract = {Geological hazards (geohazards) are geological processes or phenomena formed under external-induced factors causing losses to human life and property. Geohazards are sudden, cause great harm, and have broad ranges of influence, which bring considerable challenges to geohazard prevention. Monitoring and early warning are the most common strategies to prevent geohazards. With the development of the internet of things (IoT), IoT-based monitoring devices provide rich and fine data, making geohazard monitoring and early warning more accurate and effective. 
IoT-based monitoring data can be transmitted to a cloud center for processing to provide credible data references for geohazard early warning. However, the massive numbers of IoT devices occupy most resources of the cloud center, which increases the data processing delay. Moreover, limited bandwidth restricts the transmission of large amounts of geohazard monitoring data. Thus, in some cases, cloud computing is not able to meet the real-time requirements of geohazard early warning. Edge computing technology processes data closer to the data source than to the cloud center, which provides the opportunity for the rapid processing of monitoring data. This article presents the general paradigm of edge-based IoT data mining for geohazard prevention, especially monitoring and early warning. The paradigm mainly includes data acquisition, data mining and analysis, and data interpretation. Moreover, a real case is used to illustrate the details of the presented general paradigm. Finally, this article discusses several key problems for the general paradigm of edge-based IoT data mining for geohazard prevention.}, } @article {pmid34226796, year = {2022}, author = {Shin, H and Lee, K and Kwon, HY}, title = {A comparative experimental study of distributed storage engines for big spatial data processing using GeoSpark.}, journal = {The Journal of supercomputing}, volume = {78}, number = {2}, pages = {2556-2579}, pmid = {34226796}, issn = {0920-8542}, abstract = {With increasing numbers of GPS-equipped mobile devices, we are witnessing a deluge of spatial information that needs to be effectively and efficiently managed. Even though there are several distributed spatial data processing systems such as GeoSpark (Apache Sedona), the effects of underlying storage engines have not been well studied for spatial data processing. 
In this paper, we evaluate the performance of various distributed storage engines for processing large-scale spatial data using GeoSpark, a state-of-the-art distributed spatial data processing system running on top of Apache Spark. For our performance evaluation, we choose three distributed storage engines having different characteristics: (1) HDFS, (2) MongoDB, and (3) Amazon S3. To conduct our experimental study on a real cloud computing environment, we utilize Amazon EMR instances (up to 6 instances) for distributed spatial data processing. For the evaluation of big spatial data processing, we generate data sets considering four kinds of various data distributions and various data sizes up to one billion point records (38.5GB raw size). Through the extensive experiments, we measure the processing time of storage engines with the following variations: (1) sharding strategies in MongoDB, (2) caching effects, (3) data distributions, (4) data set sizes, (5) the number of running executors and storage nodes, and (6) the selectivity of queries. The major points observed from the experiments are summarized as follows. (1) The overall performance of MongoDB-based GeoSpark is degraded compared to HDFS- and S3-based GeoSpark in our experimental settings. (2) The performance of MongoDB-based GeoSpark is relatively improved in large-scale data sets compared to the others. (3) HDFS- and S3-based GeoSpark are more scalable to running executors and storage nodes compared to MongoDB-based GeoSpark. (4) The sharding strategy based on the spatial proximity significantly improves the performance of MongoDB-based GeoSpark. (5) S3- and HDFS-based GeoSpark show similar performances in all the environmental settings. (6) Caching in distributed environments improves the overall performance of spatial data processing. 
These results can be usefully utilized in decision-making of choosing the most adequate storage engine for big spatial data processing in a target distributed environment.}, } @article {pmid34220289, year = {2022}, author = {Singh, VK and Kolekar, MH}, title = {Deep learning empowered COVID-19 diagnosis using chest CT scan images for collaborative edge-cloud computing platform.}, journal = {Multimedia tools and applications}, volume = {81}, number = {1}, pages = {3-30}, pmid = {34220289}, issn = {1380-7501}, abstract = {The novel coronavirus outbreak has spread worldwide, causing respiratory infections in humans, leading to a huge global pandemic COVID-19. According to World Health Organization, the only way to curb this spread is by increasing the testing and isolating the infected. Meanwhile, the clinical testing currently being followed is not easily accessible and requires much time to give the results. In this scenario, remote diagnostic systems could become a handy solution. Some existing studies leverage the deep learning approach to provide an effective alternative to clinical diagnostic techniques. However, it is difficult to use such complex networks in resource constraint environments. To address this problem, we developed a fine-tuned deep learning model inspired by the architecture of the MobileNet V2 model. Moreover, the developed model is further optimized in terms of its size and complexity to make it compatible with mobile and edge devices. The results of extensive experimentation performed on a real-world dataset consisting of 2482 chest Computerized Tomography scan images strongly suggest the superiority of the developed fine-tuned deep learning model in terms of high accuracy and faster diagnosis time. The proposed model achieved a classification accuracy of 96.40%, with approximately ten times shorter response time than prevailing deep learning models. 
Further, McNemar's statistical test results also prove the efficacy of the proposed model.}, } @article {pmid34219861, year = {2021}, author = {Mandal, S and Khan, DA and Jain, S}, title = {Cloud-Based Zero Trust Access Control Policy: An Approach to Support Work-From-Home Driven by COVID-19 Pandemic.}, journal = {New generation computing}, volume = {39}, number = {3-4}, pages = {599-622}, pmid = {34219861}, issn = {0288-3635}, abstract = {The ubiquitous cloud computing services provide a new paradigm to the work-from-home environment adopted by the enterprise in the unprecedented crisis of the COVID-19 outbreak. However, the change in work culture would also increase the chances of the cybersecurity attack, MAC spoofing attack, and DDoS/DoS attack due to the divergent incoming traffic from the untrusted network for accessing the enterprise's resources. Networks are usually unable to detect spoofing if the intruder already forges the host's MAC address. However, the techniques used in the existing researches mistakenly classify the malicious host as the legitimate one. This paper proposes a novel access control policy based on a zero-trust network by explicitly restricting the incoming network traffic to substantiate MAC spoofing attacks in the software-defined network (SDN) paradigm of cloud computing. The multiplicative increase and additive decrease algorithm helps to detect the advanced MAC spoofing attack before penetrating the SDN-based cloud resources. Based on the proposed approach, a dynamic threshold is assigned to the incoming port number. The self-learning feature of the threshold stamping helps to rectify a legitimate user's traffic before classifying it to the attacker. Finally, the mathematical and experimental results exhibit high accuracy and detection rate than the existing methodologies. 
The novelty of this approach strengthens the security of the SDN paradigm of cloud resources by redefining conventional access control policy.}, } @article {pmid34211547, year = {2021}, author = {Ni, L and Sun, X and Li, X and Zhang, J}, title = {GCWOAS2: Multiobjective Task Scheduling Strategy Based on Gaussian Cloud-Whale Optimization in Cloud Computing.}, journal = {Computational intelligence and neuroscience}, volume = {2021}, number = {}, pages = {5546758}, pmid = {34211547}, issn = {1687-5273}, mesh = {Algorithms ; Animals ; *Cloud Computing ; Normal Distribution ; *Whales ; }, abstract = {An important challenge facing cloud computing is how to correctly and effectively handle and serve millions of users' requests. Efficient task scheduling in cloud computing can intuitively affect the resource configuration and operating cost of the entire system. However, task and resource scheduling in a cloud computing environment is an NP-hard problem. In this paper, we propose a three-layer scheduling model based on whale-Gaussian cloud. In the second layer of the model, a whale optimization strategy based on the Gaussian cloud model (GCWOAS2) is used for multiobjective task scheduling in a cloud computing which is to minimize the completion time of the task via effectively utilizing the virtual machine resources and to keep the load balancing of each virtual machine, reducing the operating cost of the system. In the GCWOAS2 strategy, an opposition-based learning mechanism is first used to initialize the scheduling strategy to generate the optimal scheduling scheme. Then, an adaptive mobility factor is proposed to dynamically expand the search range. The whale optimization algorithm based on the Gaussian cloud model is proposed to enhance the randomness of search. 
Finally, a multiobjective task scheduling algorithm based on Gaussian whale-cloud optimization (GCWOA) is presented, so that the entire scheduling strategy can not only expand the search range but also jump out of the local maximum and obtain the global optimal scheduling strategy. Experimental results show that compared with other existing metaheuristic algorithms, our strategy can not only shorten the task completion time but also balance the load of virtual machine resources, and at the same time, it also has a better performance in resource utilization.}, } @article {pmid34209509, year = {2021}, author = {Pinheiro, A and Canedo, ED and Albuquerque, RO and de Sousa Júnior, RT}, title = {Validation of Architecture Effectiveness for the Continuous Monitoring of File Integrity Stored in the Cloud Using Blockchain and Smart Contracts.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {13}, pages = {}, pmid = {34209509}, issn = {1424-8220}, mesh = {*Blockchain ; Cloud Computing ; Technology ; }, abstract = {The management practicality and economy offered by the various technological solutions based on cloud computing have attracted many organizations, which have chosen to migrate services to the cloud, despite the numerous challenges arising from this migration. Cloud storage services are emerging as a relevant solution to meet the legal requirements of maintaining custody of electronic documents for long periods. However, the possibility of losses and the consequent financial damage require the permanent monitoring of this information. In a previous work named "Monitoring File Integrity Using Blockchain and Smart Contracts", the authors proposed an architecture based on blockchain, smart contract, and computational trust technologies that allows the periodic monitoring of the integrity of files stored in the cloud. 
However, the experiments carried out in the initial studies that validated the architecture included only small- and medium-sized files. As such, this paper presents a validation of the architecture to determine its effectiveness and efficiency when storing large files for long periods. The article provides an improved and detailed description of the proposed processes, followed by a security analysis of the architecture. The results of both the validation experiments and the implemented defense mechanism analysis confirm the security and the efficiency of the architecture in identifying corrupted files, regardless of file size and storage time.}, } @article {pmid34209400, year = {2021}, author = {Zhou, H and Zhang, W and Wang, C and Ma, X and Yu, H}, title = {BBNet: A Novel Convolutional Neural Network Structure in Edge-Cloud Collaborative Inference.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {13}, pages = {}, pmid = {34209400}, issn = {1424-8220}, support = {61976098//Natural Science Foundation of China/ ; 2020C067//Technology Development Foundation of Quanzhou City/ ; }, mesh = {*Data Compression ; *Neural Networks, Computer ; }, abstract = {Edge-cloud collaborative inference can significantly reduce the delay of a deep neural network (DNN) by dividing the network between mobile edge and cloud. However, the in-layer data size of DNN is usually larger than the original data, so the communication time to send intermediate data to the cloud will also increase end-to-end latency. To cope with these challenges, this paper proposes a novel convolutional neural network structure-BBNet-that accelerates collaborative inference from two levels: (1) through channel-pruning: reducing the number of calculations and parameters of the original network; (2) through compressing the feature map at the split point to further reduce the size of the data transmitted. In addition, this paper implemented the BBNet structure based on NVIDIA Nano and the server. 
Compared with the original network, BBNet's FLOPs and parameter achieve up to 5.67× and 11.57× on the compression rate, respectively. In the best case, the feature compression layer can reach a bit-compression rate of 512×. Compared with the better bandwidth conditions, BBNet has a more obvious inference delay when the network conditions are poor. For example, when the upload bandwidth is only 20 kb/s, the end-to-end latency of BBNet is increased by 38.89× compared with the cloud-only approach.}, } @article {pmid34207851, year = {2021}, author = {Mendez, J and Molina, M and Rodriguez, N and Cuellar, MP and Morales, DP}, title = {Camera-LiDAR Multi-Level Sensor Fusion for Target Detection at the Network Edge.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {12}, pages = {}, pmid = {34207851}, issn = {1424-8220}, mesh = {*Algorithms ; Automobiles ; Lasers ; *Machine Learning ; }, abstract = {There have been significant advances regarding target detection in the autonomous vehicle context. To develop more robust systems that can overcome weather hazards as well as sensor problems, the sensor fusion approach is taking the lead in this context. Laser Imaging Detection and Ranging (LiDAR) and camera sensors are two of the most used sensors for this task since they can accurately provide important features such as target´s depth and shape. However, most of the current state-of-the-art target detection algorithms for autonomous cars do not take into consideration the hardware limitations of the vehicle such as the reduced computing power in comparison with Cloud servers as well as the reduced latency. In this work, we propose Edge Computing Tensor Processing Unit (TPU) devices as hardware support due to their computing capabilities for machine learning algorithms as well as their reduced power consumption. We developed an accurate and small target detection model for these devices. 
Our proposed Multi-Level Sensor Fusion model has been optimized for the network edge, specifically for the Google Coral TPU. As a result, high accuracy results are obtained while reducing the memory consumption as well as the latency of the system using the challenging KITTI dataset.}, } @article {pmid34207675, year = {2021}, author = {Caminero, AC and Muñoz-Mansilla, R}, title = {Quality of Service Provision in Fog Computing: Network-Aware Scheduling of Containers.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {12}, pages = {}, pmid = {34207675}, issn = {1424-8220}, support = {FILE (2019V/PUNED/004)//Universidad Nacional de Educación a Distancia/ ; SMARTTRAFFIC (2019V/EUIN-UNED/003)//Universidad Nacional de Educación a Distancia/ ; SNOLA, RED2018-102725-T//Ministerio de Economía, Industria y Competitividad, Gobierno de España/ ; e-Madrid-CM (P2018/TCS-4307)//Comunidad de Madrid/ ; }, mesh = {Algorithms ; *Cloud Computing ; *Internet of Things ; }, abstract = {State-of-the-art scenarios, such as Internet of Things (IoT) and Smart Cities, have recently arisen. They involve the processing of huge data sets under strict time requirements, rendering the use of cloud resources unfeasible. For this reason, Fog computing has been proposed as a solution; however, there remains a need for intelligent allocation decisions, in order to make it a fully usable solution in such contexts. In this paper, a network-aware scheduling algorithm is presented, which aims to select the fog node most suitable for the execution of an application within a given deadline. This decision is made taking the status of the network into account. This scheduling algorithm was implemented as an extension to the Kubernetes default scheduler, and compared with existing proposals in the literature. 
The comparison shows that our proposal is the only one that can execute all the submitted jobs within their deadlines (i.e., no job is rejected or executed exceeding its deadline) with certain configurations in some of the scenarios tested, thus obtaining an optimal solution in such scenarios.}, } @article {pmid34207511, year = {2021}, author = {Pauca, O and Maxim, A and Caruntu, CF}, title = {Multivariable Optimisation for Waiting-Time Minimisation at Roundabout Intersections in a Cyber-Physical Framework.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {12}, pages = {}, pmid = {34207511}, issn = {1424-8220}, support = {PN-III-P1-1.1-TE-2019-1123//Unitatea Executiva pentru Finantarea Invatamantului Superior, a Cercetarii, Dezvoltarii si Inovarii/ ; PN-III-P1-1.1-PD-2019-0757//Unitatea Executiva pentru Finantarea Invatamantului Superior, a Cercetarii, Dezvoltarii si Inovarii/ ; }, mesh = {*Accidents, Traffic ; Humans ; Safety ; *Waiting Lists ; }, abstract = {The evolution of communication networks offers new possibilities for development in the automotive industry. Smart vehicles will benefit from the possibility of connecting with the infrastructure and from an extensive exchange of data between them. Furthermore, new control strategies can be developed that benefit the advantages of these communication networks. In this endeavour, the main purposes considered by the automotive industry and researchers from academia are defined by: (i) ensuring people's safety; (ii) reducing the overall costs, and (iii) improving the traffic by maximising the fluidity. In this paper, a cyber-physical framework (CPF) to control the access of vehicles in roundabout intersections composed of two levels is proposed. Both levels correspond to the cyber part of the CPF, while the physical part is composed of the vehicles crossing the roundabout. 
The first level, i.e., the edge-computing layer, is based on an analytical solution that uses multivariable optimisation to minimise the waiting times of the vehicles entering a roundabout intersection and to ensure a safe crossing. The second level, i.e., the cloud-computing layer, stores information about the waiting times and trajectories of all the vehicles that cross the roundabout and uses them for long-term analysis and prediction. The simulated results show the efficacy of the proposed method, which can be easily implemented on an embedded device for real-time operation.}, } @article {pmid34200488, year = {2021}, author = {Bao, Y and Lin, P and Li, Y and Qi, Y and Wang, Z and Du, W and Fan, Q}, title = {Parallel Structure from Motion for Sparse Point Cloud Generation in Large-Scale Scenes.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {11}, pages = {}, pmid = {34200488}, issn = {1424-8220}, support = {ZR2020MF132//Natural Science Foundation of Shandong Province/ ; 62072020//National Natural Science Foundation of China/ ; 2017YFB1002602//National Key R&D Program of China/ ; No.VRLAB2019A03//Open Project Program of State Key Laboratory of Virtual Reality Technology and Systems, Beihang University/ ; No.19-3-2-21-zhc//Qingdao Leading Scholars Project on Innovation and Entrepreneurship 2019/ ; }, abstract = {Scene reconstruction uses images or videos as input to reconstruct a 3D model of a real scene and has important applications in smart cities, surveying and mapping, military, and other fields. Structure from motion (SFM) is a key step in scene reconstruction, which recovers sparse point clouds from image sequences. However, large-scale scenes cannot be reconstructed using a single compute node. Image matching and geometric filtering take up a lot of time in the traditional SFM problem. In this paper, we propose a novel divide-and-conquer framework to solve the distributed SFM problem. 
First, we use the global navigation satellite system (GNSS) information from images to calculate the GNSS neighborhood. The number of images matched is greatly reduced by matching each image to only valid GNSS neighbors. This way, a robust matching relationship can be obtained. Second, the calculated matching relationship is used as the initial camera graph, which is divided into multiple subgraphs by the clustering algorithm. The local SFM is executed on several computing nodes to register the local cameras. Finally, all of the local camera poses are integrated and optimized to complete the global camera registration. Experiments show that our system can accurately and efficiently solve the structure from motion problem in large-scale scenes.}, } @article {pmid34200090, year = {2021}, author = {Kuaban, GS and Atmaca, T and Kamli, A and Czachórski, T and Czekalski, P}, title = {Performance Analysis of Packet Aggregation Mechanisms and Their Applications in Access (e.g., IoT, 4G/5G), Core, and Data Centre Networks.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {11}, pages = {}, pmid = {34200090}, issn = {1424-8220}, mesh = {*Cloud Computing ; }, abstract = {The transmission of massive amounts of small packets generated by access networks through high-speed Internet core networks to other access networks or cloud computing data centres has introduced several challenges such as poor throughput, underutilisation of network resources, and higher energy consumption. Therefore, it is essential to develop strategies to deal with these challenges. One of them is to aggregate smaller packets into a larger payload packet, and these groups of aggregated packets will share the same header, hence increasing throughput, improved resource utilisation, and reduction in energy consumption. 
This paper presents a review of packet aggregation applications in access networks (e.g., IoT and 4G/5G mobile networks), optical core networks, and cloud computing data centre networks. Then we propose new analytical models based on diffusion approximation for the evaluation of the performance of packet aggregation mechanisms. We demonstrate the use of measured traffic from real networks to evaluate the performance of packet aggregation mechanisms analytically. The use of diffusion approximation allows us to consider time-dependent queueing models with general interarrival and service time distributions. Therefore these models are more general than others presented till now.}, } @article {pmid34199981, year = {2021}, author = {Nouh, R and Singh, M and Singh, D}, title = {SafeDrive: Hybrid Recommendation System Architecture for Early Safety Predication Using Internet of Vehicles.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {11}, pages = {}, pmid = {34199981}, issn = {1424-8220}, mesh = {*Accidents, Traffic/prevention & control ; *Automobile Driving ; Internet ; Risk Factors ; Safety ; Technology ; }, abstract = {The Internet of vehicles (IoV) is a rapidly emerging technological evolution of Intelligent Transportation System (ITS). This paper proposes SafeDrive, a dynamic driver profile (DDP) using a hybrid recommendation system. DDP is a set of functional modules, to analyses individual driver's behaviors, using prior violation and accident records, to identify driving risk patterns. In this paper, we have considered three synthetic data-sets for 1500 drivers based on their profile information, risk parameters information, and risk likelihood. In addition, we have also considered the driver's historical violation/accident data-set records based on four risk-score levels such as high-risk, medium-risk, low-risk, and no-risk to predict current and future driver risk scores. 
Several error calculation methods have been applied in this study to analyze our proposed hybrid recommendation systems' performance to classify the driver's data with higher accuracy based on various criteria. The evaluated results help to improve the driving behavior and broadcast early warning alarm to the other vehicles in IoV environment for the overall road safety. Moreover, the proposed model helps to provide a safe and predicted environment for vehicles, pedestrians, and road objects, with the help of regular monitoring of vehicle motion, driver behavior, and road conditions. It also enables accurate prediction of accidents beforehand, and also minimizes the complexity of on-road vehicles and latency due to fog/cloud computing servers.}, } @article {pmid34199831, year = {2021}, author = {Wang, Q and Su, M and Zhang, M and Li, R}, title = {Integrating Digital Technologies and Public Health to Fight Covid-19 Pandemic: Key Technologies, Applications, Challenges and Outlook of Digital Healthcare.}, journal = {International journal of environmental research and public health}, volume = {18}, number = {11}, pages = {}, pmid = {34199831}, issn = {1660-4601}, support = {18YJA790081//Social Science Fund of Ministry of Education of China/ ; Grant No. ZR2018MG016//Natural Science Foundation of Shandong Province, China/ ; }, mesh = {Artificial Intelligence ; *COVID-19 ; China/epidemiology ; Delivery of Health Care ; Digital Technology ; Humans ; *Pandemics/prevention & control ; Public Health ; SARS-CoV-2 ; }, abstract = {Integration of digital technologies and public health (or digital healthcare) helps us to fight the Coronavirus Disease 2019 (COVID-19) pandemic, which is the biggest public health crisis humanity has faced since the 1918 Influenza Pandemic. In order to better understand the digital healthcare, this work conducted a systematic and comprehensive review of digital healthcare, with the purpose of helping us combat the COVID-19 pandemic. 
This paper covers the background information and research overview of digital healthcare, summarizes its applications and challenges in the COVID-19 pandemic, and finally puts forward the prospects of digital healthcare. First, main concepts, key development processes, and common application scenarios of integrating digital technologies and digital healthcare were offered in the part of background information. Second, the bibliometric techniques were used to analyze the research output, geographic distribution, discipline distribution, collaboration network, and hot topics of digital healthcare before and after COVID-19 pandemic. We found that the COVID-19 pandemic has greatly accelerated research on the integration of digital technologies and healthcare. Third, application cases of China, EU, and U.S. using digital technologies to fight the COVID-19 pandemic were collected and analyzed. Among these digital technologies, big data, artificial intelligence, cloud computing, 5G are most effective weapons to combat the COVID-19 pandemic. Application cases show that these technologies play an irreplaceable role in controlling the spread of the COVID-19. By comparing the application cases in these three regions, we contend that the key to China's success in avoiding the second wave of COVID-19 pandemic is to integrate digital technologies and public health on a large scale without hesitation. Fourth, the application challenges of digital technologies in the public health field are summarized. These challenges mainly come from four aspects: data delays, data fragmentation, privacy security, and data security vulnerabilities. Finally, this study provides the future application prospects of digital healthcare. 
In addition, we also provide policy recommendations for other countries that use digital technology to combat COVID-19.}, } @article {pmid34198526, year = {2021}, author = {Kim, J and Lee, J and Kim, T}, title = {AdaMM: Adaptive Object Movement and Motion Tracking in Hierarchical Edge Computing System.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {12}, pages = {}, pmid = {34198526}, issn = {1424-8220}, support = {2018-0-01502//Ministry of Science and ICT, South Korea/ ; GCU-202008450004//Gachon University research fund of 2020/ ; }, abstract = {This paper presents a novel adaptive object movement and motion tracking (AdaMM) framework in a hierarchical edge computing system for achieving GPU memory footprint reduction of deep learning (DL)-based video surveillance services. DL-based object movement and motion tracking requires a significant amount of resources, such as (1) GPU processing power for the inference phase and (2) GPU memory for model loading. Despite the absence of an object in the video, if the DL model is loaded, the GPU memory must be kept allocated for the loaded model. Moreover, in several cases, video surveillance tries to capture events that rarely occur (e.g., abnormal object behaviors); therefore, such standby GPU memory might be easily wasted. To alleviate this problem, the proposed AdaMM framework categorizes the tasks used for the object movement and motion tracking procedure in an increasing order of the required processing and memory resources as task (1) frame difference calculation, task (2) object detection, and task (3) object motion and movement tracking. The proposed framework aims to adaptively release the unnecessary standby object motion and movement tracking model to save GPU memory by utilizing light tasks, such as frame difference calculation and object detection in a hierarchical manner. 
Consequently, object movement and motion tracking are adaptively triggered if the object is detected within the specified threshold time; otherwise, the GPU memory for the model of task (3) can be released. Moreover, object detection is also adaptively performed if the frame difference over time is greater than the specified threshold. We implemented the proposed AdaMM framework using commercial edge devices by considering a three-tier system, such as the 1st edge node for both tasks (1) and (2), the 2nd edge node for task (3), and the cloud for sending a push alarm. A measurement-based experiment reveals that the proposed framework achieves a maximum GPU memory reduction of 76.8% compared to the baseline system, while requiring a 2680 ms delay for loading the model for object movement and motion tracking.}, } @article {pmid34192136, year = {2020}, author = {Miseikis, J and Caroni, P and Duchamp, P and Gasser, A and Marko, R and Miseikiene, N and Zwilling, F and de Castelbajac, C and Eicher, L and Fruh, M and Fruh, H}, title = {Lio-A Personal Robot Assistant for Human-Robot Interaction and Care Applications.}, journal = {IEEE robotics and automation letters}, volume = {5}, number = {4}, pages = {5339-5346}, pmid = {34192136}, issn = {2377-3766}, abstract = {Lio is a mobile robot platform with a multi-functional arm explicitly designed for human-robot interaction and personal care assistant tasks. The robot has already been deployed in several health care facilities, where it is functioning autonomously, assisting staff and patients on an everyday basis. Lio is intrinsically safe by having full coverage in soft artificial-leather material as well as collision detection, limited speed and forces. Furthermore, the robot has a compliant motion controller. A combination of visual, audio, laser, ultrasound and mechanical sensors are used for safe navigation and environment understanding. 
The ROS-enabled setup allows researchers to access raw sensor data as well as have direct control of the robot. The friendly appearance of Lio has resulted in the robot being well accepted by health care staff and patients. Fully autonomous operation is made possible by a flexible decision engine, autonomous navigation and automatic recharging. Combined with time-scheduled task triggers, this allows Lio to operate throughout the day, with a battery life of up to 8 hours and recharging during idle times. A combination of powerful computing units provides enough processing power to deploy artificial intelligence and deep learning-based solutions on-board the robot without the need to send any sensitive data to cloud services, guaranteeing compliance with privacy requirements. During the COVID-19 pandemic, Lio was rapidly adjusted to perform additional functionality like disinfection and remote elevated body temperature detection. It complies with ISO13482 - Safety requirements for personal care robots, meaning it can be directly tested and deployed in care facilities.}, } @article {pmid34185678, year = {2021}, author = {Fedorov, A and Longabaugh, WJR and Pot, D and Clunie, DA and Pieper, S and Aerts, HJWL and Homeyer, A and Lewis, R and Akbarzadeh, A and Bontempi, D and Clifford, W and Herrmann, MD and Höfener, H and Octaviano, I and Osborne, C and Paquette, S and Petts, J and Punzo, D and Reyes, M and Schacherer, DP and Tian, M and White, G and Ziegler, E and Shmulevich, I and Pihl, T and Wagner, U and Farahani, K and Kikinis, R}, title = {NCI Imaging Data Commons.}, journal = {Cancer research}, volume = {81}, number = {16}, pages = {4188-4193}, pmid = {34185678}, issn = {1538-7445}, support = {P41 EB015898/EB/NIBIB NIH HHS/United States ; HHSN261201500003C/CA/NCI NIH HHS/United States ; HHSN261201000031C/CA/NCI NIH HHS/United States ; HHSN261201500001C/CA/NCI NIH HHS/United States ; HHSN261201500001G/CA/NCI NIH HHS/United States ; HHSN261201500003I/CA/NCI NIH 
HHS/United States ; P41 EB028741/EB/NIBIB NIH HHS/United States ; HHSN261201500001W/CA/NCI NIH HHS/United States ; }, mesh = {Biomedical Research/trends ; Cloud Computing ; Computational Biology/methods ; Computer Graphics ; Computer Security ; Data Interpretation, Statistical ; Databases, Factual ; Diagnostic Imaging/*methods/standards ; Humans ; Image Processing, Computer-Assisted ; *National Cancer Institute (U.S.) ; Neoplasms/*diagnostic imaging/*genetics ; Pilot Projects ; Programming Languages ; Radiology/methods/standards ; Reproducibility of Results ; Software ; United States ; User-Computer Interface ; }, abstract = {The National Cancer Institute (NCI) Cancer Research Data Commons (CRDC) aims to establish a national cloud-based data science infrastructure. Imaging Data Commons (IDC) is a new component of CRDC supported by the Cancer Moonshot. The goal of IDC is to enable a broad spectrum of cancer researchers, with and without imaging expertise, to easily access and explore the value of deidentified imaging data and to support integrated analyses with nonimaging data. We achieve this goal by colocating versatile imaging collections with cloud-based computing resources and data exploration, visualization, and analysis tools. The IDC pilot was released in October 2020 and is being continuously populated with radiology and histopathology collections. IDC provides access to curated imaging collections, accompanied by documentation, a user forum, and a growing number of analysis use cases that aim to demonstrate the value of a data commons framework applied to cancer imaging research. 
SIGNIFICANCE: This study introduces NCI Imaging Data Commons, a new repository of the NCI Cancer Research Data Commons, which will support cancer imaging research on the cloud.}, } @article {pmid34185062, year = {2021}, author = {Park, S and Lee, D and Kim, Y and Lim, S and Chae, H and Kim, S}, title = {BioVLAB-Cancer-Pharmacogenomics: tumor heterogeneity and pharmacogenomics analysis of multi-omics data from tumor on the cloud.}, journal = {Bioinformatics (Oxford, England)}, volume = {38}, number = {1}, pages = {275-277}, doi = {10.1093/bioinformatics/btab478}, pmid = {34185062}, issn = {1367-4811}, support = {//Collaborative Genome Program for Fostering New Post-Genome Industry of the National Research Foundation (NRF)/ ; NRF-2014M3C9A3063541//Ministry of Science and ICT (MSIT)/ ; //Korea Health Technology R&D Project through the Korea Health Industry Development Institute (KHIDI)/ ; //Ministry of Health & Welfare/ ; HI15C3224//Republic of Korea/ ; //Bio & Medical Technology Development Program of the National Research Foundation (NRF)/ ; NRF-2019M3E5D4065965//Ministry of Science & ICT/ ; //Bio & Medical Technology Development Program of the National Research Foundation (NRF)/ ; NRF-2019M3E5D307337511//Ministry of Science & ICT/ ; }, mesh = {Humans ; Female ; *Software ; Multiomics ; Pharmacogenetics ; *Breast Neoplasms/drug therapy/genetics ; Databases, Factual ; }, abstract = {MOTIVATION: Multi-omics data in molecular biology has accumulated rapidly over the years. Such data contains valuable information for research in medicine and drug discovery. Unfortunately, data-driven research in medicine and drug discovery is challenging for a majority of small research labs due to the large volume of data and the complexity of analysis pipeline.

RESULTS: We present BioVLAB-Cancer-Pharmacogenomics, a bioinformatics system that facilitates analysis of multi-omics data from breast cancer to analyze and investigate intratumor heterogeneity and pharmacogenomics on Amazon Web Services. Our system takes multi-omics data as input to perform tumor heterogeneity analysis in terms of TCGA data and deconvolve-and-match the tumor gene expression to cell line data in CCLE using DNA methylation profiles. We believe that our system can help small research labs perform analysis of tumor multi-omics without worrying about computational infrastructure and maintenance of databases and tools.

http://biohealth.snu.ac.kr/software/biovlab_cancer_pharmacogenomics.

SUPPLEMENTARY INFORMATION: Supplementary data are available at Bioinformatics online.}, } @article {pmid34183361, year = {2021}, author = {Leu, MG and Weinberg, ST and Monsen, C and Lehmann, CU and , }, title = {Web Services and Cloud Computing in Pediatric Care.}, journal = {Pediatrics}, volume = {148}, number = {1}, pages = {}, doi = {10.1542/peds.2021-052048}, pmid = {34183361}, issn = {1098-4275}, mesh = {Bilirubin/blood ; Child ; *Cloud Computing ; Computer Security ; Confidentiality ; Consumer Health Information/organization & administration ; Decision Support Systems, Clinical/organization & administration ; Electronic Health Records/*organization & administration ; Humans ; Immunization ; Nomograms ; Pediatrics/*organization & administration ; Practice Guidelines as Topic ; Programming Languages ; *Web Browser ; }, abstract = {Electronic health record (EHR) systems do not uniformly implement pediatric-supportive functionalities. One method of adding these capabilities across EHR platforms is to integrate Web services and Web applications that may perform decision support and store data in the cloud when the EHR platform is able to integrate Web services. Specific examples of these services are described, such as immunization clinical decision support services, consumer health resources, and bilirubin nomograms. Health care providers, EHR vendors, and developers share responsibilities in the appropriate development, integration, and use of Web services and Web applications as they relate to best practices in the areas of data security and confidentiality, technical availability, audit trails, terminology and messaging standards, compliance with the Health Insurance Portability and Accountability Act, testing, usability, and other considerations. 
It is desirable for health care providers to have knowledge of Web services and Web applications that can improve pediatric capabilities in their own EHRs because this will naturally inform discussions concerning EHR features and facilitate implementation and subsequent use of these capabilities by clinicians caring for children.}, } @article {pmid34177116, year = {2022}, author = {Ahanger, TA and Tariq, U and Nusir, M and Aldaej, A and Ullah, I and Sulman, A}, title = {A novel IoT-fog-cloud-based healthcare system for monitoring and predicting COVID-19 outspread.}, journal = {The Journal of supercomputing}, volume = {78}, number = {2}, pages = {1783-1806}, pmid = {34177116}, issn = {0920-8542}, abstract = {Rapid communication of viral sicknesses is an arising public medical issue across the globe. Out of these, COVID-19 is viewed as the most critical and novel infection nowadays. The current investigation gives an effective framework for the monitoring and prediction of COVID-19 virus infection (C-19VI). To the best of our knowledge, no research work is focused on incorporating IoT technology for C-19 outspread over spatial-temporal patterns. Moreover, limited work has been done in the direction of prediction of C-19 in humans for controlling the spread of COVID-19. The proposed framework includes a four-level architecture for the expectation and avoidance of COVID-19 contamination. The presented model comprises COVID-19 Data Collection (C-19DC) level, COVID-19 Information Classification (C-19IC) level, COVID-19-Mining and Extraction (C-19ME) level, and COVID-19 Prediction and Decision Modeling (C-19PDM) level. Specifically, the presented model is used to empower a person/community to intermittently screen COVID-19 Fever Measure (C-19FM) and forecast it so that proactive measures are taken in advance. 
Additionally, for prescient purposes, the probabilistic examination of C-19VI is quantified as degree of membership, which is cumulatively characterized as a COVID-19 Fever Measure (C-19FM). Moreover, the prediction is realized utilizing the temporal recurrent neural network. Additionally, based on the self-organized mapping technique, the presence of C-19VI is determined over a geographical area. Simulation is performed over four challenging datasets. In contrast to other strategies, altogether improved outcomes in terms of classification efficiency, prediction viability, and reliability were registered for the introduced model.}, } @article {pmid34177036, year = {2022}, author = {Singh, A and Jindal, V and Sandhu, R and Chang, V}, title = {A scalable framework for smart COVID surveillance in the workplace using Deep Neural Networks and cloud computing.}, journal = {Expert systems}, volume = {39}, number = {3}, pages = {e12704}, pmid = {34177036}, issn = {1468-0394}, abstract = {A smart and scalable system is required to schedule various machine learning applications to control pandemics like COVID-19 using computing infrastructure provided by cloud and fog computing. This paper proposes a framework that considers the use case of smart office surveillance to monitor workplaces for detecting possible violations of COVID effectively. The proposed framework uses deep neural networks, fog computing and cloud computing to develop a scalable and time-sensitive infrastructure that can detect two major violations: wearing a mask and maintaining a minimum distance of 6 feet between employees in the office environment. The proposed framework is developed with the vision to integrate multiple machine learning applications and handle the computing infrastructures for pandemic applications. The proposed framework can be used by application developers for the rapid development of new applications based on the requirements and do not worry about scheduling. 
The proposed framework is tested for two independent applications and performed better than the traditional cloud environment in terms of latency and response time. The work done in this paper tries to bridge the gap between machine learning applications and their computing infrastructure for COVID-19.}, } @article {pmid34175609, year = {2021}, author = {Elnashar, A and Zeng, H and Wu, B and Fenta, AA and Nabil, M and Duerler, R}, title = {Soil erosion assessment in the Blue Nile Basin driven by a novel RUSLE-GEE framework.}, journal = {The Science of the total environment}, volume = {793}, number = {}, pages = {148466}, doi = {10.1016/j.scitotenv.2021.148466}, pmid = {34175609}, issn = {1879-1026}, mesh = {*Conservation of Natural Resources ; Environmental Monitoring ; Geographic Information Systems ; Soil ; *Soil Erosion ; }, abstract = {Assessment of soil loss and understanding its major drivers are essential to implement targeted management interventions. We have proposed and developed a Revised Universal Soil Loss Equation framework fully implemented in the Google Earth Engine cloud platform (RUSLE-GEE) for high spatial resolution (90 m) soil erosion assessment. Using RUSLE-GEE, we analyzed the soil loss rate for different erosion levels, land cover types, and slopes in the Blue Nile Basin. The results showed that the mean soil loss rate is 39.73, 57.98, and 6.40 t ha[-1] yr[-1] for the entire Blue Nile, Upper Blue Nile, and Lower Blue Nile Basins, respectively. Our results also indicated that soil protection measures should be implemented in approximately 27% of the Blue Nile Basin, as these areas face a moderate to high risk of erosion (>10 t ha[-1] yr[-1]). In addition, downscaling the Tropical Rainfall Measuring Mission (TRMM) precipitation data from 25 km to 1 km spatial resolution significantly impacts rainfall erosivity and soil loss rate. 
In terms of soil erosion assessment, the study showed the rapid characterization of soil loss rates that could be used to prioritize erosion mitigation plans to support sustainable land resources and tackle land degradation in the Blue Nile Basin.}, } @article {pmid34172112, year = {2021}, author = {Karhade, DS and Roach, J and Shrestha, P and Simancas-Pallares, MA and Ginnis, J and Burk, ZJS and Ribeiro, AA and Cho, H and Wu, D and Divaris, K}, title = {An Automated Machine Learning Classifier for Early Childhood Caries.}, journal = {Pediatric dentistry}, volume = {43}, number = {3}, pages = {191-197}, pmid = {34172112}, issn = {1942-5473}, support = {R03 DE028983/DE/NIDCR NIH HHS/United States ; U01 DE025046/DE/NIDCR NIH HHS/United States ; }, mesh = {Child ; Child, Preschool ; *Dental Caries ; *Dental Caries Susceptibility ; Humans ; Machine Learning ; North Carolina ; Nutrition Surveys ; Prevalence ; }, abstract = {Purpose: The purpose of the study was to develop and evaluate an automated machine learning algorithm (AutoML) for children's classification according to early childhood caries (ECC) status. Methods: Clinical, demographic, behavioral, and parent-reported oral health status information for a sample of 6,404 three- to five-year-old children (mean age equals 54 months) participating in an epidemiologic study of early childhood oral health in North Carolina was used. ECC prevalence (decayed, missing, and filled primary teeth surfaces [dmfs] score greater than zero, using an International Caries Detection and Assessment System score greater than or equal to three caries lesion detection threshold) was 54 percent. Ten sets of ECC predictors were evaluated for ECC classification accuracy (i.e., area under the ROC curve [AUC], sensitivity [Se], and positive predictive value [PPV]) using an AutoML deployment on Google Cloud, followed by internal validation and external replication. 
Results: A parsimonious model including two terms (i.e., children's age and parent-reported child oral health status: excellent/very good/good/fair/poor) had the highest AUC (0.74), Se (0.67), and PPV (0.64) scores and similar performance using an external National Health and Nutrition Examination Survey (NHANES) dataset (AUC equals 0.80, Se equals 0.73, PPV equals 0.49). Contrarily, a comprehensive model with 12 variables covering demographics (e.g., race/ethnicity, parental education), oral health behaviors, fluoride exposure, and dental home had worse performance (AUC equals 0.66, Se equals 0.54, PPV equals 0.61). Conclusions: Parsimonious automated machine learning early childhood caries classifiers, including single-item self-reports, can be valuable for ECC screening. The classifier can accommodate biological information that can help improve its performance in the future.}, } @article {pmid34155435, year = {2021}, author = {El Motaki, S and Yahyaouy, A and Gualous, H and Sabor, J}, title = {A new weighted fuzzy C-means clustering for workload monitoring in cloud datacenter platforms.}, journal = {Cluster computing}, volume = {24}, number = {4}, pages = {3367-3379}, pmid = {34155435}, issn = {1386-7857}, abstract = {The rapid growth in virtualization solutions has driven the widespread adoption of cloud computing paradigms among various industries and applications. This has led to a growing need for XaaS solutions and equipment to enable teleworking. To meet this need, cloud operators and datacenters have to overtake several challenges related to continuity, the quality of services provided, data security, and anomaly detection issues. Mainly, anomaly detection methods play a critical role in detecting virtual machines' abnormal behaviours that can potentially violate service level agreements established with users. Unsupervised machine learning techniques are among the most commonly used technologies for implementing anomaly detection systems. 
This paper introduces a novel clustering approach for analyzing virtual machine behaviour while running workloads in a system based on resource usage details (such as CPU utilization and downtime events). The proposed algorithm is inspired by the intuitive mechanism of flocking birds in nature to form reasonable clusters. Each starling movement's direction depends on self-information and information provided by other close starlings during the flight. Analogically, after associating a weight with each data sample to guide the formation of meaningful groups, each data element determines its next position in the feature space based on its current position and surroundings. Based on a realistic dataset and clustering validity indices, the experimental evaluation shows that the new weighted fuzzy c-means algorithm provides interesting results and outperforms the corresponding standard algorithm (weighted fuzzy c-means).}, } @article {pmid34155424, year = {2021}, author = {Donato, L and Scimone, C and Rinaldi, C and D'Angelo, R and Sidoti, A}, title = {New evaluation methods of read mapping by 17 aligners on simulated and empirical NGS data: an updated comparison of DNA- and RNA-Seq data from Illumina and Ion Torrent technologies.}, journal = {Neural computing & applications}, volume = {33}, number = {22}, pages = {15669-15692}, pmid = {34155424}, issn = {0941-0643}, abstract = {UNLABELLED: During the last (15) years, improved omics sequencing technologies have expanded the scale and resolution of various biological applications, generating high-throughput datasets that require carefully chosen software tools to be processed. Therefore, following the sequencing development, bioinformatics researchers have been challenged to implement alignment algorithms for next-generation sequencing reads. However, nowadays selection of aligners based on genome characteristics is poorly studied, so our benchmarking study extended the "state of art" comparing 17 different aligners. 
The chosen tools were assessed on empirical human DNA- and RNA-Seq data, as well as on simulated datasets in human and mouse, evaluating a set of parameters previously not considered in such kind of benchmarks. As expected, we found that each tool was the best in specific conditions. For Ion Torrent single-end RNA-Seq samples, the most suitable aligners were CLC and BWA-MEM, which reached the best results in terms of efficiency, accuracy, duplication rate, saturation profile and running time. About Illumina paired-end osteomyelitis transcriptomics data, instead, the best performer algorithm, together with the already cited CLC, resulted Novoalign, which excelled in accuracy and saturation analyses. Segemehl and DNASTAR performed the best on both DNA-Seq data, with Segemehl particularly suitable for exome data. In conclusion, our study could guide users in the selection of a suitable aligner based on genome and transcriptome characteristics. However, several other aspects, emerged from our work, should be considered in the evolution of alignment research area, such as the involvement of artificial intelligence to support cloud computing and mapping to multiple genomes.

SUPPLEMENTARY INFORMATION: The online version contains supplementary material available at 10.1007/s00521-021-06188-z.}, } @article {pmid34153189, year = {2021}, author = {Bichmann, L and Gupta, S and Rosenberger, G and Kuchenbecker, L and Sachsenberg, T and Ewels, P and Alka, O and Pfeuffer, J and Kohlbacher, O and Röst, H}, title = {DIAproteomics: A Multifunctional Data Analysis Pipeline for Data-Independent Acquisition Proteomics and Peptidomics.}, journal = {Journal of proteome research}, volume = {20}, number = {7}, pages = {3758-3766}, doi = {10.1021/acs.jproteome.1c00123}, pmid = {34153189}, issn = {1535-3907}, mesh = {*Data Analysis ; Mass Spectrometry ; *Proteomics ; Reproducibility of Results ; Software ; }, abstract = {Data-independent acquisition (DIA) is becoming a leading analysis method in biomedical mass spectrometry. The main advantages include greater reproducibility and sensitivity and a greater dynamic range compared with data-dependent acquisition (DDA). However, the data analysis is complex and often requires expert knowledge when dealing with large-scale data sets. Here we present DIAproteomics, a multifunctional, automated, high-throughput pipeline implemented in the Nextflow workflow management system that allows one to easily process proteomics and peptidomics DIA data sets on diverse compute infrastructures. The central components are well-established tools such as the OpenSwathWorkflow for the DIA spectral library search and PyProphet for the false discovery rate assessment. In addition, it provides options to generate spectral libraries from existing DDA data and to carry out the retention time and chromatogram alignment. The output includes annotated tables and diagnostic visualizations from the statistical postprocessing and computation of fold-changes across pairwise conditions, predefined in an experimental design. 
DIAproteomics is well documented open-source software and is available under a permissive license to the scientific community at https://www.openms.de/diaproteomics/.}, } @article {pmid34143822, year = {2021}, author = {Li, J and Peng, B and Wei, Y and Ye, H}, title = {Accurate extraction of surface water in complex environment based on Google Earth Engine and Sentinel-2.}, journal = {PloS one}, volume = {16}, number = {6}, pages = {e0253209}, pmid = {34143822}, issn = {1932-6203}, mesh = {Environmental Monitoring/*methods ; *Satellite Imagery ; Sri Lanka ; *Water ; *Water Resources ; }, abstract = {To realize the accurate extraction of surface water in complex environment, this study takes Sri Lanka as the study area owing to the complex geography and various types of water bodies. Based on Google Earth engine and Sentinel-2 images, an automatic water extraction model in complex environment(AWECE) was developed. The accuracy of water extraction by AWECE, NDWI, MNDWI and the revised version of multi-spectral water index (MuWI-R) models was evaluated from visual interpretation and quantitative analysis. The results show that the AWECE model could significantly improve the accuracy of water extraction in complex environment, with an overall accuracy of 97.16%, and an extremely low omission error (0.74%) and commission error (2.35%). The AEWCE model could effectively avoid the influence of cloud shadow, mountain shadow and paddy soil on water extraction accuracy. The model can be widely applied in cloudy, mountainous and other areas with complex environments, which has important practical significance for water resources investigation, monitoring and protection.}, } @article {pmid34141897, year = {2021}, author = {Azhir, E and Jafari Navimipour, N and Hosseinzadeh, M and Sharifi, A and Darwesh, A}, title = {A technique for parallel query optimization using MapReduce framework and a semantic-based clustering method.}, journal = {PeerJ. 
Computer science}, volume = {7}, number = {}, pages = {e580}, pmid = {34141897}, issn = {2376-5992}, abstract = {Query optimization is the process of identifying the best Query Execution Plan (QEP). The query optimizer produces a close to optimal QEP for the given queries based on the minimum resource usage. The problem is that for a given query, there are plenty of different equivalent execution plans, each with a corresponding execution cost. To produce an effective query plan thus requires examining a large number of alternative plans. Access plan recommendation is an alternative technique to database query optimization, which reuses the previously-generated QEPs to execute new queries. In this technique, the query optimizer uses clustering methods to identify groups of similar queries. However, clustering such large datasets is challenging for traditional clustering algorithms due to huge processing time. Numerous cloud-based platforms have been introduced that offer low-cost solutions for the processing of distributed queries such as Hadoop, Hive, Pig, etc. This paper has applied and tested a model for clustering variant sizes of large query datasets parallelly using MapReduce. The results demonstrate the effectiveness of the parallel implementation of query workloads clustering to achieve good scalability.}, } @article {pmid34136534, year = {2021}, author = {Inamura, T and Mizuchi, Y}, title = {SIGVerse: A Cloud-Based VR Platform for Research on Multimodal Human-Robot Interaction.}, journal = {Frontiers in robotics and AI}, volume = {8}, number = {}, pages = {549360}, pmid = {34136534}, issn = {2296-9144}, abstract = {Research on Human-Robot Interaction (HRI) requires the substantial consideration of an experimental design, as well as a significant amount of time to practice the subject experiment. Recent technology in virtual reality (VR) can potentially address these time and effort challenges. 
The significant advantages of VR systems for HRI are: 1) cost reduction, as experimental facilities are not required in a real environment; 2) provision of the same environmental and embodied interaction conditions to test subjects; 3) visualization of arbitrary information and situations that cannot occur in reality, such as playback of past experiences, and 4) ease of access to an immersive and natural interface for robot/avatar teleoperations. Although VR tools with their features have been applied and developed in previous HRI research, all-encompassing tools or frameworks remain unavailable. In particular, the benefits of integration with cloud computing have not been comprehensively considered. Hence, the purpose of this study is to propose a research platform that can comprehensively provide the elements required for HRI research by integrating VR and cloud technologies. To realize a flexible and reusable system, we developed a real-time bridging mechanism between the robot operating system (ROS) and Unity. To confirm the feasibility of the system in a practical HRI scenario, we applied the proposed system to three case studies, including a robot competition named RoboCup@Home. 
Via these case studies, we validated the system's usefulness and its potential for the development and evaluation of social intelligence via multimodal HRI.}, } @article {pmid34136134, year = {2021}, author = {Paul-Gilloteaux, P and Tosi, S and Hériché, JK and Gaignard, A and Ménager, H and Marée, R and Baecker, V and Klemm, A and Kalaš, M and Zhang, C and Miura, K and Colombelli, J}, title = {Bioimage analysis workflows: community resources to navigate through a complex ecosystem.}, journal = {F1000Research}, volume = {10}, number = {}, pages = {320}, pmid = {34136134}, issn = {2046-1402}, mesh = {Algorithms ; *Computational Biology ; *Ecosystem ; Information Storage and Retrieval ; Workflow ; }, abstract = {Workflows are the keystone of bioimage analysis, and the NEUBIAS (Network of European BioImage AnalystS) community is trying to gather the actors of this field and organize the information around them. One of its most recent outputs is the opening of the F1000Research NEUBIAS gateway, whose main objective is to offer a channel of publication for bioimage analysis workflows and associated resources. In this paper we want to express some personal opinions and recommendations related to finding, handling and developing bioimage analysis workflows. The emergence of "big data" in bioimaging and resource-intensive analysis algorithms make local data storage and computing solutions a limiting factor. At the same time, the need for data sharing with collaborators and a general shift towards remote work, have created new challenges and avenues for the execution and sharing of bioimage analysis workflows. These challenges are to reproducibly run workflows in remote environments, in particular when their components come from different software packages, but also to document them and link their parameters and results by following the FAIR principles (Findable, Accessible, Interoperable, Reusable) to foster open and reproducible science. 
In this opinion paper, we focus on giving some directions to the reader to tackle these challenges and navigate through this complex ecosystem, in order to find and use workflows, and to compare workflows addressing the same problem. We also discuss tools to run workflows in the cloud and on High Performance Computing resources, and suggest ways to make these workflows FAIR.}, } @article {pmid34127909, year = {2023}, author = {Tan, C and Lin, J}, title = {A new QoE-based prediction model for evaluating virtual education systems with COVID-19 side effects using data mining.}, journal = {Soft computing}, volume = {27}, number = {3}, pages = {1699-1713}, pmid = {34127909}, issn = {1432-7643}, abstract = {Today, emerging technologies such as 5G Internet of things (IoT), virtual reality and cloud-edge computing have enhanced and upgraded higher education environments in universities, colleagues and research centers. Computer-assisted learning systems with aggregating IoT applications and smart devices have improved the e-learning systems by enabling remote monitoring and screening of the behavioral aspects of teaching and education scores of students. On the other side, educational data mining has improved the higher education systems by predicting and analyzing the behavioral aspects of teaching and education scores of students. Due to an unexpected and huge increase in the number of patients during coronavirus (COVID-19) pandemic, all universities, campuses, schools, research centers, many scientific collaborations and meetings have closed and forced to initiate online teaching, e-learning and virtual meeting. Due to importance of behavioral aspects of teaching and education between lecturers and students, prediction of quality of experience (QoE) in virtual education systems is a critical issue. This paper presents a new prediction model to detect technical aspects of teaching and e-learning in virtual education systems using data mining. 
Association rules mining and supervised techniques are applied to detect efficient QoE factors on virtual education systems. The experimental results described that the suggested prediction model meets the proper accuracy, precision and recall factors for predicting the behavioral aspects of teaching and e-learning for students in virtual education systems.}, } @article {pmid34126874, year = {2021}, author = {Abbasi, WA and Abbas, SA and Andleeb, S}, title = {PANDA: Predicting the change in proteins binding affinity upon mutations by finding a signal in primary structures.}, journal = {Journal of bioinformatics and computational biology}, volume = {19}, number = {4}, pages = {2150015}, doi = {10.1142/S0219720021500153}, pmid = {34126874}, issn = {1757-6334}, mesh = {Amino Acid Sequence ; *Machine Learning ; Mutation ; Protein Binding ; *Proteins/genetics/metabolism ; }, abstract = {Accurately determining a change in protein binding affinity upon mutations is important to find novel therapeutics and to assist mutagenesis studies. Determination of change in binding affinity upon mutations requires sophisticated, expensive, and time-consuming wet-lab experiments that can be supported with computational methods. Most of the available computational prediction techniques depend upon protein structures that bound their applicability to only protein complexes with recognized 3D structures. In this work, we explore the sequence-based prediction of change in protein binding affinity upon mutation and question the effectiveness of [Formula: see text]-fold cross-validation (CV) across mutations adopted in previous studies to assess the generalization ability of such predictors with no known mutation during training. We have used protein sequence information instead of protein structures along with machine learning techniques to accurately predict the change in protein binding affinity upon mutation. 
Our proposed sequence-based novel change in protein binding affinity predictor called PANDA performs comparably to the existing methods gauged through an appropriate CV scheme and an external independent test dataset. On an external test dataset, our proposed method gives a maximum Pearson correlation coefficient of 0.52 in comparison to the state-of-the-art existing protein structure-based method called MutaBind which gives a maximum Pearson correlation coefficient of 0.59. Our proposed protein sequence-based method, to predict a change in binding affinity upon mutations, has wide applicability and comparable performance in comparison to existing protein structure-based methods. We made PANDA easily accessible through a cloud-based webserver and python code available at https://sites.google.com/view/wajidarshad/software and https://github.com/wajidarshad/panda, respectively.}, } @article {pmid34121460, year = {2021}, author = {Laske, TG and Garshelis, DL and Iles, TL and Iaizzo, PA}, title = {An engineering perspective on the development and evolution of implantable cardiac monitors in free-living animals.}, journal = {Philosophical transactions of the Royal Society of London. Series B, Biological sciences}, volume = {376}, number = {1830}, pages = {20200217}, pmid = {34121460}, issn = {1471-2970}, mesh = {Animals ; Caniformia/*physiology ; Engineering/*instrumentation ; Heart Function Tests/instrumentation/*veterinary ; Monitoring, Ambulatory/instrumentation/veterinary ; Physiology/*instrumentation ; }, abstract = {The latest technologies associated with implantable physiological monitoring devices can record multiple channels of data (including: heart rates and rhythms, activity, temperature, impedance and posture), and coupled with powerful software applications, have provided novel insights into the physiology of animals in the wild. 
This perspective details past challenges and lessons learned from the uses and developments of implanted biologgers designed for human clinical application in our research on free-ranging American black bears (Ursus americanus). In addition, we reference other research by colleagues and collaborators who have leveraged these devices in their work, including: brown bears (Ursus arctos), grey wolves (Canis lupus), moose (Alces alces), maned wolves (Chrysocyon brachyurus) and southern elephant seals (Mirounga leonina). We also discuss the potentials for applications of such devices across a range of other species. To date, the devices described have been used in fifteen different wild species, with publications pending in many instances. We have focused our physiological research on the analyses of heart rates and rhythms and thus special attention will be paid to this topic. We then discuss some major expected step changes such as improvements in sensing algorithms, data storage, and the incorporation of next-generation short-range wireless telemetry. The latter provides new avenues for data transfer, and when combined with cloud-based computing, it not only provides means for big data storage but also the ability to readily leverage high-performance computing platforms using artificial intelligence and machine learning algorithms. These advances will dramatically increase both data quantity and quality and will facilitate the development of automated recognition of extreme physiological events or key behaviours of interest in a broad array of environments, thus further aiding wildlife monitoring and management. 
This article is part of the theme issue 'Measuring physiology in free-living animals (Part I)'.}, } @article {pmid34109196, year = {2021}, author = {Macdonald, JC and Isom, DC and Evans, DD and Page, KJ}, title = {Digital Innovation in Medicinal Product Regulatory Submission, Review, and Approvals to Create a Dynamic Regulatory Ecosystem-Are We Ready for a Revolution?}, journal = {Frontiers in medicine}, volume = {8}, number = {}, pages = {660808}, pmid = {34109196}, issn = {2296-858X}, abstract = {The pace of scientific progress over the past several decades within the biological, drug development, and the digital realm has been remarkable. The 'omics revolution has enabled a better understanding of the biological basis of disease, unlocking the possibility of new products such as gene and cell therapies which offer novel patient centric solutions. Innovative approaches to clinical trial designs promise greater efficiency, and in recent years, scientific collaborations, and consortia have been developing novel approaches to leverage new sources of evidence such as real-world data, patient experience data, and biomarker data. Alongside this there have been great strides in digital innovation. Cloud computing has become mainstream and the internet of things and blockchain technology have become a reality. These examples of transformation stand in sharp contrast to the current inefficient approach for regulatory submission, review, and approval of medicinal products. This process has not fundamentally changed since the beginning of medicine regulation in the late 1960s. Fortunately, progressive initiatives are emerging that will enrich and streamline regulatory decision making and deliver patient centric therapies, if they are successful in transforming the current transactional construct and harnessing scientific and technological advances. 
Such a radical transformation will not be simple for both regulatory authorities and company sponsors, nor will progress be linear. We examine the shortcomings of the current system with its entrenched and variable business processes, offer examples of progress as catalysts for change, and make the case for a new cloud based model. To optimize navigation toward this reality we identify implications and regulatory design questions which must be addressed. We conclude that a new model is possible and is slowly emerging through cumulative change initiatives that question, challenge, and redesign best practices, roles, and responsibilities, and that this must be combined with adaptation of behaviors and acquisition of new skills.}, } @article {pmid34101767, year = {2021}, author = {Abdel-Kader, RF and El-Sayad, NE and Rizk, RY}, title = {Efficient energy and completion time for dependent task computation offloading algorithm in industry 4.0.}, journal = {PloS one}, volume = {16}, number = {6}, pages = {e0252756}, pmid = {34101767}, issn = {1932-6203}, mesh = {Cloud Computing ; Industry/instrumentation/*methods ; *Internet of Things ; *Software ; }, abstract = {Rapid technological development has revolutionized the industrial sector. Internet of Things (IoT) started to appear in many fields, such as health care and smart cities. A few years later, IoT was supported by industry, leading to what is called Industry 4.0. In this paper, a cloud-assisted fog-networking architecture is implemented in an IoT environment with a three-layer network. An efficient energy and completion time for dependent task computation offloading (ET-DTCO) algorithm is proposed, and it considers two quality-of-service (QoS) parameters: efficient energy and completion time offloading for dependent tasks in Industry 4.0. 
The proposed solution employs the Firefly algorithm to optimize the process of the selection-offloading computing mode and determine the optimal solution for performing tasks locally or offloaded to a fog or cloud considering the task dependency. Moreover, the proposed algorithm is compared with existing techniques. Simulation results proved that the proposed ET-DTCO algorithm outperforms other offloading algorithms in minimizing energy consumption and completion time while enhancing the overall efficiency of the system.}, } @article {pmid34095775, year = {2021}, author = {O'Grady, N and Gibbs, DL and Abdilleh, K and Asare, A and Asare, S and Venters, S and Brown-Swigart, L and Hirst, GL and Wolf, D and Yau, C and van 't Veer, LJ and Esserman, L and Basu, A}, title = {PRoBE the cloud toolkit: finding the best biomarkers of drug response within a breast cancer clinical trial.}, journal = {JAMIA open}, volume = {4}, number = {2}, pages = {ooab038}, pmid = {34095775}, issn = {2574-2531}, abstract = {OBJECTIVES: In this paper, we discuss leveraging cloud-based platforms to collect, visualize, analyze, and share data in the context of a clinical trial. Our cloud-based infrastructure, Patient Repository of Biomolecular Entities (PRoBE), has given us the opportunity for uniform data structure, more efficient analysis of valuable data, and increased collaboration between researchers.

MATERIALS AND METHODS: We utilize a multi-cloud platform to manage and analyze data generated from the clinical Investigation of Serial Studies to Predict Your Therapeutic Response with Imaging And moLecular Analysis 2 (I-SPY 2 TRIAL). A collaboration with the Institute for Systems Biology Cancer Gateway in the Cloud has additionally given us access to public genomic databases. Applications to I-SPY 2 data have been built using R Shiny, while leveraging Google's BigQuery tables and SQL commands for data mining.

RESULTS: We highlight the implementation of PRoBE in several unique case studies including prediction of biomarkers associated with clinical response, access to the Pan-Cancer Atlas, and integrating pathology images within the cloud. Our data integration pipelines, documentation, and all codebase will be placed in a Github repository.

DISCUSSION AND CONCLUSION: We are hoping to develop risk stratification diagnostics by integrating additional molecular, magnetic resonance imaging, and pathology markers into PRoBE to better predict drug response. A robust cloud infrastructure and tool set can help integrate these large datasets to make valuable predictions of response to multiple agents. For that reason, we are continuously improving PRoBE to advance the way data is stored, accessed, and analyzed in the I-SPY 2 clinical trial.}, } @article {pmid34095236, year = {2021}, author = {Kapitonov, A and Lonshakov, S and Bulatov, V and Montazam, BK and White, J}, title = {Robot-as-a-Service: From Cloud to Peering Technologies.}, journal = {Frontiers in robotics and AI}, volume = {8}, number = {}, pages = {560829}, pmid = {34095236}, issn = {2296-9144}, abstract = {This article is devoted to the historical overview of the Robot-as-a-Service concept. Several major scientific publications on the development of Robot-as-a-Service systems based on a service-oriented paradigm are considered. Much attention is paid to the analysis of a centralized approach in the development using cloud computing services and the search for the limitations of this approach. 
As a result, general conclusions on the reviewed publications are given, as well as the authors' own vision of Robot-as-a-Service systems based on the concept of robot economics.}, } @article {pmid34092679, year = {2021}, author = {Li, F and Shankar, A and Santhosh Kumar, B}, title = {Fog-Internet of things-assisted multi-sensor intelligent monitoring model to analyze the physical health condition.}, journal = {Technology and health care : official journal of the European Society for Engineering and Medicine}, volume = {29}, number = {6}, pages = {1319-1337}, doi = {10.3233/THC-213009}, pmid = {34092679}, issn = {1878-7401}, mesh = {Cloud Computing ; Delivery of Health Care ; Humans ; *Internet of Things ; *Telemedicine ; *Wearable Electronic Devices ; }, abstract = {BACKGROUND: Internet of Things (IoT) technology provides a tremendous and structured solution to tackle service deliverance aspects of healthcare in terms of mobile health and remote patient tracking. In medicine observation applications, IoT and cloud computing serves as an assistant in the health sector and plays an incredibly significant role. Health professionals and technicians have built an excellent platform for people with various illnesses, leveraging principles of wearable technology, wireless channels, and other remote devices for low-cost healthcare monitoring.

OBJECTIVE: This paper proposed the Fog-IoT-assisted multisensor intelligent monitoring model (FIoT-MIMM) for analyzing the patient's physical health condition.

METHOD: The proposed system uses a multisensor device for collecting biometric and medical observing data. The main point is to continually generate emergency alerts on mobile phones from the fog system to users. For the precautionary steps and suggestions for patients' health, a fog layer's temporal information is used.

RESULTS: Experimental findings show that the proposed FIoT-MIMM model has less response time and high accuracy in determining a patient's condition than other existing methods. Furthermore, decision making based on real-time healthcare information further improves the utility of the suggested model.}, } @article {pmid34092673, year = {2021}, author = {Cui, M and Baek, SS and Crespo, RG and Premalatha, R}, title = {Internet of things-based cloud computing platform for analyzing the physical health condition.}, journal = {Technology and health care : official journal of the European Society for Engineering and Medicine}, volume = {29}, number = {6}, pages = {1233-1247}, doi = {10.3233/THC-213003}, pmid = {34092673}, issn = {1878-7401}, mesh = {*Cloud Computing ; Computer Systems ; Delivery of Health Care ; Humans ; *Internet of Things ; Models, Theoretical ; *Physical Fitness ; }, abstract = {BACKGROUND: Health monitoring is important for early disease diagnosis and will reduce the discomfort and treatment expenses, which is very relevant in terms of prevention. The early diagnosis and treatment of multiple conditions will improve solutions to the patient's healthcare radically. A concept model for the real-time patient tracking system is the primary goal of the method. The Internet of things (IoT) has made health systems accessible for programs based on the value of patient health.

OBJECTIVE: In this paper, the IoT-based cloud computing for patient health monitoring framework (IoT-CCPHM), has been proposed for effective monitoring of the patients.

METHOD: The emerging connected sensors and IoT devices monitor and test the cardiac speed, oxygen saturation percentage, body temperature, and patient's eye movement. The collected data are used in the cloud database to evaluate the patient's health, and the effects of all measures are stored. The IoT-CCPHM maintains that the medical record is processed in the cloud servers.

RESULTS: The experimental results show that patient health monitoring is a reliable way to improve health effectively.}, } @article {pmid34086595, year = {2021}, author = {Weinstein, RS and Holcomb, MJ and Mo, J and Yonsetto, P and Bojorquez, O and Grant, M and Wendel, CS and Tallman, NJ and Ercolano, E and Cidav, Z and Hornbrook, MC and Sun, V and McCorkle, R and Krouse, RS}, title = {An Ostomy Self-management Telehealth Intervention for Cancer Survivors: Technology-Related Findings From a Randomized Controlled Trial.}, journal = {Journal of medical Internet research}, volume = {23}, number = {9}, pages = {e26545}, pmid = {34086595}, issn = {1438-8871}, mesh = {*Cancer Survivors ; Humans ; *Neoplasms ; *Ostomy ; *Self-Management ; Technology ; *Telemedicine ; }, abstract = {BACKGROUND: An Ostomy Self-management Telehealth (OSMT) intervention by nurse educators and peer ostomates can equip new ostomates with critical knowledge regarding ostomy care. A telehealth technology assessment aim was to measure telehealth engineer support requirements for telehealth technology-related (TTR) incidents encountered during OSMT intervention sessions held via a secure cloud-based videoconferencing service, Zoom for Healthcare.

OBJECTIVE: This paper examines technology-related challenges, issues, and opportunities encountered in the use of telehealth in a randomized controlled trial intervention for cancer survivors living with a permanent ostomy.

METHODS: The Arizona Telemedicine Program provided telehealth engineering support for 105 OSMT sessions, scheduled for 90 to 120 minutes each, over a 2-year period. The OSMT groups included up to 15 participants, comprising 4-6 ostomates, 4-6 peer ostomates, 2 nurse educators, and 1 telehealth engineer. OSMT-session TTR incidents were recorded contemporaneously in detailed notes by the research staff. TTR incidents were categorized and tallied.

RESULTS: A total of 97.1% (102/105) OSMT sessions were completed as scheduled. In total, 3 OSMT sessions were not held owing to non-technology-related reasons. Of the 93 ostomates who participated in OSMT sessions, 80 (86%) completed their OSMT curriculum. TTR incidents occurred in 36.3% (37/102) of the completed sessions with varying disruptive impacts. No sessions were canceled or rescheduled because of TTR incidents. Disruptions from TTR incidents were minimized by following the TTR incident prevention and incident response plans.

CONCLUSIONS: Telehealth videoconferencing technology can enable ostomates to participate in ostomy self-management education by incorporating dedicated telehealth engineering support. Potentially, OSMT greatly expands the availability of ostomy self-management education for new ostomates.

TRIAL REGISTRATION: ClinicalTrials.gov NCT02974634; https://clinicaltrials.gov/ct2/show/NCT02974634.}, } @article {pmid34086476, year = {2021}, author = {Lin, Z and Zou, J and Liu, S and Peng, C and Li, Z and Wan, X and Fang, D and Yin, J and Gobbo, G and Chen, Y and Ma, J and Wen, S and Zhang, P and Yang, M}, title = {A Cloud Computing Platform for Scalable Relative and Absolute Binding Free Energy Predictions: New Opportunities and Challenges for Drug Discovery.}, journal = {Journal of chemical information and modeling}, volume = {61}, number = {6}, pages = {2720-2732}, doi = {10.1021/acs.jcim.0c01329}, pmid = {34086476}, issn = {1549-960X}, mesh = {Artificial Intelligence ; *Cloud Computing ; *Drug Discovery ; Entropy ; Thermodynamics ; }, abstract = {Free energy perturbation (FEP) has become widely used in drug discovery programs for binding affinity prediction between candidate compounds and their biological targets. However, limitations of FEP applications also exist, including, but not limited to, high cost, long waiting time, limited scalability, and breadth of application scenarios. To overcome these problems, we have developed XFEP, a scalable cloud computing platform for both relative and absolute free energy predictions using optimized simulation protocols. XFEP enables large-scale FEP calculations in a more efficient, scalable, and affordable way, for example, the evaluation of 5000 compounds can be performed in 1 week using 50-100 GPUs with a computing cost roughly equivalent to the cost for the synthesis of only one new compound. 
By combining these capabilities with artificial intelligence techniques for goal-directed molecule generation and evaluation, new opportunities can be explored for FEP applications in the drug discovery stages of hit identification, hit-to-lead, and lead optimization based not only on structure exploitation within the given chemical series but also including evaluation and comparison of completely unrelated molecules during structure exploration in a larger chemical space. XFEP provides the basis for scalable FEP applications to become more widely used in drug discovery projects and to speed up the drug discovery process from hit identification to preclinical candidate compound nomination.}, } @article {pmid34084936, year = {2021}, author = {Heidari, A and Jafari Navimipour, N}, title = {A new SLA-aware method for discovering the cloud services using an improved nature-inspired optimization algorithm.}, journal = {PeerJ. Computer science}, volume = {7}, number = {}, pages = {e539}, pmid = {34084936}, issn = {2376-5992}, abstract = {Cloud computing is one of the most important computing patterns that use a pay-as-you-go manner to process data and execute applications. Therefore, numerous enterprises are migrating their applications to cloud environments. Not only do intensive applications deal with enormous quantities of data, but they also demonstrate compute-intensive properties very frequently. The dynamicity, coupled with the ambiguity between marketed resources and resource requirement queries from users, remains important issues that hamper efficient discovery in a cloud environment. Cloud service discovery becomes a complex problem because of the increase in network size and complexity. Complexity and network size keep increasing dynamically, making it a complex NP-hard problem that requires effective service discovery approaches. 
One of the most famous cloud service discovery methods is the Ant Colony Optimization (ACO) algorithm; however, it suffers from a load balancing problem among the discovered nodes. If the workload balance is inefficient, it limits the use of resources. This paper solved this problem by applying an Inverted Ant Colony Optimization (IACO) algorithm for load-aware service discovery in cloud computing. The IACO considers the pheromones' repulsion instead of attraction. We design a model for service discovery in the cloud environment to overcome the traditional shortcomings. Numerical results demonstrate that the proposed mechanism can obtain an efficient service discovery method. The algorithm is simulated using a CloudSim simulator, and the result shows better performance. Reducing energy consumption, mitigate response time, and better Service Level Agreement (SLA) violation in the cloud environments are the advantages of the proposed method.}, } @article {pmid34084926, year = {2021}, author = {Ali, A and Ahmed, M and Khan, A and Anjum, A and Ilyas, M and Helfert, M}, title = {VisTAS: blockchain-based visible and trusted remote authentication system.}, journal = {PeerJ. Computer science}, volume = {7}, number = {}, pages = {e516}, pmid = {34084926}, issn = {2376-5992}, abstract = {The information security domain focuses on security needs at all levels in a computing environment in either the Internet of Things, Cloud Computing, Cloud of Things, or any other implementation. Data, devices, services, or applications and communication are required to be protected and provided by information security shields at all levels and in all working states. Remote authentication is required to perform different administrative operations in an information system, and Administrators have full access to the system and may pose insider threats. Superusers and administrators are the most trusted persons in an organisation. 
"Trust but verify" is an approach to have an eye on the superusers and administrators. Distributed ledger technology (Blockchain-based data storage) is an immutable data storage scheme and provides a built-in facility to share statistics among peers. Distributed ledgers are proposed to provide visible security and non-repudiation, which securely records administrators' authentications requests. The presence of security, privacy, and accountability measures establish trust among its stakeholders. Securing information in an electronic data processing system is challenging, i.e., providing services and access control for the resources to only legitimate users. Authentication plays a vital role in systems' security; therefore, authentication and identity management are the key subjects to provide information security services. The leading cause of information security breaches is the failure of identity management/authentication systems and insider threats. In this regard, visible security measures have more deterrence than other schemes. In this paper, an authentication scheme, "VisTAS," has been introduced, which provides visible security and trusted authentication services to the tenants and keeps the records in the blockchain.}, } @article {pmid34084925, year = {2021}, author = {Cambronero, ME and Bernal, A and Valero, V and Cañizares, PC and Núñez, A}, title = {Profiling SLAs for cloud system infrastructures and user interactions.}, journal = {PeerJ. Computer science}, volume = {7}, number = {}, pages = {e513}, pmid = {34084925}, issn = {2376-5992}, abstract = {Cloud computing has emerged as a cutting-edge technology which is widely used by both private and public institutions, since it eliminates the capital expense of buying, maintaining, and setting up both hardware and software. Clients pay for the services they use, under the so-called Service Level Agreements (SLAs), which are the contracts that establish the terms and costs of the services. 
In this paper, we propose the CloudCost UML profile, which allows the modeling of cloud architectures and the users' behavior when they interact with the cloud to request resources. We then investigate how to increase the profits of cloud infrastructures by using price schemes. For this purpose, we distinguish between two types of users in the SLAs: regular and high-priority users. Regular users do not require a continuous service, so they can wait to be attended to. In contrast, high-priority users require a constant and immediate service, so they pay a greater price for their services. In addition, a computer-aided design tool, called MSCC (Modeling SLAs Cost Cloud), has been implemented to support the CloudCost profile, which enables the creation of specific cloud scenarios, as well as their edition and validation. Finally, we present a complete case study to illustrate the applicability of the CloudCost profile, thus making it possible to draw conclusions about how to increase the profits of the cloud infrastructures studied by adjusting the different cloud parameters and the resource configuration.}, } @article {pmid34073726, year = {2021}, author = {Tropea, M and De Rango, F and Nevigato, N and Bitonti, L and Pupo, F}, title = {SCARE: A Novel Switching and Collision Avoidance pRocEss for Connected Vehicles Using Virtualization and Edge Computing Paradigm.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {11}, pages = {}, pmid = {34073726}, issn = {1424-8220}, abstract = {In this paper, some collision avoidance systems based on MEC in a VANET environment are proposed and investigated. Micro services at edge are considered to support service continuity in vehicle communication and advertising. This considered system makes use of cloud and edge computing, allowing to switch communication from edge to cloud server and vice versa when possible, trying to guarantee the required constraints and balancing the communication among the servers. 
Simulation results were used to evaluate the performance of three considered mechanisms: the first one considering only edge with load balancing, the second one using edge/cloud switching and the third one using edge with load balancing and collision avoidance advertising.}, } @article {pmid34072637, year = {2021}, author = {Li, DC and Huang, CT and Tseng, CW and Chou, LD}, title = {Fuzzy-Based Microservice Resource Management Platform for Edge Computing in the Internet of Things.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {11}, pages = {}, pmid = {34072637}, issn = {1424-8220}, support = {108-2221-E-008-033-MY3 and 105-2221-E-008-071-MY.//Ministry of Science and Technology, Taiwan/ ; }, abstract = {Edge computing exhibits the advantages of real-time operation, low latency, and low network cost. It has become a key technology for realizing smart Internet of Things applications. Microservices are being used by an increasing number of edge computing networks because of their sufficiently small code, reduced program complexity, and flexible deployment. However, edge computing has more limited resources than cloud computing, and thus edge computing networks have higher requirements for the overall resource scheduling of running microservices. Accordingly, the resource management of microservice applications in edge computing networks is a crucial issue. In this study, we developed and implemented a microservice resource management platform for edge computing networks. We designed a fuzzy-based microservice computing resource scaling (FMCRS) algorithm that can dynamically control the resource expansion scale of microservices. We proposed and implemented two microservice resource expansion methods based on the resource usage of edge network computing nodes. 
We conducted the experimental analysis in six scenarios and the experimental results proved that the designed microservice resource management platform can reduce the response time for microservice resource adjustments and dynamically expand microservices horizontally and vertically. Compared with other state-of-the-art microservice resource management methods, FMCRS can reduce sudden surges in overall network resource allocation, and thus, it is more suitable for the edge computing microservice management environment.}, } @article {pmid34072301, year = {2021}, author = {Botez, R and Costa-Requena, J and Ivanciu, IA and Strautiu, V and Dobrota, V}, title = {SDN-Based Network Slicing Mechanism for a Scalable 4G/5G Core Network: A Kubernetes Approach.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {11}, pages = {}, pmid = {34072301}, issn = {1424-8220}, abstract = {Managing the large volumes of IoT and M2M traffic requires the evaluation of the scalability and reliability for all the components in the end-to-end system. This includes connectivity, mobile network functions, and application or services receiving and processing the data from end devices. Firstly, this paper discusses the design of a containerized IoT and M2M application and the mechanisms for delivering automated scalability and high availability when deploying it in: (1) the edge using balenaCloud; (2) the Amazon Web Services cloud with EC2 instances; and (3) the dedicated Amazon Web Services IoT service. The experiments showed that there are no significant differences between edge and cloud deployments regarding resource consumption. Secondly, the solutions for scaling the 4G/5G network functions and mobile backhaul that provide the connectivity between devices and IoT/M2M applications are analyzed. In this case, the scalability and high availability of the 4G/5G components are provided by Kubernetes. 
The experiments showed that our proposed scaling algorithm for network slicing managed with SDN guarantees the necessary radio and network resources for end-to-end high availability.}, } @article {pmid34072232, year = {2021}, author = {Shoeibi, A and Khodatars, M and Ghassemi, N and Jafari, M and Moridian, P and Alizadehsani, R and Panahiazar, M and Khozeimeh, F and Zare, A and Hosseini-Nejad, H and Khosravi, A and Atiya, AF and Aminshahidi, D and Hussain, S and Rouhani, M and Nahavandi, S and Acharya, UR}, title = {Epileptic Seizures Detection Using Deep Learning Techniques: A Review.}, journal = {International journal of environmental research and public health}, volume = {18}, number = {11}, pages = {}, pmid = {34072232}, issn = {1660-4601}, mesh = {Algorithms ; Artificial Intelligence ; *Deep Learning ; Electroencephalography ; *Epilepsy/diagnosis ; Humans ; Seizures/diagnosis ; }, abstract = {A variety of screening approaches have been proposed to diagnose epileptic seizures, using electroencephalography (EEG) and magnetic resonance imaging (MRI) modalities. Artificial intelligence encompasses a variety of areas, and one of its branches is deep learning (DL). Before the rise of DL, conventional machine learning algorithms involving feature extraction were performed. This limited their performance to the ability of those handcrafting the features. However, in DL, the extraction of features and classification are entirely automated. The advent of these techniques in many areas of medicine, such as in the diagnosis of epileptic seizures, has made significant advances. In this study, a comprehensive overview of works focused on automated epileptic seizure detection using DL techniques and neuroimaging modalities is presented. Various methods proposed to diagnose epileptic seizures automatically using EEG and MRI modalities are described. In addition, rehabilitation systems developed for epileptic seizures using DL have been analyzed, and a summary is provided. 
The rehabilitation tools include cloud computing techniques and hardware required for implementation of DL algorithms. The important challenges in accurate detection of automated epileptic seizures using DL with EEG and MRI modalities are discussed. The advantages and limitations in employing DL-based techniques for epileptic seizures diagnosis are presented. Finally, the most promising DL models proposed and possible future works on automated epileptic seizure detection are delineated.}, } @article {pmid34071801, year = {2021}, author = {Rashed, EA and Hirata, A}, title = {One-Year Lesson: Machine Learning Prediction of COVID-19 Positive Cases with Meteorological Data and Mobility Estimate in Japan.}, journal = {International journal of environmental research and public health}, volume = {18}, number = {11}, pages = {}, pmid = {34071801}, issn = {1660-4601}, mesh = {*COVID-19 ; Forecasting ; Humans ; Japan/epidemiology ; Machine Learning ; *Pandemics ; SARS-CoV-2 ; }, abstract = {With the wide spread of COVID-19 and the corresponding negative impact on different life aspects, it becomes important to understand ways to deal with the pandemic as a part of daily routine. After a year of the COVID-19 pandemic, it has become obvious that different factors, including meteorological factors, influence the speed at which the disease is spread and the potential fatalities. However, the impact of each factor on the speed at which COVID-19 is spreading remains controversial. Accurate forecasting of potential positive cases may lead to better management of healthcare resources and provide guidelines for government policies in terms of the action required within an effective timeframe. Recently, Google Cloud has provided online COVID-19 forecasting data for the United States and Japan, which would help in predicting future situations on a state/prefecture scale and are updated on a day-by-day basis. 
In this study, we propose a deep learning architecture to predict the spread of COVID-19 considering various factors, such as meteorological data and public mobility estimates, and applied it to data collected in Japan to demonstrate its effectiveness. The proposed model was constructed using a neural network architecture based on a long short-term memory (LSTM) network. The model consists of multi-path LSTM layers that are trained using time-series meteorological data and public mobility data obtained from open-source data. The model was tested using different time frames, and the results were compared to Google Cloud forecasts. Public mobility is a dominant factor in estimating new positive cases, whereas meteorological data improve their accuracy. The average relative error of the proposed model ranged from 16.1% to 22.6% in major regions, which is a significant improvement compared with Google Cloud forecasting. This model can be used to provide public awareness regarding the morbidity risk of the COVID-19 pandemic in a feasible manner.}, } @article {pmid34071676, year = {2021}, author = {Gorgulla, C and Çınaroğlu, SS and Fischer, PD and Fackeldey, K and Wagner, G and Arthanari, H}, title = {VirtualFlow Ants-Ultra-Large Virtual Screenings with Artificial Intelligence Driven Docking Algorithm Based on Ant Colony Optimization.}, journal = {International journal of molecular sciences}, volume = {22}, number = {11}, pages = {}, pmid = {34071676}, issn = {1422-0067}, support = {R01 AI150709/AI/NIAID NIH HHS/United States ; R01 GM129026/GM/NIGMS NIH HHS/United States ; GM129026/GM/NIGMS NIH HHS/United States ; R01 AI037581/AI/NIAID NIH HHS/United States ; CA200913/CA/NCI NIH HHS/United States ; }, mesh = {*Algorithms ; *Artificial Intelligence ; Computational Biology/*methods ; Kelch-Like ECH-Associated Protein 1/chemistry/metabolism ; Ligands ; *Molecular Docking Simulation ; NF-E2-Related Factor 2/chemistry/metabolism ; Protein Binding ; Protein Conformation ; 
Reproducibility of Results ; Thermodynamics ; }, abstract = {The docking program PLANTS, which is based on ant colony optimization (ACO) algorithm, has many advanced features for molecular docking. Among them are multiple scoring functions, the possibility to model explicit displaceable water molecules, and the inclusion of experimental constraints. Here, we add support of PLANTS to VirtualFlow (VirtualFlow Ants), which adds a valuable method for primary virtual screenings and rescoring procedures. Furthermore, we have added support of ligand libraries in the MOL2 format, as well as on the fly conversion of ligand libraries which are in the PDBQT format to the MOL2 format to endow VirtualFlow Ants with an increased flexibility regarding the ligand libraries. The on the fly conversion is carried out with Open Babel and the program SPORES. We applied VirtualFlow Ants to a test system involving KEAP1 on the Google Cloud up to 128,000 CPUs, and the observed scaling behavior is approximately linear. Furthermore, we have adjusted several central docking parameters of PLANTS (such as the speed parameter or the number of ants) and screened 10 million compounds for each of the 10 resulting docking scenarios. We analyzed their docking scores and average docking times, which are key factors in virtual screenings. 
The possibility of carrying out ultra-large virtual screening with PLANTS via VirtualFlow Ants opens new avenues in computational drug discovery.}, } @article {pmid34071449, year = {2021}, author = {Ismail, L and Materwala, H and Hennebelle, A}, title = {A Scoping Review of Integrated Blockchain-Cloud (BcC) Architecture for Healthcare: Applications, Challenges and Solutions.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {11}, pages = {}, pmid = {34071449}, issn = {1424-8220}, support = {31R215//National Water and Energy Center, United Arab Emirates University/ ; }, mesh = {*Blockchain ; Computer Security ; Data Management ; Delivery of Health Care ; Electronic Health Records ; Humans ; }, abstract = {Blockchain is a disruptive technology for shaping the next era of a healthcare system striving for efficient and effective patient care. This is thanks to its peer-to-peer, secure, and transparent characteristics. On the other hand, cloud computing made its way into the healthcare system thanks to its elasticity and cost-efficiency nature. However, cloud-based systems fail to provide a secured and private patient-centric cohesive view to multiple healthcare stakeholders. In this situation, blockchain provides solutions to address security and privacy concerns of the cloud because of its decentralization feature combined with data security and privacy, while cloud provides solutions to the blockchain scalability and efficiency challenges. Therefore a novel paradigm of blockchain-cloud integration (BcC) emerges for the domain of healthcare. In this paper, we provide an in-depth analysis of the BcC integration for the healthcare system to give the readers the motivations behind the emergence of this new paradigm, introduce a classification of existing architectures and their applications for better healthcare. 
We then review the development platforms and services and highlight the research challenges for the integrated BcC architecture, possible solutions, and future research directions. The results of this paper will be useful for the healthcare industry to design and develop a data management system for better patient care.}, } @article {pmid34070966, year = {2021}, author = {Krzysztoń, M and Niewiadomska-Szynkiewicz, E}, title = {Intelligent Mobile Wireless Network for Toxic Gas Cloud Monitoring and Tracking.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {11}, pages = {}, pmid = {34070966}, issn = {1424-8220}, support = {833456//Horizon 2020/ ; }, abstract = {Intelligent wireless networks that comprise self-organizing autonomous vehicles equipped with punctual sensors and radio modules support many hostile and harsh environment monitoring systems. This work's contribution shows the benefits of applying such networks to estimate clouds' boundaries created by hazardous toxic substances heavier than air when accidentally released into the atmosphere. The paper addresses issues concerning sensing networks' design, focussing on a computing scheme for online motion trajectory calculation and data exchange. A three-stage approach that incorporates three algorithms for sensing devices' displacement calculation in a collaborative network according to the current task, namely exploration and gas cloud detection, boundary detection and estimation, and tracking the evolving cloud, is presented. A network connectivity-maintaining virtual force mobility model is used to calculate subsequent sensor positions, and multi-hop communication is used for data exchange. The main focus is on the efficient tracking of the cloud boundary. The proposed sensing scheme is sensitive to crucial mobility model parameters. The paper presents five procedures for calculating the optimal values of these parameters. 
In contrast to widely used techniques, the presented approach to gas cloud monitoring does not calculate sensors' displacements based on exact values of gas concentration and concentration gradients. The sensor readings are reduced to two values: the gas concentration below or greater than the safe value. The utility and efficiency of the presented method were justified through extensive simulations, giving encouraging results. The test cases were carried out on several scenarios with regular and irregular shapes of clouds generated using a widely used box model that describes the heavy gas dispersion in the atmospheric air. The simulation results demonstrate that using only a rough measurement indicating that the threshold concentration value was exceeded can detect and efficiently track a gas cloud boundary. This makes the sensing system less sensitive to the quality of the gas concentration measurement. Thus, it can be easily used to detect real phenomena. Significant results are recommendations on selecting procedures for computing mobility model parameters while tracking clouds with different shapes and determining optimal values of these parameters in convex and nonconvex cloud boundaries.}, } @article {pmid34070719, year = {2021}, author = {Tufail, A and Namoun, A and Sen, AAA and Kim, KH and Alrehaili, A and Ali, A}, title = {Moisture Computing-Based Internet of Vehicles (IoV) Architecture for Smart Cities.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {11}, pages = {}, pmid = {34070719}, issn = {1424-8220}, support = {Takamul Project 10//Islamic University of Madina, Saudi Arabia/ ; }, abstract = {Recently, the concept of combining 'things' on the Internet to provide various services has gained tremendous momentum. Such a concept has also impacted the automotive industry, giving rise to the Internet of Vehicles (IoV). IoV enables Internet connectivity and communication between smart vehicles and other devices on the network. 
Shifting the computing towards the edge of the network reduces communication delays and provides various services instantly. However, both distributed (i.e., edge computing) and central computing (i.e., cloud computing) architectures suffer from several inherent issues, such as high latency, high infrastructure cost, and performance degradation. We propose a novel concept of computation, which we call moisture computing (MC) to be deployed slightly away from the edge of the network but below the cloud infrastructure. The MC-based IoV architecture can be used to assist smart vehicles in collaborating to solve traffic monitoring, road safety, and management issues. Moreover, the MC can be used to dispatch emergency and roadside assistance in case of incidents and accidents. In contrast to the cloud which covers a broader area, the MC provides smart vehicles with critical information with fewer delays. We argue that the MC can help reduce infrastructure costs efficiently since it requires a medium-scale data center with moderate resources to cover a wider area compared to small-scale data centers in edge computing and large-scale data centers in cloud computing. We performed mathematical analyses to demonstrate that the MC reduces network delays and enhances the response time in contrast to the edge and cloud infrastructure. Moreover, we present a simulation-based implementation to evaluate the computational performance of the MC. 
Our simulation results show that the total processing time (computation delay and communication delay) is optimized, and delays are minimized in the MC as opposed to the traditional approaches.}, } @article {pmid34070069, year = {2021}, author = {Sim, SH and Jeong, YS}, title = {Multi-Blockchain-Based IoT Data Processing Techniques to Ensure the Integrity of IoT Data in AIoT Edge Computing Environments.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {10}, pages = {}, pmid = {34070069}, issn = {1424-8220}, abstract = {As the development of IoT technologies has progressed rapidly recently, most IoT data are focused on monitoring and control to process IoT data, but the cost of collecting and linking various IoT data increases, requiring the ability to proactively integrate and analyze collected IoT data so that cloud servers (data centers) can process smartly. In this paper, we propose a blockchain-based IoT big data integrity verification technique to ensure the safety of the Third Party Auditor (TPA), which has a role in auditing the integrity of AIoT data. The proposed technique aims to minimize IoT information loss by multiple blockchain groupings of information and signature keys from IoT devices. The proposed technique allows IoT information to be effectively guaranteed the integrity of AIoT data by linking hash values designated as arbitrary, constant-size blocks with previous blocks in hierarchical chains. The proposed technique performs synchronization using location information between the central server and IoT devices to manage the cost of the integrity of IoT information at low cost. 
In order to easily control a large number of locations of IoT devices, we perform cross-distributed and blockchain linkage processing under constant rules to improve the load and throughput generated by IoT devices.}, } @article {pmid34068743, year = {2021}, author = {Melo, GCG and Torres, IC and Araújo, ÍBQ and Brito, DB and Barboza, EA}, title = {A Low-Cost IoT System for Real-Time Monitoring of Climatic Variables and Photovoltaic Generation for Smart Grid Application.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {9}, pages = {}, pmid = {34068743}, issn = {1424-8220}, support = {PD02/2016//Agência Nacional de Energia Elétrica (ANEEL) and Equatorial Energia/ ; }, abstract = {Monitoring and data acquisition are essential to recognize the renewable resources available on-site, evaluate electrical conversion efficiency, detect failures, and optimize electrical production. Commercial monitoring systems for the photovoltaic system are generally expensive and closed for modifications. This work proposes a low-cost real-time internet of things system for micro and mini photovoltaic generation systems that can monitor continuous voltage, continuous current, alternating power, and seven meteorological variables. The proposed system measures all relevant meteorological variables and directly acquires photovoltaic generation data from the plant (not from the inverter). The system is implemented using open software, connects to the internet without cables, stores data locally and in the cloud, and uses the network time protocol to synchronize the devices' clocks. To the best of our knowledge, no work reported in the literature presents these features altogether. Furthermore, experiments carried out with the proposed system showed good effectiveness and reliability. 
This system enables fog and cloud computing in a photovoltaic system, creating a time series measurements data set, enabling the future use of machine learning to create smart photovoltaic systems.}, } @article {pmid34068200, year = {2021}, author = {Amoakoh, AO and Aplin, P and Awuah, KT and Delgado-Fernandez, I and Moses, C and Alonso, CP and Kankam, S and Mensah, JC}, title = {Testing the Contribution of Multi-Source Remote Sensing Features for Random Forest Classification of the Greater Amanzule Tropical Peatland.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {10}, pages = {}, pmid = {34068200}, issn = {1424-8220}, support = {GTA//Edge Hill University/ ; }, abstract = {Tropical peatlands such as Ghana's Greater Amanzule peatland are highly valuable ecosystems and under great pressure from anthropogenic land use activities. Accurate measurement of their occurrence and extent is required to facilitate sustainable management. A key challenge, however, is the high cloud cover in the tropics that limits optical remote sensing data acquisition. In this work we combine optical imagery with radar and elevation data to optimise land cover classification for the Greater Amanzule tropical peatland. Sentinel-2, Sentinel-1 and Shuttle Radar Topography Mission (SRTM) imagery were acquired and integrated to drive a machine learning land cover classification using a random forest classifier. Recursive feature elimination was used to optimize high-dimensional and correlated feature space and determine the optimal features for the classification. Six datasets were compared, comprising different combinations of optical, radar and elevation features. Results showed that the best overall accuracy (OA) was found for the integrated Sentinel-2, Sentinel-1 and SRTM dataset (S2+S1+DEM), significantly outperforming all the other classifications with an OA of 94%. 
Assessment of the sensitivity of land cover classes to image features indicated that elevation and the original Sentinel-1 bands contributed the most to separating tropical peatlands from other land cover types. The integration of more features and the removal of redundant features systematically increased classification accuracy. We estimate Ghana's Greater Amanzule peatland covers 60,187 ha. Our proposed methodological framework contributes a robust workflow for accurate and detailed landscape-scale monitoring of tropical peatlands, while our findings provide timely information critical for the sustainable management of the Greater Amanzule peatland.}, } @article {pmid34066019, year = {2021}, author = {Puliafito, A and Tricomi, G and Zafeiropoulos, A and Papavassiliou, S}, title = {Smart Cities of the Future as Cyber Physical Systems: Challenges and Enabling Technologies.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {10}, pages = {}, pmid = {34066019}, issn = {1424-8220}, abstract = {A smart city represents an improvement of today's cities, both functionally and structurally, that strategically utilizes several smart factors, capitalizing on Information and Communications Technology (ICT) to increase the city's sustainable growth and strengthen the city's functions, while ensuring the citizens' enhanced quality of life and health. Cities can be viewed as a microcosm of interconnected "objects" with which citizens interact daily, which represents an extremely interesting example of a cyber physical system (CPS), where the continuous monitoring of a city's status occurs through sensors and processors applied within the real-world infrastructure. Each object in a city can be both the collector and distributor of information regarding mobility, energy consumption, air pollution as well as potentially offering cultural and tourist information. As a consequence, the cyber and real worlds are strongly linked and interdependent in a smart city. 
New services can be deployed when needed, and evaluation mechanisms can be set up to assess the health and success of a smart city. In particular, the objectives of creating ICT-enabled smart city environments target (but are not limited to) improved city services; optimized decision-making; the creation of smart urban infrastructures; the orchestration of cyber and physical resources; addressing challenging urban issues, such as environmental pollution, transportation management, energy usage and public health; the optimization of the use and benefits of next generation (5G and beyond) communication; the capitalization of social networks and their analysis; support for tactile internet applications; and the inspiration of urban citizens to improve their quality of life. However, the large scale deployment of cyber-physical-social systems faces a series of challenges and issues (e.g., energy efficiency requirements, architecture, protocol stack design, implementation, and security), which requires more smart sensing and computing methods as well as advanced networking and communications technologies to provide more pervasive cyber-physical-social services. In this paper, we discuss the challenges, the state-of-the-art, and the solutions to a set of currently unresolved key questions related to CPSs and smart cities.}, } @article {pmid34065920, year = {2021}, author = {Albowarab, MH and Zakaria, NA and Zainal Abidin, Z}, title = {Directionally-Enhanced Binary Multi-Objective Particle Swarm Optimisation for Load Balancing in Software Defined Networks.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {10}, pages = {}, pmid = {34065920}, issn = {1424-8220}, abstract = {Various aspects of task execution load balancing of Internet of Things (IoTs) networks can be optimised using intelligent algorithms provided by software-defined networking (SDN). These load balancing aspects include makespan, energy consumption, and execution cost. 
While past studies have evaluated load balancing from one or two aspects, none has explored the possibility of simultaneously optimising all aspects, namely, reliability, energy, cost, and execution time. For the purposes of load balancing, implementing multi-objective optimisation (MOO) based on meta-heuristic searching algorithms requires assurances that the solution space will be thoroughly explored. Optimising load balancing provides not only decision makers with optimised solutions but a rich set of candidate solutions to choose from. Therefore, the purposes of this study were (1) to propose a joint mathematical formulation to solve load balancing challenges in cloud computing and (2) to propose two multi-objective particle swarm optimisation (MP) models; distance angle multi-objective particle swarm optimization (DAMP) and angle multi-objective particle swarm optimization (AMP). Unlike existing models that only use crowding distance as a criterion for solution selection, our MP models probabilistically combine both crowding distance and crowding angle. More specifically, we only selected solutions that had more than a 0.5 probability of higher crowding distance and higher angular distribution. In addition, binary variants of the approaches were generated based on transfer function, and they were denoted by binary DAMP (BDAMP) and binary AMP (BAMP). After using MOO mathematical functions to compare our models, BDAMP and BAMP, with state of the standard models, BMP, BDMP and BPSO, they were tested using the proposed load balancing model. Both tests proved that our DAMP and AMP models were far superior to the state of the art standard models, MP, crowding distance multi-objective particle swarm optimisation (DMP), and PSO. 
Therefore, this study enables the incorporation of meta-heuristic in the management layer of cloud networks.}, } @article {pmid34065434, year = {2021}, author = {Choi, Y and Kim, N and Hong, S and Bae, J and Park, I and Sohn, HG}, title = {Critical Image Identification via Incident-Type Definition Using Smartphone Data during an Emergency: A Case Study of the 2020 Heavy Rainfall Event in Korea.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {10}, pages = {}, pmid = {34065434}, issn = {1424-8220}, support = {no.20009742//Ministry of Interior and Safety (MOIS, Korea)/ ; }, mesh = {*Algorithms ; Cloud Computing ; Republic of Korea ; *Smartphone ; }, abstract = {In unpredictable disaster scenarios, it is important to recognize the situation promptly and take appropriate response actions. This study proposes a cloud computing-based data collection, processing, and analysis process that employs a crowd-sensing application. Clustering algorithms are used to define the major damage types, and hotspot analysis is applied to effectively filter critical data from crowdsourced data. To verify the utility of the proposed process, it is applied to Icheon-si and Anseong-si, both in Gyeonggi-do, which were affected by heavy rainfall in 2020. The results show that the types of incident at the damaged site were effectively detected, and images reflecting the damage situation could be classified using the application of the geospatial analysis technique. For 5 August 2020, which was close to the date of the event, the images were classified with a precision of 100% at a threshold of 0.4. For 24-25 August 2020, the image classification precision exceeded 95% at a threshold of 0.5, except for the mudslide mudflow in the Yul area. 
The location distribution of the classified images showed a distribution similar to that of damaged regions in unmanned aerial vehicle images.}, } @article {pmid34065011, year = {2021}, author = {Martínez-Gutiérrez, A and Díez-González, J and Ferrero-Guillén, R and Verde, P and Álvarez, R and Perez, H}, title = {Digital Twin for Automatic Transportation in Industry 4.0.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {10}, pages = {}, pmid = {34065011}, issn = {1424-8220}, support = {PID2019-108277GB-C21//Spanish Ministry of Science and Innovation/ ; }, abstract = {Industry 4.0 is the fourth industrial revolution consisting of the digitalization of processes facilitating an incremental value chain. Smart Manufacturing (SM) is one of the branches of the Industry 4.0 regarding logistics, visual inspection of pieces, optimal organization of processes, machine sensorization, real-time data acquisition and treatment and virtualization of industrial activities. Among these techniques, Digital Twin (DT) is attracting the research interest of the scientific community in the last few years due to the cost reduction through the simulation of the dynamic behaviour of the industrial plant predicting potential problems in the SM paradigm. In this paper, we propose a new DT design concept based on external service for the transportation of the Automatic Guided Vehicles (AGVs) which are being recently introduced for the Material Requirement Planning satisfaction in the collaborative industrial plant. We have performed real experimentation in two different scenarios through the definition of an Industrial Ethernet platform for the real validation of the DT results obtained. Results show the correlation between the virtual and real experiments carried out in the two scenarios defined in this paper with an accuracy of 97.95% and 98.82% in the total time of the missions analysed in the DT. 
Therefore, these results validate the model created for the AGV navigation, thus fulfilling the objectives of this paper.}, } @article {pmid34064712, year = {2021}, author = {Sobczak, Ł and Filus, K and Domański, A and Domańska, J}, title = {LiDAR Point Cloud Generation for SLAM Algorithm Evaluation.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {10}, pages = {}, pmid = {34064712}, issn = {1424-8220}, support = {DOB-2P/02/07/2017//Narodowe Centrum Badań i Rozwoju/ ; }, abstract = {With the emerging interest in the autonomous driving level at 4 and 5 comes a necessity to provide accurate and versatile frameworks to evaluate the algorithms used in autonomous vehicles. There is a clear gap in the field of autonomous driving simulators. It covers testing and parameter tuning of a key component of autonomous driving systems, SLAM, frameworks targeting off-road and safety-critical environments. It also includes taking into consideration the non-idealistic nature of the real-life sensors, associated phenomena and measurement errors. We created a LiDAR simulator that delivers accurate 3D point clouds in real time. The point clouds are generated based on the sensor placement and the LiDAR type that can be set using configurable parameters. We evaluate our solution based on comparison of the results using an actual device, Velodyne VLP-16, on real-life tracks and the corresponding simulations. We measure the error values obtained using Google Cartographer SLAM algorithm and the distance between the simulated and real point clouds to verify their accuracy. The results show that our simulation (which incorporates measurement errors and the rolling shutter effect) produces data that can successfully imitate the real-life point clouds. 
Due to dedicated mechanisms, it is compatible with the Robotic Operating System (ROS) and can be used interchangeably with data from actual sensors, which enables easy testing, SLAM algorithm parameter tuning and deployment.}, } @article {pmid34064710, year = {2021}, author = {Khamisy-Farah, R and Furstenau, LB and Kong, JD and Wu, J and Bragazzi, NL}, title = {Gynecology Meets Big Data in the Disruptive Innovation Medical Era: State-of-Art and Future Prospects.}, journal = {International journal of environmental research and public health}, volume = {18}, number = {10}, pages = {}, pmid = {34064710}, issn = {1660-4601}, mesh = {Artificial Intelligence ; *Big Data ; Data Science ; Delivery of Health Care ; Female ; *Gynecology ; Humans ; }, abstract = {Tremendous scientific and technological achievements have been revolutionizing the current medical era, changing the way in which physicians practice their profession and deliver healthcare provisions. This is due to the convergence of various advancements related to digitalization and the use of information and communication technologies (ICTs)-ranging from the internet of things (IoT) and the internet of medical things (IoMT) to the fields of robotics, virtual and augmented reality, and massively parallel and cloud computing. Further progress has been made in the fields of additive manufacturing and three-dimensional (3D) printing, sophisticated statistical tools such as big data visualization and analytics (BDVA) and artificial intelligence (AI), the use of mobile and smartphone applications (apps), remote monitoring and wearable sensors, and e-learning, among others. Within this new conceptual framework, big data represents a massive set of data characterized by different properties and features. 
These can be categorized both from a quantitative and qualitative standpoint, and include data generated from wet-lab and microarrays (molecular big data), databases and registries (clinical/computational big data), imaging techniques (such as radiomics, imaging big data) and web searches (the so-called infodemiology, digital big data). The present review aims to show how big and smart data can revolutionize gynecology by shedding light on female reproductive health, both in terms of physiology and pathophysiology. More specifically, they appear to have potential uses in the field of gynecology to increase its accuracy and precision, stratify patients, provide opportunities for personalized treatment options rather than delivering a package of "one-size-fits-it-all" healthcare management provisions, and enhance its effectiveness at each stage (health promotion, prevention, diagnosis, prognosis, and therapeutics).}, } @article {pmid34063234, year = {2021}, author = {Jalowiczor, J and Rozhon, J and Voznak, M}, title = {Study of the Efficiency of Fog Computing in an Optimized LoRaWAN Cloud Architecture.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {9}, pages = {}, pmid = {34063234}, issn = {1424-8220}, support = {SP2021/25//Ministerstvo Školství, Mládeže a Tělovýchovy/ ; LM2018140//e-Infrastructure CZ/ ; }, abstract = {The technologies of the Internet of Things (IoT) have an increasing influence on our daily lives. The expansion of the IoT is associated with the growing number of IoT devices that are connected to the Internet. As the number of connected devices grows, the demand for speed and data volume is also greater. While most IoT network technologies use cloud computing, this solution becomes inefficient for some use-cases. For example, suppose that a company that uses an IoT network with several sensors to collect data within a production hall. 
The company may require sharing only selected data to the public cloud and responding faster to specific events. In the case of a large amount of data, the off-loading techniques can be utilized to reach higher efficiency. Meeting these requirements is difficult or impossible for solutions adopting cloud computing. The fog computing paradigm addresses these cases by providing data processing closer to end devices. This paper proposes three possible network architectures that adopt fog computing for LoRaWAN because LoRaWAN is already deployed in many locations and offers long-distance communication with low-power consumption. The architecture proposals are further compared in simulations to select the optimal form in terms of total service time. The resulting optimal communication architecture could be deployed to the existing LoRaWAN with minimal cost and effort of the network operator.}, } @article {pmid34057379, year = {2021}, author = {Spjuth, O and Frid, J and Hellander, A}, title = {The machine learning life cycle and the cloud: implications for drug discovery.}, journal = {Expert opinion on drug discovery}, volume = {16}, number = {9}, pages = {1071-1079}, doi = {10.1080/17460441.2021.1932812}, pmid = {34057379}, issn = {1746-045X}, mesh = {Animals ; *Artificial Intelligence ; Cloud Computing ; Drug Discovery ; Humans ; Life Cycle Stages ; *Machine Learning ; }, abstract = {Introduction: Artificial intelligence (AI) and machine learning (ML) are increasingly used in many aspects of drug discovery. Larger data sizes and methods such as Deep Neural Networks contribute to challenges in data management, the required software stack, and computational infrastructure. There is an increasing need in drug discovery to continuously re-train models and make them available in production environments.Areas covered: This article describes how cloud computing can aid the ML life cycle in drug discovery. 
The authors discuss opportunities with containerization and scientific workflows and introduce the concept of MLOps and describe how it can facilitate reproducible and robust ML modeling in drug discovery organizations. They also discuss ML on private, sensitive and regulated data.Expert opinion: Cloud computing offers a compelling suite of building blocks to sustain the ML life cycle integrated in iterative drug discovery. Containerization and platforms such as Kubernetes together with scientific workflows can enable reproducible and resilient analysis pipelines, and the elasticity and flexibility of cloud infrastructures enables scalable and efficient access to compute resources. Drug discovery commonly involves working with sensitive or private data, and cloud computing and federated learning can contribute toward enabling collaborative drug discovery within and between organizations.Abbreviations: AI = Artificial Intelligence; DL = Deep Learning; GPU = Graphics Processing Unit; IaaS = Infrastructure as a Service; K8S = Kubernetes; ML = Machine Learning; MLOps = Machine Learning and Operations; PaaS = Platform as a Service; QC = Quality Control; SaaS = Software as a Service.}, } @article {pmid34050420, year = {2021}, author = {Marchand, JR and Pirard, B and Ertl, P and Sirockin, F}, title = {CAVIAR: a method for automatic cavity detection, description and decomposition into subcavities.}, journal = {Journal of computer-aided molecular design}, volume = {35}, number = {6}, pages = {737-750}, pmid = {34050420}, issn = {1573-4951}, mesh = {Binding Sites ; Ligands ; Machine Learning ; Protein Binding ; Protein Conformation ; Proteins/*chemistry ; Software ; }, abstract = {The accurate description of protein binding sites is essential to the determination of similarity and the application of machine learning methods to relate the binding sites to observed functions. 
This work describes CAVIAR, a new open source tool for generating descriptors for binding sites, using protein structures in PDB and mmCIF format as well as trajectory frames from molecular dynamics simulations as input. The applicability of CAVIAR descriptors is showcased by computing machine learning predictions of binding site ligandability. The method can also automatically assign subcavities, even in the absence of a bound ligand. The defined subpockets mimic the empirical definitions used in medicinal chemistry projects. It is shown that the experimental binding affinity scales relatively well with the number of subcavities filled by the ligand, with compounds binding to more than three subcavities having nanomolar or better affinities to the target. The CAVIAR descriptors and methods can be used in any machine learning-based investigations of problems involving binding sites, from protein engineering to hit identification. The full software code is available on GitHub and a conda package is hosted on Anaconda cloud.}, } @article {pmid34024075, year = {2021}, author = {Chandawarkar, R and Nadkarni, P}, title = {Safe clinical photography: best practice guidelines for risk management and mitigation.}, journal = {Archives of plastic surgery}, volume = {48}, number = {3}, pages = {295-304}, pmid = {34024075}, issn = {2234-6163}, abstract = {Clinical photography is an essential component of patient care in plastic surgery. The use of unsecured smartphone cameras, digital cameras, social media, instant messaging, and commercially available cloud-based storage devices threatens patients' data safety. This paper identifies potential risks of clinical photography and heightens awareness of safe clinical photography. 
Specifically, we evaluated existing risk-mitigation strategies globally, comparing them to industry standards in similar settings, and formulated a framework for developing a risk-mitigation plan for avoiding data breaches by identifying the safest methods of picture taking, transfer to storage, retrieval, and use, both within and outside the organization. Since threats evolve constantly, the framework must evolve too. Based on a literature search of both PubMed and the web (via Google) with key phrases and child terms (for PubMed), the risks and consequences of data breaches in individual processes in clinical photography are identified. Current clinical-photography practices are described. Lastly, we evaluate current risk mitigation strategies for clinical photography by examining guidelines from professional organizations, governmental agencies, and non-healthcare industries. Combining lessons learned from the steps above into a comprehensive framework that could contribute to national/international guidelines on safe clinical photography, we provide recommendations for best practice guidelines. It is imperative that best practice guidelines for the simple, safe, and secure capture, transfer, storage, and retrieval of clinical photographs be co-developed through cooperative efforts between providers, hospital administrators, clinical informaticians, IT governance structures, and national professional organizations. 
This would significantly safeguard patient data security and provide the privacy that patients deserve and expect.}, } @article {pmid34022611, year = {2021}, author = {Bowler, AL and Watson, NJ}, title = {Transfer learning for process monitoring using reflection-mode ultrasonic sensing.}, journal = {Ultrasonics}, volume = {115}, number = {}, pages = {106468}, doi = {10.1016/j.ultras.2021.106468}, pmid = {34022611}, issn = {1874-9968}, abstract = {The fourth industrial revolution is set to integrate entire manufacturing processes using industrial digital technologies such as the Internet of Things, Cloud Computing, and machine learning to improve process productivity, efficiency, and sustainability. Sensors collect the real-time data required to optimise manufacturing processes and are therefore a key technology in this transformation. Ultrasonic sensors have benefits of being low-cost, in-line, non-invasive, and able to operate in opaque systems. Supervised machine learning models can correlate ultrasonic sensor data to useful information about the manufacturing materials and processes. However, this requires a reference measurement of the process material to label each data point for model training. Labelled data is often difficult to obtain in factory environments, and so a method of training models without this is desirable. This work compares two domain adaptation methods to transfer models across processes, so that no labelled data is required to accurately monitor a target process. The two methods compared are a Single Feature transfer learning approach and Transfer Component Analysis using three features. Ultrasonic waveforms are unique to the sensor used, attachment procedure, and contact pressure. Therefore, only a small number of transferable features are investigated. Two industrially relevant processes were used as case studies: mixing and cleaning of fouling in pipes. 
A reflection-mode ultrasonic sensing technique was used, which monitors the sound wave reflected from the interface between the vessel wall and process material. Overall, the Single Feature method produced the highest prediction accuracies: up to 96.0% and 98.4% to classify the completion of mixing and cleaning, respectively; and R[2] values of up to 0.947 and 0.999 to predict the time remaining until completion. These results highlight the potential of combining ultrasonic measurements with transfer learning techniques to monitor industrial processes. Although, further work is required to study various effects such as changing sensor location between source and target domains.}, } @article {pmid34019075, year = {2021}, author = {Miller, M and Zaccheddu, N}, title = {Light for a Potentially Cloudy Situation: Approach to Validating Cloud Computing Tools.}, journal = {Biomedical instrumentation & technology}, volume = {55}, number = {2}, pages = {63-68}, pmid = {34019075}, issn = {0899-8205}, mesh = {*Cloud Computing ; }, } @article {pmid34016012, year = {2021}, author = {Sahu, ML and Atulkar, M and Ahirwal, MK and Ahamad, A}, title = {IoT-enabled cloud-based real-time remote ECG monitoring system.}, journal = {Journal of medical engineering & technology}, volume = {45}, number = {6}, pages = {473-485}, doi = {10.1080/03091902.2021.1921870}, pmid = {34016012}, issn = {1464-522X}, mesh = {Algorithms ; Cloud Computing ; Electrocardiography ; Humans ; *Internet of Things ; }, abstract = {Statistical reports all around the world have deemed cardiovascular diseases (CVDs) as the largest contributor to the death count. The electrocardiogram (ECG) is a widely accepted technology employed for investigation of CVDs of the person. The proposed solution deals with an efficient internet of things (IoT) enabled real-time ECG monitoring system using cloud computing technologies. The article presents a cloud-centric solution to provide remote monitoring of CVD. 
Sensed ECG data are transmitted to S3 bucket provided by Amazon web service (AWS) through a mobile gateway. AWS cloud uses HTTP and MQTT servers to provide data visualisation, quick response and long-live connection to device and user. Bluetooth low energy (BLE 4.0) is used as a communication protocol for low-power data transmission between device and mobile gateway. The proposed system is implemented with filtering algorithms to ignore distractions, environmental noise and motion artefacts. It offers an analysis of ECG signals to detect various parameters such as heartbeat, PQRST wave and QRS complex intervals along with respiration rate. The proposed system prototype has been tested and validated for reliable ECG monitoring remotely in real-time.}, } @article {pmid34013035, year = {2021}, author = {Usman Sana, M and Li, Z}, title = {Efficiency aware scheduling techniques in cloud computing: a descriptive literature review.}, journal = {PeerJ. Computer science}, volume = {7}, number = {}, pages = {e509}, pmid = {34013035}, issn = {2376-5992}, abstract = {In the last decade, cloud computing becomes the most demanding platform to resolve issues and manage requests across the Internet. Cloud computing takes along terrific opportunities to run cost-effective scientific workflows without the requirement of possessing any set-up for customers. It makes available virtually unlimited resources that can be attained, organized, and used as required. Resource scheduling plays a fundamental role in the well-organized allocation of resources to every task in the cloud environment. However along with these gains many challenges are required to be considered to propose an efficient scheduling algorithm. An efficient Scheduling algorithm must enhance the implementation of goals like scheduling cost, load balancing, makespan time, security awareness, energy consumption, reliability, service level agreement maintenance, etc. 
To achieve the aforementioned goals many state-of-the-art scheduling techniques have been proposed based upon hybrid, heuristic, and meta-heuristic approaches. This work reviewed existing algorithms from the perspective of the scheduling objective and strategies. We conduct a comparative analysis of existing strategies along with the outcomes they provide. We highlight the drawbacks for insight into further research and open challenges. The findings aid researchers by providing a roadmap to propose efficient scheduling algorithms.}, } @article {pmid34013027, year = {2021}, author = {Baniata, H and Mahmood, S and Kertesz, A}, title = {Assessing anthropogenic heat flux of public cloud data centers: current and future trends.}, journal = {PeerJ. Computer science}, volume = {7}, number = {}, pages = {e478}, pmid = {34013027}, issn = {2376-5992}, abstract = {Global average temperature had been significantly increasing during the past century, mainly due to the growing rates of greenhouse gas (GHG) emissions, leading to a global warming problem. Many research works indicated other causes of this problem, such as the anthropogenic heat flux (AHF). Cloud computing (CC) data centers (DCs), for example, perform massive computational tasks for end users, leading to emit huge amounts of waste heat towards the surrounding (local) atmosphere in the form of AHF. Out of the total power consumption of a public cloud DC, nearly 10% is wasted in the form of heat. In this paper, we quantitatively and qualitatively analyze the current state of AHF emissions of the top three cloud service providers (i.e., Google, Azure and Amazon) according to their average energy consumption and the global distribution of their DCs. In this study, we found that Microsoft Azure DCs emit the highest amounts of AHF, followed by Amazon and Google, respectively. 
We also found that Europe is the most negatively affected by AHF of public DCs, due to its small area relative to other continents and the large number of cloud DCs within. Accordingly, we present mean estimations of continental AHF density per square meter. Following our results, we found that the top three clouds (with waste heat at a rate of 1,720.512 MW) contribute an average of more than 2.8% out of averaged continental AHF emissions. Using this percentage, we provide future trends estimations of AHF densities in the period [2020-2100]. In one of the presented scenarios, our estimations predict that by 2100, AHF of public clouds DCs will reach 0.01 Wm[-2].}, } @article {pmid33997644, year = {2021}, author = {Onweni, CL and Venegas-Borsellino, CP and Treece, J and Turnbull, MT and Ritchie, C and Freeman, WD}, title = {The Power of Mobile Health: The Girl With the Gadgets in Uganda.}, journal = {Mayo Clinic proceedings. Innovations, quality & outcomes}, volume = {5}, number = {2}, pages = {486-494}, pmid = {33997644}, issn = {2542-4548}, abstract = {Medical-grade ultrasound devices are now pocket sized and can be easily transported to underserved parts of the world, allowing health care providers to have the tools to optimize diagnoses, inform management plans, and improve patient outcomes in remote locations. Other great advances in technology have recently occurred, such as artificial intelligence applied to mobile health devices and cloud computing, as augmented reality instructions make these devices more user friendly and readily applicable across health care encounters. However, broader awareness of the impact of these mobile health technologies is needed among health care providers, along with training on how to use them in valid and reproducible environments, for accurate diagnosis and treatment. This article provides a summary of a Mayo International Health Program journey to Bwindi, Uganda, with a portable mobile health unit. 
This article shows how point-of-care ultrasonography and other technologies can benefit remote clinical diagnosis and management in underserved areas around the world.}, } @article {pmid33989047, year = {2021}, author = {Qureshi, KN and Alhudhaif, A and Anwar, RW and Bhati, SN and Jeon, G}, title = {Fully Integrated Data Communication Framework by Using Visualization Augmented Reality for Internet of Things Networks.}, journal = {Big data}, volume = {9}, number = {4}, pages = {253-264}, doi = {10.1089/big.2020.0282}, pmid = {33989047}, issn = {2167-647X}, mesh = {*Augmented Reality ; Computer Simulation ; *Internet of Things ; }, abstract = {The new and integrated area called Internet of Things (IoT) has gained popularity due to its smart objects, services and affordability. These networks are based on data communication, augmented reality (AR), and wired and wireless infrastructures. The basic objective of these networks is data communication, environment monitoring, tracking, and sensing by using smart devices and sensor nodes. The AR is one of the attractive and advanced areas that is integrated in IoT networks in smart homes and smart industries to convert the objects into 3D to visualize information and provide interactive reality-based control. With attraction, this idea has suffered with complex and heavy processes, computation complexities, network communication degradation, and network delay. This article presents a detailed overview of these technologies and proposes a more convenient and fast data communication model by using edge computing and Fifth-Generation platforms. The article also introduces a Visualization Augmented Reality framework for IoT (VAR-IoT) networks fully integrated by communication, sensing, and actuating features with a better interface to control the objects. 
The proposed network model is evaluated in simulation in terms of applications response time and network delay and it observes the better performance of the proposed framework.}, } @article {pmid33979321, year = {2021}, author = {Bahmani, A and Ferriter, K and Krishnan, V and Alavi, A and Alavi, A and Tsao, PS and Snyder, MP and Pan, C}, title = {Swarm: A federated cloud framework for large-scale variant analysis.}, journal = {PLoS computational biology}, volume = {17}, number = {5}, pages = {e1008977}, pmid = {33979321}, issn = {1553-7358}, support = {RM1 HG007735/HG/NHGRI NIH HHS/United States ; U24 HG009397/HG/NHGRI NIH HHS/United States ; }, mesh = {*Cloud Computing ; Computational Biology/*methods ; Computer Security ; Datasets as Topic ; *Genomics ; Privacy ; Software ; }, abstract = {Genomic data analysis across multiple cloud platforms is an ongoing challenge, especially when large amounts of data are involved. Here, we present Swarm, a framework for federated computation that promotes minimal data motion and facilitates crosstalk between genomic datasets stored on various cloud platforms. We demonstrate its utility via common inquiries of genomic variants across BigQuery in the Google Cloud Platform (GCP), Athena in the Amazon Web Services (AWS), Apache Presto and MySQL. Compared to single-cloud platforms, the Swarm framework significantly reduced computational costs, run-time delays and risks of security breach and privacy violation.}, } @article {pmid33968292, year = {2021}, author = {Masud, M and Gaba, GS and Choudhary, K and Alroobaea, R and Hossain, MS}, title = {A robust and lightweight secure access scheme for cloud based E-healthcare services.}, journal = {Peer-to-peer networking and applications}, volume = {14}, number = {5}, pages = {3043-3057}, pmid = {33968292}, issn = {1936-6442}, abstract = {Traditional healthcare services have transitioned into modern healthcare services where doctors remotely diagnose the patients. 
Cloud computing plays a significant role in this change by providing easy access to patients' medical records to all stakeholders, such as doctors, nurses, patients, life insurance agents, etc. Cloud services are scalable, cost-effective, and offer a broad range of mobile access to patients' electronic health record (EHR). Despite the cloud's enormous benefits like real-time data access, patients' EHR security and privacy are major concerns. Since the information about patients' health is highly sensitive and crucial, sharing it over the unsecured wireless medium brings many security challenges such as eavesdropping, modifications, etc. Considering the security needs of remote healthcare, this paper proposes a robust and lightweight, secure access scheme for cloud-based E-healthcare services. The proposed scheme addresses the potential threats to E-healthcare by providing a secure interface to stakeholders and prohibiting unauthorized users from accessing information stored in the cloud. The scheme makes use of multiple keys formed through the key derivation function (KDF) to ensure end-to-end ciphering of information for preventing misuse. The rights to access the cloud services are provided based on the identity and the association between stakeholders, thus ensuring privacy. Due to its simplicity and robustness, the proposed scheme is the best fit for protecting data security and privacy in cloud-based E-healthcare services.}, } @article {pmid33968291, year = {2021}, author = {Tamizhselvi, SP and Muthuswamy, V}, title = {Delay - aware bandwidth estimation and intelligent video transcoder in mobile cloud.}, journal = {Peer-to-peer networking and applications}, volume = {14}, number = {4}, pages = {2038-2060}, pmid = {33968291}, issn = {1936-6442}, abstract = {In recent years, smartphone users are interested in large volumes to view live videos and sharing video resources over social media (e.g., Youtube, Netflix). 
The continuous streaming of video in mobile devices faces many challenges in network parameters namely bandwidth estimation, congestion window, throughput, delay, and transcoding is a challenging and time-consuming task. To perform these resource-intensive tasks via mobile is complicated, and hence, the cloud is integrated with smartphones to provide Mobile Cloud Computing (MCC). To resolve the issue, we propose a novel framework called delay aware bandwidth estimation and intelligent video transcoder in mobile cloud. In this paper, we introduced four techniques, namely, Markov Mobile Bandwidth Cloud Estimation (MMBCE), Cloud Dynamic Congestion Window (CDCW), Queue-based Video Processing for Cloud Server (QVPS), and Intelligent Video Transcoding for selecting Server (IVTS). To evaluate the performance of the proposed algorithm, we implemented a testbed using the two mobile configurations and the public cloud server Amazon Web Server (AWS). The study and results in a real environment demonstrate that our proposed framework can improve the QoS requirements and outperforms the existing algorithms. Firstly, MMBCE utilizes the well-known Markov Decision Process (MDP) model to estimate the best bandwidth of mobile using reward function. MMBCE improves the performance of 50% PDR compared with other algorithms. CDCW fits the congestion window and reduces packet loss dynamically. CDCW produces 40% more goodput with minimal PLR. Next, in QVPS, the M/M/S queueing model is processed to reduce the video processing delay and calculates the total service time. Finally, IVTS applies the M/G/N model and reduces 6% utilization of transcoding workload, by intelligently selecting the minimum workload of the transcoding server. The IVTS takes less time in slow and fast mode. The performance analysis and experimental evaluation show that the queueing model reduces the delay by 0.2 ms and the server's utilization by 20%. 
Hence, in this work, the cloud minimizes delay effectively to deliver a good quality of video streaming on mobile.}, } @article {pmid33967273, year = {2021}, author = {Ito, Y and Unagami, M and Yamabe, F and Mitsui, Y and Nakajima, K and Nagao, K and Kobayashi, H}, title = {A method for utilizing automated machine learning for histopathological classification of testis based on Johnsen scores.}, journal = {Scientific reports}, volume = {11}, number = {1}, pages = {9962}, pmid = {33967273}, issn = {2045-2322}, mesh = {Adult ; Automation, Laboratory ; Azoospermia/*diagnosis/pathology ; Coloring Agents ; Eosine Yellowish-(YS) ; Hematoxylin ; Histocytochemistry/methods/*standards ; Humans ; Infertility, Male/*diagnosis/pathology ; *Machine Learning ; Male ; Seminiferous Tubules/*pathology/ultrastructure ; Spermatids/pathology/ultrastructure ; Spermatocytes/*pathology/ultrastructure ; Spermatogonia/pathology/ultrastructure ; }, abstract = {We examined whether a tool for determining Johnsen scores automatically using artificial intelligence (AI) could be used in place of traditional Johnsen scoring to support pathologists' evaluations. Average precision, precision, and recall were assessed by the Google Cloud AutoML Vision platform. We obtained testicular tissues for 275 patients and were able to use haematoxylin and eosin (H&E)-stained glass microscope slides from 264 patients. In addition, we cut out of parts of the histopathology images (5.0 × 5.0 cm) for expansion of Johnsen's characteristic areas with seminiferous tubules. We defined four labels: Johnsen score 1-3, 4-5, 6-7, and 8-10 to distinguish Johnsen scores in clinical practice. All images were uploaded to the Google Cloud AutoML Vision platform. We obtained a dataset of 7155 images at magnification 400× and a dataset of 9822 expansion images for the 5.0 × 5.0 cm cutouts. 
For the 400× magnification image dataset, the average precision (positive predictive value) of the algorithm was 82.6%, precision was 80.31%, and recall was 60.96%. For the expansion image dataset (5.0 × 5.0 cm), the average precision was 99.5%, precision was 96.29%, and recall was 96.23%. This is the first report of an AI-based algorithm for predicting Johnsen scores.}, } @article {pmid33965571, year = {2021}, author = {Schuhmacher, A and Gatto, A and Kuss, M and Gassmann, O and Hinder, M}, title = {Big Techs and startups in pharmaceutical R&D - A 2020 perspective on artificial intelligence.}, journal = {Drug discovery today}, volume = {26}, number = {10}, pages = {2226-2231}, doi = {10.1016/j.drudis.2021.04.028}, pmid = {33965571}, issn = {1878-5832}, mesh = {Artificial Intelligence/*trends ; Drug Development/*trends ; Drug Discovery/trends ; Drug Industry/*trends ; Entrepreneurship ; Humans ; Machine Learning/trends ; Research/trends ; Technology/trends ; }, abstract = {We investigated what kind of artificial intelligence (AI) technologies are utilized in pharmaceutical research and development (R&D) and which sources of AI-related competencies can be leveraged by pharmaceutical companies. First, we found that machine learning (ML) is the dominating AI technology currently used in pharmaceutical R&D. Second, both Big Techs and AI startups are competent knowledge bases for AI applications. 
Big Techs have long-lasting experience in the digital field and offer more general IT solutions to support pharmaceutical companies in cloud computing, health monitoring, diagnostics or clinical trial management, whereas startups can provide more specific AI services to address special issues in the drug-discovery space.}, } @article {pmid33959420, year = {2021}, author = {Mora-Márquez, F and Vázquez-Poletti, JL and López de Heredia, U}, title = {NGScloud2: optimized bioinformatic analysis using Amazon Web Services.}, journal = {PeerJ}, volume = {9}, number = {}, pages = {e11237}, pmid = {33959420}, issn = {2167-8359}, abstract = {BACKGROUND: NGScloud was a bioinformatic system developed to perform de novo RNAseq analysis of non-model species by exploiting the cloud computing capabilities of Amazon Web Services. The rapid changes undergone in the way this cloud computing service operates, along with the continuous release of novel bioinformatic applications to analyze next generation sequencing data, have made the software obsolete. NGScloud2 is an enhanced and expanded version of NGScloud that permits the access to ad hoc cloud computing infrastructure, scaled according to the complexity of each experiment.

METHODS: NGScloud2 presents major technical improvements, such as the possibility of running spot instances and the most updated AWS instances types, that can lead to significant cost savings. As compared to its initial implementation, this improved version updates and includes common applications for de novo RNAseq analysis, and incorporates tools to operate workflows of bioinformatic analysis of reference-based RNAseq, RADseq and functional annotation. NGScloud2 optimizes the access to Amazon's large computing infrastructures to easily run popular bioinformatic software applications, otherwise inaccessible to non-specialized users lacking suitable hardware infrastructures.

RESULTS: The correct performance of the pipelines for de novo RNAseq, reference-based RNAseq, RADseq and functional annotation was tested with real experimental data, providing workflow performance estimates and tips to make optimal use of NGScloud2. Further, we provide a qualitative comparison of NGScloud2 vs. the Galaxy framework. NGScloud2 code, instructions for software installation and use are available at https://github.com/GGFHF/NGScloud2. NGScloud2 includes a companion package, NGShelper that contains Python utilities to post-process the output of the pipelines for downstream analysis at https://github.com/GGFHF/NGShelper.}, } @article {pmid33954252, year = {2021}, author = {Raza, S and Ayzed Mirza, M and Ahmad, S and Asif, M and Rasheed, MB and Ghadi, Y}, title = {A vehicle to vehicle relay-based task offloading scheme in Vehicular Communication Networks.}, journal = {PeerJ. Computer science}, volume = {7}, number = {}, pages = {e486}, pmid = {33954252}, issn = {2376-5992}, abstract = {Vehicular edge computing (VEC) is a potential field that distributes computational tasks between VEC servers and local vehicular terminals, hence improve vehicular services. At present, vehicles' intelligence and capabilities are rapidly improving, which will likely support many new and exciting applications. The network resources are well-utilized by exploiting neighboring vehicles' available resources while mitigating the VEC server's heavy burden. However, due to the vehicles' mobility, network topology, and the available computing resources change rapidly, which are difficult to predict. To tackle this problem, we investigate the task offloading schemes by utilizing vehicle to vehicle and vehicle to infrastructure communication modes and exploiting the vehicle's under-utilized computation and communication resources, and taking the cost and time consumption into account. We present a promising relay task-offloading scheme in vehicular edge computing (RVEC). 
According to this scheme, the tasks are offloaded in a vehicle to vehicle relay for computation while being transmitted to VEC servers. Numerical results illustrate that the RVEC scheme substantially enhances the network's overall offloading cost.}, } @article {pmid33948273, year = {2020}, author = {Vignolo, SM and Diray-Arce, J and McEnaney, K and Rao, S and Shannon, CP and Idoko, OT and Cole, F and Darboe, A and Cessay, F and Ben-Othman, R and Tebbutt, SJ and Kampmann, B and Levy, O and Ozonoff, A}, title = {A cloud-based bioinformatic analytic infrastructure and Data Management Core for the Expanded Program on Immunization Consortium.}, journal = {Journal of clinical and translational science}, volume = {5}, number = {1}, pages = {e52}, pmid = {33948273}, issn = {2059-8661}, support = {MC_UU_00026/2/MRC_/Medical Research Council/United Kingdom ; U19 AI118608/AI/NIAID NIH HHS/United States ; }, abstract = {The Expanded Program for Immunization Consortium - Human Immunology Project Consortium study aims to employ systems biology to identify and characterize vaccine-induced biomarkers that predict immunogenicity in newborns. Key to this effort is the establishment of the Data Management Core (DMC) to provide reliable data and bioinformatic infrastructure for centralized curation, storage, and analysis of multiple de-identified "omic" datasets. The DMC established a cloud-based architecture using Amazon Web Services to track, store, and share data according to National Institutes of Health standards. The DMC tracks biological samples during collection, shipping, and processing while capturing sample metadata and associated clinical data. Multi-omic datasets are stored in access-controlled Amazon Simple Storage Service (S3) for data security and file version control. All data undergo quality control processes at the generating site followed by DMC validation for quality assurance. The DMC maintains a controlled computing environment for data analysis and integration. 
Upon publication, the DMC deposits finalized datasets to public repositories. The DMC architecture provides resources and scientific expertise to accelerate translational discovery. Robust operations allow rapid sharing of results across the project team. Maintenance of data quality standards and public data deposition will further benefit the scientific community.}, } @article {pmid33946909, year = {2021}, author = {Fröhlich, P and Gelenbe, E and Fiołka, J and Chęciński, J and Nowak, M and Filus, Z}, title = {Smart SDN Management of Fog Services to Optimize QoS and Energy.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {9}, pages = {}, pmid = {33946909}, issn = {1424-8220}, support = {780139//Horizon 2020 Framework Programme/ ; }, abstract = {The short latency required by IoT devices that need to access specific services have led to the development of Fog architectures that can serve as a useful intermediary between IoT systems and the Cloud. However, the massive numbers of IoT devices that are being deployed raise concerns about the power consumption of such systems as the number of IoT devices and Fog servers increase. Thus, in this paper, we describe a software-defined network (SDN)-based control scheme for client-server interaction that constantly measures ongoing client-server response times and estimates network power consumption, in order to select connection paths that minimize a composite goal function, including both QoS and power consumption. The approach using reinforcement learning with neural networks has been implemented in a test-bed and is detailed in this paper. 
Experiments are presented that show the effectiveness of our proposed system in the presence of a time-varying workload of client-to-service requests, resulting in a reduction of power consumption of approximately 15% for an average response time increase of under 2%.}, } @article {pmid33941078, year = {2021}, author = {Ji, SS and German, CA and Lange, K and Sinsheimer, JS and Zhou, H and Zhou, J and Sobel, EM}, title = {Modern simulation utilities for genetic analysis.}, journal = {BMC bioinformatics}, volume = {22}, number = {1}, pages = {228}, pmid = {33941078}, issn = {1471-2105}, support = {R01 HG009120/NH/NIH HHS/United States ; P30 ES006694/ES/NIEHS NIH HHS/United States ; R01 HG006139/HG/NHGRI NIH HHS/United States ; R01 GM053275/NH/NIH HHS/United States ; T32 HG002536/HG/NHGRI NIH HHS/United States ; R35 GM141798/GM/NIGMS NIH HHS/United States ; K01 DK106116/DK/NIDDK NIH HHS/United States ; R01 HG006139/NH/NIH HHS/United States ; T32 HG002536/NH/NIH HHS/United States ; K01 DK106116/NH/NIH HHS/United States ; R01 GM053275/GM/NIGMS NIH HHS/United States ; }, mesh = {Aged ; *Cloud Computing ; Computer Simulation ; *Genetic Testing ; Humans ; Pedigree ; Phenotype ; }, abstract = {BACKGROUND: Statistical geneticists employ simulation to estimate the power of proposed studies, test new analysis tools, and evaluate properties of causal models. Although there are existing trait simulators, there is ample room for modernization. For example, most phenotype simulators are limited to Gaussian traits or traits transformable to normality, while ignoring qualitative traits and realistic, non-normal trait distributions. Also, modern computer languages, such as Julia, that accommodate parallelization and cloud-based computing are now mainstream but rarely used in older applications. To meet the challenges of contemporary big studies, it is important for geneticists to adopt new computational tools.

RESULTS: We present TraitSimulation, an open-source Julia package that makes it trivial to quickly simulate phenotypes under a variety of genetic architectures. This package is integrated into our OpenMendel suite for easy downstream analyses. Julia was purpose-built for scientific programming and provides tremendous speed and memory efficiency, easy access to multi-CPU and GPU hardware, and to distributed and cloud-based parallelization. TraitSimulation is designed to encourage flexible trait simulation, including via the standard devices of applied statistics, generalized linear models (GLMs) and generalized linear mixed models (GLMMs). TraitSimulation also accommodates many study designs: unrelateds, sibships, pedigrees, or a mixture of all three. (Of course, for data with pedigrees or cryptic relationships, the simulation process must include the genetic dependencies among the individuals.) We consider an assortment of trait models and study designs to illustrate integrated simulation and analysis pipelines. Step-by-step instructions for these analyses are available in our electronic Jupyter notebooks on Github. These interactive notebooks are ideal for reproducible research.

CONCLUSION: The TraitSimulation package has three main advantages. (1) It leverages the computational efficiency and ease of use of Julia to provide extremely fast, straightforward simulation of even the most complex genetic models, including GLMs and GLMMs. (2) It can be operated entirely within, but is not limited to, the integrated analysis pipeline of OpenMendel. And finally (3), by allowing a wider range of more realistic phenotype models, TraitSimulation brings power calculations and diagnostic tools closer to what investigators might see in real-world analyses.}, } @article {pmid33938423, year = {2021}, author = {Pascual-Ferrá, P and Alperstein, N and Barnett, DJ}, title = {A Multi-platform Approach to Monitoring Negative Dominance for COVID-19 Vaccine-Related Information Online.}, journal = {Disaster medicine and public health preparedness}, volume = {}, number = {}, pages = {1-24}, pmid = {33938423}, issn = {1938-744X}, abstract = {OBJECTIVE: The aim of this study was to test the appearance of negative dominance in COVID-19 vaccine-related information and activity online. We hypothesized that if negative dominance appeared, it would be a reflection of peaks in adverse events related to the vaccine, that negative content would attract more engagement on social media than other vaccine-related posts, and posts referencing adverse events related to COVID-19 vaccination would have a higher average toxicity score.

METHODS: We collected data using Google Trends for search behavior, CrowdTangle for social media data, and Media Cloud for media stories, and compared them against the dates of key adverse events related to COVID-19. We used Communalytic to analyze the toxicity of social media posts by platform and topic.

RESULTS: While our first hypothesis was partially supported, with peaks in search behavior for image and YouTube videos driven by adverse events, we did not find negative dominance in other types of searches or patterns of attention by news media or on social media.

CONCLUSION: We did not find evidence in our data to prove the negative dominance of adverse events related to COVID-19 vaccination on social media. Future studies should corroborate these findings and, if consistent, focus on explaining why this may be the case.}, } @article {pmid33937619, year = {2021}, author = {Hook, DW and Porter, SJ}, title = {Scaling Scientometrics: Dimensions on Google BigQuery as an Infrastructure for Large-Scale Analysis.}, journal = {Frontiers in research metrics and analytics}, volume = {6}, number = {}, pages = {656233}, pmid = {33937619}, issn = {2504-0537}, abstract = {Cloud computing has the capacity to transform many parts of the research ecosystem, from particular research areas to overall strategic decision making and policy. Scientometrics sits at the boundary between research and the decision-making, policy-making, and evaluation processes that underpin research. One of the biggest challenges in research policy and strategy is having access to data in a way that allows for analysis that can respond in an iterative way to inform decisions. Many decisions are based on "global" measures such as benchmark metrics that are hard to source and hence are often nonspecific or outdated. The use of cloud technologies may be promising in addressing this area of providing data for research strategy and policy decisions. 
A novel visualisation technique is introduced and used as a means to explore the potential for scaling scientometrics by democratising both access to data and compute capacity using the cloud.}, } @article {pmid33925902, year = {2021}, author = {Kim, WS}, title = {Progressive Traffic-Oriented Resource Management for Reducing Network Congestion in Edge Computing.}, journal = {Entropy (Basel, Switzerland)}, volume = {23}, number = {5}, pages = {}, pmid = {33925902}, issn = {1099-4300}, support = {NRF-2020R1G1A1014096//National Research Foundation of Korea/ ; }, abstract = {Edge computing can deliver network services with low latency and real-time processing by providing cloud services at the network edge. Edge computing has a number of advantages such as low latency, locality, and network traffic distribution, but the associated resource management has become a significant challenge because of its inherent hierarchical, distributed, and heterogeneous nature. Various cloud-based network services such as crowd sensing, hierarchical deep learning systems, and cloud gaming each have their own traffic patterns and computing requirements. To provide a satisfactory user experience for these services, resource management that comprehensively considers service diversity, client usage patterns, and network performance indicators is required. In this study, an algorithm that simultaneously considers computing resources and network traffic load when deploying servers that provide edge services is proposed. The proposed algorithm generates candidate deployments based on factors that affect traffic load, such as the number of servers, server location, and client mapping according to service characteristics and usage. A final deployment plan is then established using a partial vector bin packing scheme that considers both the generated traffic and computing resources in the network. 
The proposed algorithm is evaluated using several simulations that consider actual network service and device characteristics.}, } @article {pmid33924090, year = {2021}, author = {Chen, YY and Chen, MH and Chang, CM and Chang, FS and Lin, YH}, title = {A Smart Home Energy Management System Using Two-Stage Non-Intrusive Appliance Load Monitoring over Fog-Cloud Analytics Based on Tridium's Niagara Framework for Residential Demand-Side Management.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {8}, pages = {}, pmid = {33924090}, issn = {1424-8220}, support = {MOST 109-3116-F-006-017-CC2//Ministry of Science and Technology, Taiwan/ ; MOST 109-2221-E-027-121-MY2//Ministry of Science and Technology, Taiwan/ ; O01109E048 (Industry-Academia Collaboration Project)//First International Computer, Inc. (FIC), Taiwan/ ; }, abstract = {Electricity is a vital resource for various human activities, supporting customers' lifestyles in today's modern technologically driven society. Effective demand-side management (DSM) can alleviate ever-increasing electricity demands that arise from customers in downstream sectors of a smart grid. Compared with the traditional means of energy management systems, non-intrusive appliance load monitoring (NIALM) monitors relevant electrical appliances in a non-intrusive manner. Fog (edge) computing addresses the need to capture, process and analyze data generated and gathered by Internet of Things (IoT) end devices, and is an advanced IoT paradigm for applications in which resources, such as computing capability, of a central data center acted as cloud computing are placed at the edge of the network. The literature leaves NIALM developed over fog-cloud computing and conducted as part of a home energy management system (HEMS). 
In this study, a Smart HEMS (SHEMS) prototype based on Tridium's Niagara Framework{\textregistered}
Therefore, ensuring trust will provide the confidence and belief that IoT devices and consequently IoS behave as expected. Before hosting trust models, suitable architecture for Fog computing is needed to provide scalability, fast data access, simple and efficient intra-communication, load balancing, decentralization, and availability. In this article, we propose scalable and efficient Chord-based horizontal architecture. We also show how trust modeling can be mapped to our proposed architecture. Extensive performance evaluation experiments have been conducted to evaluate the performance and the feasibility and also to verify the behavior of our proposed architecture.}, } @article {pmid33922893, year = {2021}, author = {Kim, T and Yoo, SE and Kim, Y}, title = {Edge/Fog Computing Technologies for IoT Infrastructure.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {9}, pages = {}, pmid = {33922893}, issn = {1424-8220}, abstract = {The prevalence of smart devices and cloud computing has led to an explosion in the amount of data generated by IoT devices [...].}, } @article {pmid33922751, year = {2021}, author = {Masip-Bruin, X and Marín-Tordera, E and Sánchez-López, S and Garcia, J and Jukan, A and Juan Ferrer, A and Queralt, A and Salis, A and Bartoli, A and Cankar, M and Cordeiro, C and Jensen, J and Kennedy, J}, title = {Managing the Cloud Continuum: Lessons Learnt from a Real Fog-to-Cloud Deployment.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {9}, pages = {}, pmid = {33922751}, issn = {1424-8220}, support = {730929//H2020 LEIT Information and Communication Technologies/ ; }, abstract = {The wide adoption of the recently coined fog and edge computing paradigms alongside conventional cloud computing creates a novel scenario, known as the cloud continuum, where services may benefit from the overall set of resources to optimize their execution. 
To operate successfully, such a cloud continuum scenario demands novel management strategies
With the scaling mechanisms provided by both the Infrastructure as a Service (IaaS) and Platform as a Service (PaaS)
The fusion of such big Sentinel and PlanetScope images requires less than 4 min in the experimental environment.}, } @article {pmid33921505, year = {2021}, author = {Vidana Morales, RY and Ortega Cisneros, S and Camacho Perez, JR and Sandoval Ibarra, F and Casas Carrillo, R}, title = {3D Simulation-Based Acoustic Wave Resonator Analysis and Validation Using Novel Finite Element Method Software.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {8}, pages = {}, pmid = {33921505}, issn = {1424-8220}, abstract = {This work illustrates the analysis of Film Bulk Acoustic Resonators (FBAR) using 3D Finite Element (FEM) simulations with the software OnScale in order to predict and improve resonator performance and quality before manufacturing. This kind of analysis minimizes manufacturing cycles by reducing design time with 3D simulations running on High-Performance Computing (HPC) cloud services. It also enables the identification of manufacturing effects on device performance. The simulation results are compared and validated with a manufactured FBAR device, previously reported, to further highlight the usefulness and advantages of the 3D simulations-based design process. In the 3D simulation results, some analysis challenges, like boundary condition definitions, mesh tuning, loss source tracing, and device quality estimations, were studied. 
Hence, it is possible to highlight that modern FEM solvers, like OnScale enable unprecedented FBAR analysis and design optimization.}, } @article {pmid33920249, year = {2021}, author = {Ye, Z and Yan, G and Wei, Y and Zhou, B and Li, N and Shen, S and Wang, L}, title = {Real-Time and Efficient Traffic Information Acquisition via Pavement Vibration IoT Monitoring System.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {8}, pages = {}, pmid = {33920249}, issn = {1424-8220}, support = {Z191100008019002//Beijing major science and technology projects/ ; }, abstract = {Traditional road-embedded monitoring systems for traffic monitoring have the disadvantages of a short life, high energy consumption and data redundancy, resulting in insufficient durability and high cost. In order to improve the durability and efficiency of the road-embedded monitoring system, a pavement vibration monitoring system is developed based on the Internet of things (IoT). The system includes multi-acceleration sensing nodes, a gateway, and a cloud platform. The key design principles and technologies of each part of the system are proposed, which provides valuable experience for the application of IoT monitoring technology in road infrastructures. Characterized by low power consumption, distributed computing, and high extensibility properties, the pavement vibration IoT monitoring system can realize the monitoring, transmission, and analysis of pavement vibration signal, and acquires the real-time traffic information. 
This road-embedded system improves the intellectual capacity of road infrastructure and is conducive to the construction of a new generation of smart roads.}, } @article {pmid33920075, year = {2021}, author = {Molina-Molina, JC and Salhaoui, M and Guerrero-González, A and Arioua, M}, title = {Autonomous Marine Robot Based on AI Recognition for Permanent Surveillance in Marine Protected Areas.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {8}, pages = {}, pmid = {33920075}, issn = {1424-8220}, support = {PROYECTO VIGIA: SISTEMA DE VIGILANCIA COSTERO BASADO EN VEHÍCULOS AUTÓNOMOS DE SUPERFICIE.//Consejería de Agua, Agricultura, Ganadería, Pesca y Medio Ambiente, Comunidad Autónoma de la Región de Murcia/ ; PROYECTO OOMMUR: OBSERVATORIO OCEANOGRÁFICO MÓVIL DE LA REGIÓN DE MURCIA//Consejería de Empleo, Universidades, Empresa y Medio Ambiente, Dirección General de Comercio, Consumo y Simplificación administrativa/ ; }, mesh = {Artificial Intelligence ; Biodiversity ; Conservation of Natural Resources ; *Ecosystem ; Humans ; Oceans and Seas ; *Robotics ; }, abstract = {The world's oceans are one of the most valuable sources of biodiversity and resources on the planet, although there are areas where the marine ecosystem is threatened by human activities. Marine protected areas (MPAs) are distinctive spaces protected by law due to their unique characteristics, such as being the habitat of endangered marine species. Even with this protection, there are still illegal activities such as poaching or anchoring that threaten the survival of different marine species. In this context, we propose an autonomous surface vehicle (ASV) model system for the surveillance of marine areas by detecting and recognizing vessels through artificial intelligence (AI)-based image recognition services, in search of those carrying out illegal activities. Cloud and edge AI computing technologies were used for computer vision. 
These technologies have proven to be accurate and reliable in detecting shapes and objects for which they have been trained. Azure edge and cloud vision services offer the best option in terms of accuracy for this task. Due to the lack of 4G and 5G coverage in offshore marine environments, it is necessary to use radio links with a coastal base station to ensure communications, which may result in a high response time due to the high latency involved. The analysis of on-board images may not be sufficiently accurate; therefore, we proposed a smart algorithm for autonomy optimization by selecting the proper AI technology according to the current scenario (SAAO) capable of selecting the best AI source for the current scenario in real time, according to the required recognition accuracy or low latency. The SAAO optimizes the execution, efficiency, risk reduction, and results of each stage of the surveillance mission, taking appropriate decisions by selecting either cloud or edge vision models without human intervention.}, } @article {pmid33919222, year = {2021}, author = {Dos Anjos, JCS and Gross, JLG and Matteussi, KJ and González, GV and Leithardt, VRQ and Geyer, CFR}, title = {An Algorithm to Minimize Energy Consumption and Elapsed Time for IoT Workloads in a Hybrid Architecture.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {9}, pages = {}, pmid = {33919222}, issn = {1424-8220}, support = {J118//Consejería De Economía Y Empleo: System for simulation and training in advanced techniques for the occupational risk prevention through the design of hybrid-reality environments with ref. J118/ ; }, abstract = {Advances in communication technologies have made the interaction of small devices, such as smartphones, wearables, and sensors, scattered on the Internet, bringing a whole new set of complex applications with ever greater task processing needs. These Internet of things (IoT) devices run on batteries with strict energy restrictions. 
They tend to offload task processing to remote servers, usually to cloud computing (CC) in datacenters geographically located away from the IoT device. In such a context, this work proposes a dynamic cost model to minimize energy consumption and task processing time for IoT scenarios in mobile edge computing environments. Our approach allows for a detailed cost model, with an algorithm called TEMS that considers energy, time consumed during processing, the cost of data transmission, and energy in idle devices. The task scheduling chooses among cloud or mobile edge computing (MEC) server or local IoT devices to achieve better execution time with lower cost. The simulated environment evaluation saved up to 51.6% energy consumption and improved task completion time up to 86.6%.}, } @article {pmid33918614, year = {2021}, author = {Kyung, Y}, title = {Prioritized Task Distribution Considering Opportunistic Fog Computing Nodes.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {8}, pages = {}, pmid = {33918614}, issn = {1424-8220}, abstract = {As service latency and core network load relates to performance issues in the conventional cloud-based computing environment, the fog computing system has gained a lot of interest. However, since the load can be concentrated on specific fog computing nodes because of spatial and temporal service characteristics, performance degradation can occur, resulting in quality of service (QoS) degradation, especially for delay-sensitive services. Therefore, this paper proposes a prioritized task distribution scheme, which considers static as well as opportunistic fog computing nodes according to their mobility feature. Based on the requirements of offloaded tasks, the proposed scheme supports delay sensitive task processing at the static fog node and delay in-sensitive tasks by means of opportunistic fog nodes for task distribution. 
To assess the performance of the proposed scheme, we develop an analytic model for the service response delay. Extensive simulation results are given to validate the analytic model and to show the performance of the proposed scheme, compared to the conventional schemes in terms of service response delay and outage probability.}, } @article {pmid33918443, year = {2021}, author = {Zhang, P and Zhang, M and Liu, J}, title = {Real-Time HD Map Change Detection for Crowdsourcing Update Based on Mid-to-High-End Sensors.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {7}, pages = {}, pmid = {33918443}, issn = {1424-8220}, abstract = {Continuous maintenance and real-time update of high-definition (HD) maps is a big challenge. With the development of autonomous driving, more and more vehicles are equipped with a variety of advanced sensors and a powerful computing platform. Based on mid-to-high-end sensors including an industry camera, a high-end Global Navigation Satellite System (GNSS)/Inertial Measurement Unit (IMU), and an onboard computing platform, a real-time HD map change detection method for crowdsourcing update is proposed in this paper. First, a mature commercial integrated navigation product is directly used to achieve a self-positioning accuracy of 20 cm on average. Second, an improved network based on BiSeNet is utilized for real-time semantic segmentation. It achieves the result of 83.9% IOU (Intersection over Union) on Nvidia Pegasus at 31 FPS. Third, a visual Simultaneous Localization and Mapping (SLAM) associated with pixel type information is performed to obtain the semantic point cloud data of features such as lane dividers, road markings, and other static objects. Finally, the semantic point cloud data is vectorized after denoising and clustering, and the results are matched with a pre-constructed HD map to confirm map elements that have not changed and generate new elements when appearing. 
The experiment conducted in Beijing shows that the method proposed is effective for crowdsourcing update of HD maps.}, } @article {pmid33918246, year = {2021}, author = {Chang, SC and Lu, MT and Pan, TH and Chen, CS}, title = {Evaluating the E-Health Cloud Computing Systems Adoption in Taiwan's Healthcare Industry.}, journal = {Life (Basel, Switzerland)}, volume = {11}, number = {4}, pages = {}, pmid = {33918246}, issn = {2075-1729}, abstract = {Although the electronic health (e-health) cloud computing system is a promising innovation, its adoption in the healthcare industry has been slow. This study investigated the adoption of e-health cloud computing systems in the healthcare industry and considered security functions, management, cloud service delivery, and cloud software for e-health cloud computing systems. Although numerous studies have determined factors affecting e-health cloud computing systems, few comprehensive reviews of factors and their relations have been conducted. Therefore, this study investigated the relations between the factors affecting e-health cloud computing systems by using a multiple criteria decision-making technique, in which decision-making trial and evaluation laboratory (DEMATEL), DANP (DEMATEL-based Analytic Network Process), and modified VIKOR (VlseKriterijumska Optimizacija I Kompromisno Resenje) approaches were combined. The intended level of adoption of an e-health cloud computing system could be determined by using the proposed approach. The results of a case study performed on the Taiwanese healthcare industry indicated that the cloud management function must be primarily enhanced and that cost effectiveness is the most significant factor in the adoption of e-health cloud computing. 
This result is valuable for allocating resources to decrease performance gaps in the Taiwanese healthcare industry.}, } @article {pmid33916901, year = {2021}, author = {Cafuta, D and Dodig, I and Cesar, I and Kramberger, T}, title = {Developing a Modern Greenhouse Scientific Research Facility-A Case Study.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {8}, pages = {}, pmid = {33916901}, issn = {1424-8220}, support = {IP-2019-04//Hrvatska Zaklada za Znanost/ ; }, mesh = {*Algorithms ; *Artificial Intelligence ; *Greenhouse Effect ; Reproducibility of Results ; }, abstract = {Multidisciplinary approaches in science are still rare, especially in completely different fields such as agronomy science and computer science. We aim to create a state-of-the-art floating ebb and flow system greenhouse that can be used in future scientific experiments. The objective is to create a self-sufficient greenhouse with sensors, cloud connectivity, and artificial intelligence for real-time data processing and decision making. We investigated various approaches and proposed an optimal solution that can be used in much future research on plant growth in floating ebb and flow systems. A novel microclimate pocket-detection solution is proposed using an automatically guided suspended platform sensor system. Furthermore, we propose a methodology for replacing sensor data knowledge with artificial intelligence for plant health estimation. Plant health estimation allows longer ebb periods and increases the nutrient level in the final product. With intelligent design and the use of artificial intelligence algorithms, we will reduce the cost of plant research and increase the usability and reliability of research data. 
Thus, our newly developed greenhouse would be more suitable for plant growth research and production.}, } @article {pmid33916818, year = {2021}, author = {Alenizi, F and Rana, O}, title = {Dynamically Controlling Offloading Thresholds in Fog Systems.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {7}, pages = {}, pmid = {33916818}, issn = {1424-8220}, abstract = {Fog computing is a potential solution to overcome the shortcomings of cloud-based processing of IoT tasks. These drawbacks can include high latency, location awareness, and security-attributed to the distance between IoT devices and cloud-hosted servers. Although fog computing has evolved as a solution to address these challenges, it is known for having limited resources that need to be effectively utilized, or its advantages could be lost. Computational offloading and resource management are critical to be able to benefit from fog computing systems. We introduce a dynamic, online, offloading scheme that involves the execution of delay-sensitive tasks. This paper proposes an architecture of a fog node able to adjust its offloading threshold dynamically (i.e., the criteria by which a fog node decides whether tasks should be offloaded rather than executed locally) using two algorithms: dynamic task scheduling (DTS) and dynamic energy control (DEC). These algorithms seek to minimize overall delay, maximize throughput, and minimize energy consumption at the fog layer. 
Compared to other benchmarks, our approach could reduce latency by up to 95%, improve throughput by 71%, and reduce energy consumption by up to 67% in fog nodes.}, } @article {pmid33916120, year = {2021}, author = {Kelly, C and Pitropakis, N and Mylonas, A and McKeown, S and Buchanan, WJ}, title = {A Comparative Analysis of Honeypots on Different Cloud Platforms.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {7}, pages = {}, pmid = {33916120}, issn = {1424-8220}, support = {952672//Horizon 2020 Framework Programme/ ; }, abstract = {In 2019, the majority of companies used at least one cloud computing service and it is expected that by the end of 2021, cloud data centres will process 94% of workloads. The financial and operational advantages of moving IT infrastructure to specialised cloud providers are clearly compelling. However, with such volumes of private and personal data being stored in cloud computing infrastructures, security concerns have risen. Motivated to monitor and analyze adversarial activities, we deploy multiple honeypots on the popular cloud providers, namely Amazon Web Services (AWS), Google Cloud Platform (GCP) and Microsoft Azure, and operate them in multiple regions. Logs were collected over a period of three weeks in May 2020 and then comparatively analysed, evaluated and visualised. Our work revealed heterogeneous attackers' activity on each cloud provider, both when one considers the volume and origin of attacks, as well as the targeted services and vulnerabilities. Our results highlight the attempt of threat actors to abuse popular services, which were widely used during the COVID-19 pandemic for remote working, such as remote desktop sharing. Furthermore, the attacks seem to exit not only from countries that are commonly found to be the source of attacks, such as China, Russia and the United States, but also from uncommon ones such as Vietnam, India and Venezuela. 
Our results provide insights on the adversarial activity during our experiments, which can be used to inform the Situational Awareness operations of an organisation.}, } @article {pmid33908021, year = {2021}, author = {Katz, JE}, title = {Deploying Mass Spectrometric Data Analysis in the Amazon AWS Cloud Computing Environment.}, journal = {Methods in molecular biology (Clifton, N.J.)}, volume = {2271}, number = {}, pages = {375-397}, doi = {10.1007/978-1-0716-1241-5_26}, pmid = {33908021}, issn = {1940-6029}, mesh = {*Cloud Computing ; Information Dissemination ; *Mass Spectrometry ; Research Design ; Software ; Workflow ; }, abstract = {There are many advantages for deploying a mass spectrometry workflow to the cloud. While "cloud computing" can have many meanings, in this case, I am simply referring to a virtual computer that is remotely accessible over the Internet. This "computer" can have as many or few resources (CPU, RAM, disk space, etc.) as your demands require and those resources can be changed as you need without requiring complete reinstalls. Systems can be easily "checkpointed" and restored. I will describe how to deploy virtualized, remotely accessible computers on which you can perform your basic mass spectrometry data analysis. This use is a quite restricted microcosm of what is available under the umbrella of "cloud computing" but it is also the (useful!) niche use for which straightforward how-to documentation is lacking.This chapter is intended for people with little or no experience in creating cloud computing instances. Executing the steps in this chapter, will empower you to instantiate a computer with the performance of your choosing with preconfigured software already installed using the Amazon Web Service (AWS) suite of tools. 
You can use this for use cases that span when you need limited access to high end computing thru when you give your collaborators access to preconfigured computers to look at their data.}, } @article {pmid33907414, year = {2021}, author = {Ningrum, DNA and Yuan, SP and Kung, WM and Wu, CC and Tzeng, IS and Huang, CY and Li, JY and Wang, YC}, title = {Deep Learning Classifier with Patient's Metadata of Dermoscopic Images in Malignant Melanoma Detection.}, journal = {Journal of multidisciplinary healthcare}, volume = {14}, number = {}, pages = {877-885}, pmid = {33907414}, issn = {1178-2390}, abstract = {BACKGROUND: Incidence of skin cancer is one of the global burdens of malignancies that increase each year, with melanoma being the deadliest one. Imaging-based automated skin cancer detection still remains challenging owing to variability in the skin lesions and limited standard dataset availability. Recent research indicates the potential of deep convolutional neural networks (CNN) in predicting outcomes from simple as well as highly complicated images. However, its implementation requires high-class computational facility, that is not feasible in low resource and remote areas of health care. There is potential in combining image and patient's metadata, but the study is still lacking.

OBJECTIVE: We want to develop malignant melanoma detection based on dermoscopic images and patient's metadata using an artificial intelligence (AI) model that will work on low-resource devices.

METHODS: We used an open-access dermatology repository of International Skin Imaging Collaboration (ISIC) Archive dataset consisting of 23,801 biopsy-proven dermoscopic images. We tested performance for binary classification malignant melanomas vs nonmalignant melanomas. From 1200 sample images, we split the data for training (72%), validation (18%), and testing (10%). We compared CNN with image data only (CNN model) vs CNN for image data combined with an artificial neural network (ANN) for patient's metadata (CNN+ANN model).

RESULTS: The balanced accuracy for CNN+ANN model was higher (92.34%) than the CNN model (73.69%). Combination of the patient's metadata using ANN prevents the overfitting that occurs in the CNN model using dermoscopic images only. The small size (24 MB) of this model made it possible to run on a medium class computer without the need of cloud computing, suitable for deployment on devices with limited resources.

CONCLUSION: The CNN+ANN model can increase the accuracy of classification in malignant melanoma detection even with limited data and is promising for development as a screening device in remote and low resources health care.}, } @article {pmid33892568, year = {2021}, author = {Wang, B and Liu, F}, title = {Task arrival based energy efficient optimization in smart-IoT data center.}, journal = {Mathematical biosciences and engineering : MBE}, volume = {18}, number = {3}, pages = {2713-2732}, doi = {10.3934/mbe.2021138}, pmid = {33892568}, issn = {1551-0018}, abstract = {With the growth and expansion of cloud data centers, energy consumption has become an urgent issue for smart cities system. However, most of the current resource management approaches focus on the traditional cloud computing scheduling scenarios but fail to consider the feature of workloads from the Internet of Things (IoT) devices. In this paper, we analyze the characteristic of IoT requests and propose an improved Poisson task model with a novel mechanism to predict the arrivals of IoT requests. To achieve the trade-off between energy saving and service level agreement, we introduce an adaptive energy efficiency model to adjust the priority of the optimization objectives. Finally, an energy-efficient virtual machine scheduling algorithm is proposed to maximize the energy efficiency of the data center. 
The experimental results show that our strategy can achieve the best performance in comparison to other popular schemes.}, } @article {pmid33882012, year = {2021}, author = {Lee, H and Kang, J and Yeo, J}, title = {Medical Specialty Recommendations by an Artificial Intelligence Chatbot on a Smartphone: Development and Deployment.}, journal = {Journal of medical Internet research}, volume = {23}, number = {5}, pages = {e27460}, pmid = {33882012}, issn = {1438-8871}, mesh = {COVID-19/*epidemiology ; *Deep Learning ; Humans ; Pandemics ; Primary Health Care/*methods ; *Referral and Consultation ; SARS-CoV-2/isolation & purification ; *Smartphone ; Specialization ; Telemedicine/*methods ; }, abstract = {BACKGROUND: The COVID-19 pandemic has limited daily activities and even contact between patients and primary care providers. This makes it more difficult to provide adequate primary care services, which include connecting patients to an appropriate medical specialist. A smartphone-compatible artificial intelligence (AI) chatbot that classifies patients' symptoms and recommends the appropriate medical specialty could provide a valuable solution.

OBJECTIVE: In order to establish a contactless method of recommending the appropriate medical specialty, this study aimed to construct a deep learning-based natural language processing (NLP) pipeline and to develop an AI chatbot that can be used on a smartphone.

METHODS: We collected 118,008 sentences containing information on symptoms with labels (medical specialty), conducted data cleansing, and finally constructed a pipeline of 51,134 sentences for this study. Several deep learning models, including 4 different long short-term memory (LSTM) models with or without attention and with or without a pretrained FastText embedding layer, as well as bidirectional encoder representations from transformers for NLP, were trained and validated using a randomly selected test data set. The performance of the models was evaluated on the basis of the precision, recall, F1-score, and area under the receiver operating characteristic curve (AUC). An AI chatbot was also designed to make it easy for patients to use this specialty recommendation system. We used an open-source framework called "Alpha" to develop our AI chatbot. This takes the form of a web-based app with a frontend chat interface capable of conversing in text and a backend cloud-based server application to handle data collection, process the data with a deep learning model, and offer the medical specialty recommendation in a responsive web that is compatible with both desktops and smartphones.

RESULTS: The bidirectional encoder representations from transformers model yielded the best performance, with an AUC of 0.964 and F1-score of 0.768, followed by LSTM model with embedding vectors, with an AUC of 0.965 and F1-score of 0.739. Considering the limitations of computing resources and the wide availability of smartphones, the LSTM model with embedding vectors trained on our data set was adopted for our AI chatbot service. We also deployed an Alpha version of the AI chatbot to be executed on both desktops and smartphones.

CONCLUSIONS: With the increasing need for telemedicine during the current COVID-19 pandemic, an AI chatbot with a deep learning-based NLP model that can recommend a medical specialty to patients through their smartphones would be exceedingly useful. This chatbot allows patients to identify the proper medical specialist in a rapid and contactless manner, based on their symptoms, thus potentially supporting both patients and primary care providers.}, } @article {pmid33879200, year = {2021}, author = {Youn, YC and Pyun, JM and Ryu, N and Baek, MJ and Jang, JW and Park, YH and Ahn, SW and Shin, HW and Park, KY and Kim, SY}, title = {Use of the Clock Drawing Test and the Rey-Osterrieth Complex Figure Test-copy with convolutional neural networks to predict cognitive impairment.}, journal = {Alzheimer's research & therapy}, volume = {13}, number = {1}, pages = {85}, pmid = {33879200}, issn = {1758-9193}, mesh = {Cognition ; *Cognitive Dysfunction/diagnosis ; Humans ; Mass Screening ; Neural Networks, Computer ; Neuropsychological Tests ; }, abstract = {BACKGROUND: The Clock Drawing Test (CDT) and Rey-Osterrieth Complex Figure Test (RCFT) are widely used as a part of neuropsychological test batteries to assess cognitive function. Our objective was to confirm the prediction accuracies of the RCFT-copy and CDT for cognitive impairment (CI) using convolutional neural network algorithms as a screening tool.

METHODS: The CDT and RCFT-copy data were obtained from patients aged 60-80 years who had more than 6 years of education. In total, 747 CDT and 980 RCFT-copy figures were utilized. Convolutional neural network algorithms using TensorFlow (ver. 2.3.0) on the Colab cloud platform (www.colab.research.google.com) were used for preprocessing and modeling. We measured the prediction accuracy of each drawing test 10 times using this dataset with the following classes: normal cognition (NC) vs. mildly impaired cognition (MI), NC vs. severely impaired cognition (SI), and NC vs. CI (MI + SI).

RESULTS: The accuracy of the CDT was better for differentiating MI (CDT, 78.04 ± 2.75; RCFT-copy, not being trained) and SI from NC (CDT, 91.45 ± 0.83; RCFT-copy, 90.27 ± 1.52); however, the RCFT-copy was better at predicting CI (CDT, 77.37 ± 1.77; RCFT, 83.52 ± 1.41). The accuracy for a 3-way classification (NC vs. MI vs. SI) was approximately 71% for both tests; no significant difference was found between them.

CONCLUSIONS: The two drawing tests showed good performance for predicting severe impairment of cognition; however, a drawing test alone is not enough to predict overall CI. There are some limitations to our study: the sample size was small, all the participants did not perform both the CDT and RCFT-copy, and only the copy condition of the RCFT was used. Algorithms involving memory performance and longitudinal changes are worth future exploration. These results may contribute to improved home-based healthcare delivery.}, } @article {pmid33861767, year = {2021}, author = {Li, Y and Wei, J and Wu, B and Wang, C and Wang, C and Zhang, Y and Yang, X}, title = {Obfuscating encrypted threshold signature algorithm and its applications in cloud computing.}, journal = {PloS one}, volume = {16}, number = {4}, pages = {e0250259}, pmid = {33861767}, issn = {1932-6203}, mesh = {*Algorithms ; *Cloud Computing ; *Computer Security ; Humans ; *Privacy ; }, abstract = {Current cloud computing causes serious restrictions to safeguarding users' data privacy. Since users' sensitive data is submitted in unencrypted forms to remote machines possessed and operated by untrusted service providers, users' sensitive data may be leaked by service providers. Program obfuscation shows the unique advantages that it can provide for cloud computing. In this paper, we construct an encrypted threshold signature functionality, which can outsource the threshold signing rights of users to cloud server securely by applying obfuscation, while revealing no more sensitive information. The obfuscator is proven to satisfy the average case virtual black box property and existentially unforgeable under the decisional linear (DLIN) assumption and computational Diffie-Hellman (CDH) assumption in the standard model. 
Moreover, we implement our scheme using the Java pairing-based cryptography library on a laptop.}, } @article {pmid33859193, year = {2021}, author = {von Chamier, L and Laine, RF and Jukkala, J and Spahn, C and Krentzel, D and Nehme, E and Lerche, M and Hernández-Pérez, S and Mattila, PK and Karinou, E and Holden, S and Solak, AC and Krull, A and Buchholz, TO and Jones, ML and Royer, LA and Leterrier, C and Shechtman, Y and Jug, F and Heilemann, M and Jacquemet, G and Henriques, R}, title = {Democratising deep learning for microscopy with ZeroCostDL4Mic.}, journal = {Nature communications}, volume = {12}, number = {1}, pages = {2276}, pmid = {33859193}, issn = {2041-1723}, support = {MR/K015826/1/MRC_/Medical Research Council/United Kingdom ; MC_UU_00012/1/MRC_/Medical Research Council/United Kingdom ; FC001999/CRUK_/Cancer Research UK/United Kingdom ; 203276/Z/16/Z/WT_/Wellcome Trust/United Kingdom ; 206670/Z/17/Z/WT_/Wellcome Trust/United Kingdom ; FC001999/MRC_/Medical Research Council/United Kingdom ; /WT_/Wellcome Trust/United Kingdom ; FC001999/ARC_/Arthritis Research UK/United Kingdom ; MR/T027924/1/MRC_/Medical Research Council/United Kingdom ; }, mesh = {Animals ; Cell Line, Tumor ; Cloud Computing ; Datasets as Topic ; *Deep Learning ; Humans ; Image Processing, Computer-Assisted/*methods ; Microscopy/*methods ; Primary Cell Culture ; Rats ; Software ; }, abstract = {Deep Learning (DL) methods are powerful analytical tools for microscopy and can outperform conventional image processing pipelines. Despite the enthusiasm and innovations fuelled by DL technology, the need to access powerful and compatible resources to train DL networks leads to an accessibility barrier that novice users often find difficult to overcome. Here, we present ZeroCostDL4Mic, an entry-level platform simplifying DL access by leveraging the free, cloud-based computational resources of Google Colab. 
ZeroCostDL4Mic allows researchers with no coding expertise to train and apply key DL networks to perform tasks including segmentation (using U-Net and StarDist), object detection (using YOLOv2), denoising (using CARE and Noise2Void), super-resolution microscopy (using Deep-STORM), and image-to-image translation (using Label-free prediction - fnet, pix2pix and CycleGAN). Importantly, we provide suitable quantitative tools for each network to evaluate model performance, allowing model optimisation. We demonstrate the application of the platform to study multiple biological processes.}, } @article {pmid33840908, year = {2021}, author = {Meena, V and Gorripatti, M and Suriya Praba, T}, title = {Trust Enforced Computational Offloading for Health Care Applications in Fog Computing.}, journal = {Wireless personal communications}, volume = {119}, number = {2}, pages = {1369-1386}, pmid = {33840908}, issn = {0929-6212}, abstract = {Internet of Things (IoT) is a network of internet connected devices that generates huge amount of data every day. The usage of IoT devices such as smart wearables, smart phones, smart cities are increasing in the linear scale. Health care is one of the primary applications today that uses IoT devices. Data generated in this application may need computation, storage and data analytics operations which requires resourceful environment for remote patient health monitoring. The data related with health care applications are primarily private and should be readily available to the users. Enforcing these two constraints in cloud environment is a hard task. Fog computing is an emergent architecture for providing computation, storage, control and network services within user's proximity. To handle private data, the processing elements should be trustable entities in Fog environment. In this paper we propose novel Trust Enforced computation ofFLoading technique for trust worthy applications using fOg computiNg (TEFLON). 
The proposed system comprises of two algorithms namely optimal service offloader and trust assessment for addressing security and trust issues with reduced response time. And the simulation results show that proposed TEFLON framework improves success rate of fog collaboration with reduced average latency for delay sensitive applications and ensures trust for trustworthy applications.}, } @article {pmid33833714, year = {2021}, author = {Trenerry, B and Chng, S and Wang, Y and Suhaila, ZS and Lim, SS and Lu, HY and Oh, PH}, title = {Preparing Workplaces for Digital Transformation: An Integrative Review and Framework of Multi-Level Factors.}, journal = {Frontiers in psychology}, volume = {12}, number = {}, pages = {620766}, pmid = {33833714}, issn = {1664-1078}, abstract = {The rapid advancement of new digital technologies, such as smart technology, artificial intelligence (AI) and automation, robotics, cloud computing, and the Internet of Things (IoT), is fundamentally changing the nature of work and increasing concerns about the future of jobs and organizations. To keep pace with rapid disruption, companies need to update and transform business models to remain competitive. Meanwhile, the growth of advanced technologies is changing the types of skills and competencies needed in the workplace and demanded a shift in mindset among individuals, teams and organizations. The recent COVID-19 pandemic has accelerated digitalization trends, while heightening the importance of employee resilience and well-being in adapting to widespread job and technological disruption. Although digital transformation is a new and urgent imperative, there is a long trajectory of rigorous research that can readily be applied to grasp these emerging trends. Recent studies and reviews of digital transformation have primarily focused on the business and strategic levels, with only modest integration of employee-related factors. 
Our review article seeks to fill these critical gaps by identifying and consolidating key factors important for an organization's overarching digital transformation. We reviewed studies across multiple disciplines and integrated the findings into a multi-level framework. At the individual level, we propose five overarching factors related to effective digital transformation among employees: technology adoption; perceptions and attitudes toward technological change; skills and training; workplace resilience and adaptability, and work-related wellbeing. At the group-level, we identified three factors necessary for digital transformation: team communication and collaboration; workplace relationships and team identification, and team adaptability and resilience. Finally, at the organizational-level, we proposed three factors for digital transformation: leadership; human resources, and organizational culture/climate. Our review of the literature confirms that multi-level factors are important when planning for and embarking on digital transformation, thereby providing a framework for future research and practice.}, } @article {pmid33833303, year = {2021}, author = {Armgarth, A and Pantzare, S and Arven, P and Lassnig, R and Jinno, H and Gabrielsson, EO and Kifle, Y and Cherian, D and Arbring Sjöström, T and Berthou, G and Dowling, J and Someya, T and Wikner, JJ and Gustafsson, G and Simon, DT and Berggren, M}, title = {A digital nervous system aiming toward personalized IoT healthcare.}, journal = {Scientific reports}, volume = {11}, number = {1}, pages = {7757}, pmid = {33833303}, issn = {2045-2322}, abstract = {Body area networks (BANs), cloud computing, and machine learning are platforms that can potentially enable advanced healthcare outside the hospital. 
By applying distributed sensors and drug delivery devices on/in our body and connecting to such communication and decision-making technology, a system for remote diagnostics and therapy is achieved with additional autoregulation capabilities. Challenges with such autarchic on-body healthcare schemes relate to integrity and safety, and interfacing and transduction of electronic signals into biochemical signals, and vice versa. Here, we report a BAN, comprising flexible on-body organic bioelectronic sensors and actuators utilizing two parallel pathways for communication and decision-making. Data, recorded from strain sensors detecting body motion, are both securely transferred to the cloud for machine learning and improved decision-making, and sent through the body using a secure body-coupled communication protocol to auto-actuate delivery of neurotransmitters, all within seconds. We conclude that both highly stable and accurate sensing-from multiple sensors-are needed to enable robust decision making and limit the frequency of retraining. The holistic platform resembles the self-regulatory properties of the nervous system, i.e., the ability to sense, communicate, decide, and react accordingly, thus operating as a digital nervous system.}, } @article {pmid33830059, year = {2021}, author = {Li, Y and Ye, H and Ye, F and Liu, Y and Lv, L and Zhang, P and Zhang, X and Zhou, Y}, title = {The Current Situation and Future Prospects of Simulators in Dental Education.}, journal = {Journal of medical Internet research}, volume = {23}, number = {4}, pages = {e23635}, pmid = {33830059}, issn = {1438-8871}, mesh = {Clinical Competence ; Computer Simulation ; Education, Dental ; Humans ; Software ; *Virtual Reality ; }, abstract = {The application of virtual reality has become increasingly extensive as this technology has developed. 
In dental education, virtual reality is mainly used to assist or replace traditional methods of teaching clinical skills in preclinical training for several subjects, such as endodontics, prosthodontics, periodontics, implantology, and dental surgery. The application of dental simulators in teaching can make up for the deficiency of traditional teaching methods and reduce the teaching burden, improving convenience for both teachers and students. However, because of the technology limitations of virtual reality and force feedback, dental simulators still have many hardware and software disadvantages that have prevented them from being an alternative to traditional dental simulators as a primary skill training method. In the future, when combined with big data, cloud computing, 5G, and deep learning technology, dental simulators will be able to give students individualized learning assistance, and their functions will be more diverse and suitable for preclinical training. The purpose of this review is to provide an overview of current dental simulators on related technologies, advantages and disadvantages, methods of evaluating effectiveness, and future directions for development.}, } @article {pmid33824714, year = {2021}, author = {Li, F and Qu, Z and Li, R}, title = {Medical Cloud Computing Data Processing to Optimize the Effect of Drugs.}, journal = {Journal of healthcare engineering}, volume = {2021}, number = {}, pages = {5560691}, pmid = {33824714}, issn = {2040-2309}, mesh = {*Cloud Computing ; Endothelial Cells ; Information Storage and Retrieval ; *Pharmaceutical Preparations ; Software ; }, abstract = {In recent years, cloud computing technology is maturing in the process of growing. Hadoop originated from Apache Nutch and is an open-source cloud computing platform. Moreover, the platform is characterized by large scale, virtualization, strong stability, strong versatility, and support for scalability. 
It is necessary and far-reaching, based on the characteristics of unstructured medical images, to combine content-based medical image retrieval with the Hadoop cloud platform to conduct research. This study combines the impact mechanism of senile dementia vascular endothelial cells with cloud computing to construct a corresponding data retrieval platform of the cloud computing image set. Moreover, this study uses Hadoop's core framework distributed file system HDFS to upload images, store the images in the HDFS and image feature vectors in HBase, and use MapReduce programming mode to perform parallel retrieval, and each of the nodes cooperates with each other. The results show that the proposed method has certain effects and can be applied to medical research.}, } @article {pmid33822891, year = {2021}, author = {Arisdakessian, CG and Nigro, OD and Steward, GF and Poisson, G and Belcaid, M}, title = {CoCoNet: an efficient deep learning tool for viral metagenome binning.}, journal = {Bioinformatics (Oxford, England)}, volume = {37}, number = {18}, pages = {2803-2810}, doi = {10.1093/bioinformatics/btab213}, pmid = {33822891}, issn = {1367-4811}, support = {1636402//National Science Foundation Division of Ocean Sciences/ ; 1557349-Ike Wai//Office of Integrative Activities/ ; 1736030-G2P//Securing Hawaii's Water Future/ ; }, mesh = {Metagenome ; Algorithms ; *Deep Learning ; Software ; *Microbiota/genetics ; Sequence Analysis, DNA/methods ; Metagenomics/methods ; }, abstract = {MOTIVATION: Metagenomic approaches hold the potential to characterize microbial communities and unravel the intricate link between the microbiome and biological processes. Assembly is one of the most critical steps in metagenomics experiments. It consists of transforming overlapping DNA sequencing reads into sufficiently accurate representations of the community's genomes. This process is computationally difficult and commonly results in genomes fragmented across many contigs. 
Computational binning methods are used to mitigate fragmentation by partitioning contigs based on their sequence composition, abundance or chromosome organization into bins representing the community's genomes. Existing binning methods have been principally tuned for bacterial genomes and do not perform favorably on viral metagenomes.

RESULTS: We propose Composition and Coverage Network (CoCoNet), a new binning method for viral metagenomes that leverages the flexibility and the effectiveness of deep learning to model the co-occurrence of contigs belonging to the same viral genome and provide a rigorous framework for binning viral contigs. Our results show that CoCoNet substantially outperforms existing binning methods on viral datasets.

CoCoNet was implemented in Python and is available for download on PyPi (https://pypi.org/). The source code is hosted on GitHub at https://github.com/Puumanamana/CoCoNet and the documentation is available at https://coconet.readthedocs.io/en/latest/index.html. CoCoNet does not require extensive resources to run. For example, binning 100k contigs took about 4 h on 10 Intel CPU Cores (2.4 GHz), with a memory peak at 27 GB (see Supplementary Fig. S9). To process a large dataset, CoCoNet may need to be run on a high RAM capacity server. Such servers are typically available in high-performance or cloud computing settings.

SUPPLEMENTARY INFORMATION: Supplementary data are available at Bioinformatics online.}, } @article {pmid33817018, year = {2021}, author = {Iyer, TJ and Joseph Raj, AN and Ghildiyal, S and Nersisson, R}, title = {Performance analysis of lightweight CNN models to segment infectious lung tissues of COVID-19 cases from tomographic images.}, journal = {PeerJ. Computer science}, volume = {7}, number = {}, pages = {e368}, pmid = {33817018}, issn = {2376-5992}, abstract = {The pandemic of Coronavirus Disease-19 (COVID-19) has spread around the world, causing an existential health crisis. Automated detection of COVID-19 infections in the lungs from Computed Tomography (CT) images offers huge potential in tackling the problem of slow detection and augments the conventional diagnostic procedures. However, segmenting COVID-19 from CT Scans is problematic, due to high variations in the types of infections and low contrast between healthy and infected tissues. While segmenting Lung CT Scans for COVID-19, fast and accurate results are required and furthermore, due to the pandemic, most of the research community has opted for various cloud based servers such as Google Colab, etc. to develop their algorithms. High accuracy can be achieved using Deep Networks but the prediction time would vary as the resources are shared amongst many thus requiring the need to compare different lightweight segmentation model. To address this issue, we aim to analyze the segmentation of COVID-19 using four Convolutional Neural Networks (CNN). The images in our dataset are preprocessed where the motion artifacts are removed. The four networks are UNet, Segmentation Network (Seg Net), High-Resolution Network (HR Net) and VGG UNet. Trained on our dataset of more than 3,000 images, HR Net was found to be the best performing network achieving an accuracy of 96.24% and a Dice score of 0.9127. 
The analysis shows that lightweight CNN models perform better than other neural net models when used to segment infectious tissue due to COVID-19 from CT slices.}, } @article {pmid33817001, year = {2021}, author = {Rizwan Ali, M and Ahmad, F and Hasanain Chaudary, M and Ashfaq Khan, Z and Alqahtani, MA and Saad Alqurni, J and Ullah, Z and Khan, WU}, title = {Petri Net based modeling and analysis for improved resource utilization in cloud computing.}, journal = {PeerJ. Computer science}, volume = {7}, number = {}, pages = {e351}, pmid = {33817001}, issn = {2376-5992}, abstract = {The cloud is a shared pool of systems that provides multiple resources through the Internet, users can access a lot of computing power using their computer. However, with the strong migration rate of multiple applications towards the cloud, more disks and servers are required to store huge data. Most of the cloud storage service providers are replicating full copies of data over multiple data centers to ensure data availability. Further, the replication is not only a costly process but also a wastage of energy resources. Furthermore, erasure codes reduce the storage cost by splitting data in n chunks and storing these chunks into n + k different data centers, to tolerate k failures. Moreover, it also needs extra computation cost to regenerate the data object. Cache-A Replica On Modification (CAROM) is a hybrid file system that gets combined benefits from both the replication and erasure codes to reduce access latency and bandwidth consumption. However, in the literature, no formal analysis of CAROM is available which can validate its performance. To address this issue, this research firstly presents a colored Petri net based formal model of CAROM. The research proceeds by presenting a formal analysis and simulation to validate the performance of the proposed system. 
This paper contributes towards the utilization of resources in clouds by presenting a comprehensive formal analysis of CAROM.}, } @article {pmid33816176, year = {2021}, author = {Wan, KW and Wong, CH and Ip, HF and Fan, D and Yuen, PL and Fong, HY and Ying, M}, title = {Evaluation of the performance of traditional machine learning algorithms, convolutional neural network and AutoML Vision in ultrasound breast lesions classification: a comparative study.}, journal = {Quantitative imaging in medicine and surgery}, volume = {11}, number = {4}, pages = {1381-1393}, pmid = {33816176}, issn = {2223-4292}, abstract = {BACKGROUND: In recent years, there was an increasing popularity in applying artificial intelligence in the medical field from computer-aided diagnosis (CAD) to patient prognosis prediction. Given the fact that not all healthcare professionals have the required expertise to develop a CAD system, the aim of this study was to investigate the feasibility of using AutoML Vision, a highly automatic machine learning model, for future clinical applications by comparing AutoML Vision with some commonly used CAD algorithms in the differentiation of benign and malignant breast lesions on ultrasound.

METHODS: A total of 895 breast ultrasound images were obtained from the two online open-access ultrasound breast images datasets. Traditional machine learning models (comprising of seven commonly used CAD algorithms) with three content-based radiomic features (Hu Moments, Color Histogram, Haralick Texture) extracted, and a convolutional neural network (CNN) model were built using python language. AutoML Vision was trained in Google Cloud Platform. Sensitivity, specificity, F1 score and average precision (AUCPR) were used to evaluate the diagnostic performance of the models. Cochran's Q test was used to evaluate the statistical significance between all studied models and McNemar test was used as the post-hoc test to perform pairwise comparisons. The proposed AutoML model was also compared with the current related studies that involve similar medical imaging modalities in characterizing benign or malignant breast lesions.

RESULTS: There was significant difference in the diagnostic performance among all studied traditional machine learning classifiers (P<0.05). Random Forest achieved the best performance in the differentiation of benign and malignant breast lesions (accuracy: 90%; sensitivity: 71%; specificity: 100%; F1 score: 0.83; AUCPR: 0.90) which was statistically comparable to the performance of CNN (accuracy: 91%; sensitivity: 82%; specificity: 96%; F1 score: 0.87; AUCPR: 0.88) and AutoML Vision (accuracy: 86%; sensitivity: 84%; specificity: 88%; F1 score: 0.83; AUCPR: 0.95) based on Cochran's Q test (P>0.05).

CONCLUSIONS: In this study, the performance of AutoML Vision was not significantly different from that of Random Forest (the best classifier among traditional machine learning models) and CNN. AutoML Vision showed relatively high accuracy and comparable to current commonly used classifiers which may prompt for future application in clinical practice.}, } @article {pmid33814966, year = {2021}, author = {Andreas, A and Mavromoustakis, CX and Mastorakis, G and Do, DT and Batalla, JM and Pallis, E and Markakis, EK}, title = {Towards an optimized security approach to IoT devices with confidential healthcare data exchange.}, journal = {Multimedia tools and applications}, volume = {80}, number = {20}, pages = {31435-31449}, pmid = {33814966}, issn = {1380-7501}, abstract = {Reliable data exchange and efficient image transfer are currently significant research challenges in health care systems. To incentivize data exchange within the Internet of Things (IoT) framework, we need to ensure data sovereignty by facilitating secure data exchange between trusted parties. The security and reliability of data-sharing infrastructure require a community of trust. Therefore, this paper introduces an encryption frame based on data fragmentation. It also presents a novel, deterministic grey-scale optical encryption scheme based on fundamental mathematics. The objective is to use encryption as the underlying measure to make the data unintelligible while exploiting fragmentation to break down sensitive relationships between attributes. Thus, sensitive data distributed in separate data repositories for decryption and reconstruction using interpolation by knowing polynomial coefficients and personal values from the DBMS Database Management System. Aims also to ensure the secure acquisition of diagnostic images, micrography, and all types of medical imagery based on probabilistic approaches. 
Visual sharing of confidential medical imageries based on implementing a novel method, where transparencies $\leq k - 1$ out of n cannot reveal the original image.}, } @article {pmid33808037, year = {2021}, author = {Mijuskovic, A and Chiumento, A and Bemthuis, R and Aldea, A and Havinga, P}, title = {Resource Management Techniques for Cloud/Fog and Edge Computing: An Evaluation Framework and Classification.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {5}, pages = {}, pmid = {33808037}, issn = {1424-8220}, abstract = {Processing IoT applications directly in the cloud may not be the most efficient solution for each IoT scenario, especially for time-sensitive applications. A promising alternative is to use fog and edge computing, which address the issue of managing the large data bandwidth needed by end devices. These paradigms impose to process the large amounts of generated data close to the data sources rather than in the cloud. One of the considerations of cloud-based IoT environments is resource management, which typically revolves around resource allocation, workload balance, resource provisioning, task scheduling, and QoS to achieve performance improvements. In this paper, we review resource management techniques that can be applied for cloud, fog, and edge computing. The goal of this review is to provide an evaluation framework of metrics for resource management algorithms aiming at the cloud/fog and edge environments. To this end, we first address research challenges on resource management techniques in that domain. Consequently, we classify current research contributions to support in conducting an evaluation framework. One of the main contributions is an overview and analysis of research papers addressing resource management techniques. Concluding, this review highlights opportunities of using resource management techniques within the cloud/fog/edge paradigm. 
This practice is still at early development and barriers need to be overcome.}, } @article {pmid33807986, year = {2021}, author = {H Hasan, M and Abbasalipour, A and Nikfarjam, H and Pourkamali, S and Emad-Ud-Din, M and Jafari, R and Alsaleem, F}, title = {Exploiting Pull-In/Pull-Out Hysteresis in Electrostatic MEMS Sensor Networks to Realize a Novel Sensing Continuous-Time Recurrent Neural Network.}, journal = {Micromachines}, volume = {12}, number = {3}, pages = {}, pmid = {33807986}, issn = {2072-666X}, support = {1935641//National Science Foundation/ ; }, abstract = {The goal of this paper is to provide a novel computing approach that can be used to reduce the power consumption, size, and cost of wearable electronics. To achieve this goal, the use of microelectromechanical systems (MEMS) sensors for simultaneous sensing and computing is introduced. Specifically, by enabling sensing and computing locally at the MEMS sensor node and utilizing the usually unwanted pull in/out hysteresis, we may eliminate the need for cloud computing and reduce the use of analog-to-digital converters, sampling circuits, and digital processors. As a proof of concept, we show that a simulation model of a network of three commercially available MEMS accelerometers can classify a train of square and triangular acceleration signals inherently using pull-in and release hysteresis. 
Furthermore, we develop and fabricate a network with finger arrays of parallel plate actuators to facilitate coupling between MEMS devices in the network using actuating assemblies and biasing assemblies, thus bypassing the previously reported coupling challenge in MEMS neural networks.}, } @article {pmid33807759, year = {2021}, author = {Pintavirooj, C and Keatsamarn, T and Treebupachatsakul, T}, title = {Multi-Parameter Vital Sign Telemedicine System Using Web Socket for COVID-19 Pandemics.}, journal = {Healthcare (Basel, Switzerland)}, volume = {9}, number = {3}, pages = {}, pmid = {33807759}, issn = {2227-9032}, support = {2563-02-01-007//King Mongkut's Institute of Technology Ladkrabang/ ; }, abstract = {Telemedicine has become an increasingly important part of the modern healthcare infrastructure, especially in the present situation with the COVID-19 pandemics. Many cloud platforms have been used intensively for Telemedicine. The most popular ones include PubNub, Amazon Web Service, Google Cloud Platform and Microsoft Azure. One of the crucial challenges of telemedicine is the real-time application monitoring for the vital sign. The commercial platform is, by far, not suitable for real-time applications. The alternative is to design a web-based application exploiting Web Socket. This research paper concerns the real-time six-parameter vital-sign monitoring using a web-based application. The six vital-sign parameters are electrocardiogram, temperature, plethysmogram, percent saturation oxygen, blood pressure and heart rate. The six vital-sign parameters were encoded in a web server site and sent to a client site upon logging on. The encoded parameters were then decoded into six vital sign signals. 
Our proposed multi-parameter vital-sign telemedicine system using Web Socket has successfully remotely monitored the six-parameter vital signs on 4G mobile network with a latency of less than 5 milliseconds.}, } @article {pmid33806888, year = {2021}, author = {Kang, S and David, DSK and Yang, M and Yu, YC and Ham, S}, title = {Energy-Efficient Ultrasonic Water Level Detection System with Dual-Target Monitoring.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {6}, pages = {}, pmid = {33806888}, issn = {1424-8220}, support = {20STUTA26//U.S. Department of Transportation/ ; }, abstract = {This study presents a developed ultrasonic water level detection (UWLD) system with an energy-efficient design and dual-target monitoring. The water level monitoring system with a non-contact sensor is one of the suitable methods since it is not directly exposed to water. In addition, a web-based monitoring system using a cloud computing platform is a well-known technique to provide real-time water level monitoring. However, the long-term stable operation of remotely communicating units is an issue for real-time water level monitoring. Therefore, this paper proposes a UWLD unit using a low-power consumption design for renewable energy harvesting (e.g., solar) by controlling the unit with dual microcontrollers (MCUs) to improve the energy efficiency of the system. In addition, dual targeting to the pavement and streamside is uniquely designed to monitor both the urban inundation and stream overflow. The real-time water level monitoring data obtained from the proposed UWLD system is analyzed with water level changing rate (WLCR) and water level index. 
The quantified WLCR and water level index with various sampling rates present a different sensitivity to heavy rain.}, } @article {pmid33806770, year = {2021}, author = {Sergi, I and Montanaro, T and Benvenuto, FL and Patrono, L}, title = {A Smart and Secure Logistics System Based on IoT and Cloud Technologies.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {6}, pages = {}, pmid = {33806770}, issn = {1424-8220}, support = {TECNONIDI PROGRAMMA OPERATIVO REGIONALE 2014-2020 REGIONE PUGLIA//Regione Puglia/ ; }, abstract = {Recently, one of the hottest topics in the logistics sector has been the traceability of goods and the monitoring of their condition during transportation. Perishable goods, such as fresh goods, have specifically attracted attention of the researchers that have already proposed different solutions to guarantee quality and freshness of food through the whole cold chain. In this regard, the use of Internet of Things (IoT)-enabling technologies and its specific branch called edge computing is bringing different enhancements thereby achieving easy remote and real-time monitoring of transported goods. Due to the fast changes of the requirements and the difficulties that researchers can encounter in proposing new solutions, the fast prototype approach could contribute to rapidly enhance both the research and the commercial sector. In order to make easy the fast prototyping of solutions, different platforms and tools have been proposed in the last years, however it is difficult to guarantee end-to-end security at all the levels through such platforms. 
For this reason, based on the experiments reported in literature and aiming at providing support for fast-prototyping, end-to-end security in the logistics sector, the current work presents a solution that demonstrates how the advantages offered by the Azure Sphere platform, a dedicated hardware (i.e., microcontroller unit, the MT3620) device and Azure Sphere Security Service can be used to realize a fast prototype to trace fresh food conditions through its transportation. The proposed solution guarantees end-to-end security and can be exploited by future similar works also in other sectors.}, } @article {pmid33805471, year = {2021}, author = {El-Rashidy, N and El-Sappagh, S and Islam, SMR and M El-Bakry, H and Abdelrazek, S}, title = {Mobile Health in Remote Patient Monitoring for Chronic Diseases: Principles, Trends, and Challenges.}, journal = {Diagnostics (Basel, Switzerland)}, volume = {11}, number = {4}, pages = {}, pmid = {33805471}, issn = {2075-4418}, abstract = {Chronic diseases are becoming more widespread. Treatment and monitoring of these diseases require going to hospitals frequently, which increases the burdens of hospitals and patients. Presently, advancements in wearable sensors and communication protocol contribute to enriching the healthcare system in a way that will reshape healthcare services shortly. Remote patient monitoring (RPM) is the foremost of these advancements. RPM systems are based on the collection of patient vital signs extracted using invasive and noninvasive techniques, then sending them in real-time to physicians. These data may help physicians in taking the right decision at the right time. The main objective of this paper is to outline research directions on remote patient monitoring, explain the role of AI in building RPM systems, make an overview of the state of the art of RPM, its advantages, its challenges, and its probable future directions. 
For studying the literature, five databases have been chosen (i.e., science direct, IEEE-Explore, Springer, PubMed, and science.gov). We followed the (Preferred Reporting Items for Systematic Reviews and Meta-Analyses) PRISMA, which is a standard methodology for systematic reviews and meta-analyses. A total of 56 articles are reviewed based on the combination of a set of selected search terms including RPM, data mining, clinical decision support system, electronic health record, cloud computing, internet of things, and wireless body area network. The result of this study approved the effectiveness of RPM in improving healthcare delivery, increase diagnosis speed, and reduce costs. To this end, we also present the chronic disease monitoring system as a case study to provide enhanced solutions for RPMs.}, } @article {pmid33805187, year = {2021}, author = {Lovén, L and Lähderanta, T and Ruha, L and Peltonen, E and Launonen, I and Sillanpää, MJ and Riekki, J and Pirttikangas, S}, title = {EDISON: An Edge-Native Method and Architecture for Distributed Interpolation.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {7}, pages = {}, pmid = {33805187}, issn = {1424-8220}, support = {318927//Academy of Finland/ ; 877056//Electronic Components and Systems for European Leadership/ ; N/A//Infotech Oulu research institute/ ; N/A//Future Makers program of the Jane and Aatos Erkko Foundation and the Technology Industries of Finland Centennial Foundation/ ; personal grant for L.L.//Tauno Tönning foundation/ ; }, abstract = {Spatio-temporal interpolation provides estimates of observations in unobserved locations and time slots. In smart cities, interpolation helps to provide a fine-grained contextual and situational understanding of the urban environment, in terms of both short-term (e.g., weather, air quality, traffic) or long term (e.g., crime, demographics) spatio-temporal phenomena. 
Various initiatives improve spatio-temporal interpolation results by including additional data sources such as vehicle-fitted sensors, mobile phones, or micro weather stations of, for example, smart homes. However, the underlying computing paradigm in such initiatives is predominantly centralized, with all data collected and analyzed in the cloud. This solution is not scalable, as when the spatial and temporal density of sensor data grows, the required transmission bandwidth and computational capacity become unfeasible. To address the scaling problem, we propose EDISON: algorithms for distributed learning and inference, and an edge-native architecture for distributing spatio-temporal interpolation models, their computations, and the observed data vertically and horizontally between device, edge and cloud layers. We demonstrate EDISON functionality in a controlled, simulated spatio-temporal setup with 1 M artificial data points. While the main motivation of EDISON is the distribution of the heavy computations, the results show that EDISON also provides an improvement over alternative approaches, reaching at best a 10% smaller RMSE than a global interpolation and 6% smaller RMSE than a baseline distributed approach.}, } @article {pmid33803561, year = {2021}, author = {Zhang, J and Lu, C and Cheng, G and Guo, T and Kang, J and Zhang, X and Yuan, X and Yan, X}, title = {A Blockchain-Based Trusted Edge Platform in Edge Computing Environment.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {6}, pages = {}, pmid = {33803561}, issn = {1424-8220}, abstract = {Edge computing is a product of the evolution of IoT and the development of cloud computing technology, providing computing, storage, network, and other infrastructure close to users. 
Compared with the centralized deployment model of traditional cloud computing, edge computing solves the problems of extended communication time and high convergence traffic, providing better support for low latency and high bandwidth services. With the increasing amount of data generated by users and devices in IoT, security and privacy issues in the edge computing environment have become concerns. Blockchain, a security technology developed rapidly in recent years, has been adopted by many industries, such as finance and insurance. With the edge computing capability, deploying blockchain platforms/applications on edge computing platforms can provide security services for network edge environments. Although there are already solutions for integrating edge computing with blockchain in many IoT application scenarios, they slightly lack scalability, portability, and heterogeneous data processing. In this paper, we propose a trusted edge platform to integrate the edge computing framework and blockchain network for building an edge security environment. The proposed platform aims to preserve the data privacy of the edge computing client. The design based on the microservice architecture makes the platform lighter. To improve the portability of the platform, we introduce the Edgex Foundry framework and design an edge application module on the platform to improve the business capability of Edgex. Simultaneously, we designed a series of well-defined security authentication microservices. These microservices use the Hyperledger Fabric blockchain network to build a reliable security mechanism in the edge environment. Finally, we build an edge computing network using different hardware devices and deploy the trusted edge platform on multiple network nodes. The usability of the proposed platform is demonstrated by testing the round-trip time (RTT) of several important workflows. 
The experimental results demonstrate that the platform can meet the availability requirements in real-world usage scenarios.}, } @article {pmid33803360, year = {2021}, author = {Klein, I and Oppelt, N and Kuenzer, C}, title = {Application of Remote Sensing Data for Locust Research and Management-A Review.}, journal = {Insects}, volume = {12}, number = {3}, pages = {}, pmid = {33803360}, issn = {2075-4450}, abstract = {Recently, locust outbreaks around the world have destroyed agricultural and natural vegetation and caused massive damage endangering food security. Unusual heavy rainfalls in habitats of the desert locust (Schistocerca gregaria) and lack of monitoring due to political conflicts or inaccessibility of those habitats lead to massive desert locust outbreaks and swarms migrating over the Arabian Peninsula, East Africa, India and Pakistan. At the same time, swarms of the Moroccan locust (Dociostaurus maroccanus) in some Central Asian countries and swarms of the Italian locust (Calliptamus italicus) in Russia and China destroyed crops despite developed and ongoing monitoring and control measurements. These recent events underline that the risk and damage caused by locust pests is as present as ever and affects 100 million of human lives despite technical progress in locust monitoring, prediction and control approaches. Remote sensing has become one of the most important data sources in locust management. Since the 1980s, remote sensing data and applications have accompanied many locust management activities and contributed to an improved and more effective control of locust outbreaks and plagues. Recently, open-access remote sensing data archives as well as progress in cloud computing provide unprecedented opportunity for remote sensing-based locust management and research. Additionally, unmanned aerial vehicle (UAV) systems bring up new prospects for a more effective and faster locust control. 
Nevertheless, the full capacity of available remote sensing applications and possibilities have not been exploited yet. This review paper provides a comprehensive and quantitative overview of international research articles focusing on remote sensing application for locust management and research. We reviewed 110 articles published over the last four decades, and categorized them into different aspects and main research topics to summarize achievements and gaps for further research and application development. The results reveal a strong focus on three species-the desert locust, the migratory locust (Locusta migratoria), and the Australian plague locust (Chortoicetes terminifera)-and corresponding regions of interest. There is still a lack of international studies for other pest species such as the Italian locust, the Moroccan locust, the Central American locust (Schistocerca piceifrons), the South American locust (Schistocerca cancellata), the brown locust (Locustana pardalina) and the red locust (Nomadacris septemfasciata). In terms of applied sensors, most studies utilized Advanced Very-High-Resolution Radiometer (AVHRR), Satellite Pour l'Observation de la Terre VEGETATION (SPOT-VGT), Moderate-Resolution Imaging Spectroradiometer (MODIS) as well as Landsat data focusing mainly on vegetation monitoring or land cover mapping. Application of geomorphological metrics as well as radar-based soil moisture data is comparably rare despite previous acknowledgement of their importance for locust outbreaks. 
Despite great advance and usage of available remote sensing resources, we identify several gaps and potential for future research to further improve the understanding and capacities of the use of remote sensing in supporting locust outbreak- research and management.}, } @article {pmid33803329, year = {2021}, author = {Poniszewska-Marańda, A and Czechowska, E}, title = {Kubernetes Cluster for Automating Software Production Environment.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {5}, pages = {}, pmid = {33803329}, issn = {1424-8220}, abstract = {Microservices, Continuous Integration and Delivery, Docker, DevOps, Infrastructure as Code-these are the current trends and buzzwords in the technological world of 2020. A popular tool which can facilitate the deployment and maintenance of microservices is Kubernetes. Kubernetes is a platform for running containerized applications, for example microservices. There are two main questions which answer was important for us: how to deploy Kubernetes itself and how to ensure that the deployment fulfils the needs of a production environment. Our research concentrates on the analysis and evaluation of Kubernetes cluster as the software production environment. However, firstly it is necessary to determine and evaluate the requirements of production environment. The paper presents the determination and analysis of such requirements and their evaluation in the case of Kubernetes cluster. Next, the paper compares two methods of deploying a Kubernetes cluster: kops and eksctl. Both of the methods concern the AWS cloud, which was chosen mainly because of its wide popularity and the range of provided services. 
Besides the two chosen methods of deployment, there are many more, including the DIY method and deploying on-premises.}, } @article {pmid33802673, year = {2021}, author = {Hadzovic, S and Mrdovic, S and Radonjic, M}, title = {Identification of IoT Actors.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {6}, pages = {}, pmid = {33802673}, issn = {1424-8220}, abstract = {The Internet of Things (IoT) is a leading trend with numerous opportunities accompanied by advantages as well as disadvantages. Parallel with IoT development, significant privacy and personal data protection challenges are also growing. In this regard, the General Data Protection Regulation (GDPR) is often considered the world's strongest set of data protection rules and has proven to be a catalyst for many countries around the world. The concepts and interaction of the data controller, the joint controllers, and the data processor play a key role in the implementation of the GDPR. Therefore, clarifying the blurred IoT actors' relationships to determine corresponding responsibilities is necessary. Given the IoT transformation reflected in shifting computing power from cloud to the edge, in this research we have considered how these computing paradigms are affecting IoT actors. In this regard, we have introduced identification of IoT actors according to a new five-computing layer IoT model based on the cloud, fog, edge, mist, and dew computing. 
Our conclusion is that identifying IoT actors in the light of the corresponding IoT data manager roles could be useful in determining the responsibilities of IoT actors for their compliance with data protection and privacy rules.}, } @article {pmid33802669, year = {2021}, author = {Sedar, R and Vázquez-Gallego, F and Casellas, R and Vilalta, R and Muñoz, R and Silva, R and Dizambourg, L and Fernández Barciela, AE and Vilajosana, X and Datta, SK and Härri, J and Alonso-Zarate, J}, title = {Standards-Compliant Multi-Protocol On-Board Unit for the Evaluation of Connected and Automated Mobility Services in Multi-Vendor Environments.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {6}, pages = {}, pmid = {33802669}, issn = {1424-8220}, support = {825050//Horizon 2020 Framework Programme/ ; }, abstract = {Vehicle-to-everything (V2X) communications enable real-time information exchange between vehicles and infrastructure, which extends the perception range of vehicles beyond the limits of on-board sensors and, thus, facilitating the realisation of cooperative, connected, and automated mobility (CCAM) services that will improve road safety and traffic efficiency. In the context of CCAM, the successful deployments of cooperative intelligent transport system (C-ITS) use cases, with the integration of advanced wireless communication technologies, are effectively leading to make transport safer and more efficient. However, the evaluation of multi-vendor and multi-protocol based CCAM service architectures can become challenging and complex. Additionally, conducting on-demand field trials of such architectures with real vehicles involved is prohibitively expensive and time-consuming. 
In order to overcome these obstacles, in this paper, we present the development of a standards-compliant experimental vehicular on-board unit (OBU) that supports the integration of multiple V2X protocols from different vendors to communicate with heterogeneous cloud-based services that are offered by several original equipment manufacturers (OEMs). We experimentally demonstrate the functionalities of the OBU in a real-world deployment of a cooperative collision avoidance service infrastructure that is based on edge and cloud servers. In addition, we measure end-to-end application-level latencies of multi-protocol supported V2X information flows to show the effectiveness of interoperability in V2X communications between different vehicle OEMs.}, } @article {pmid33800530, year = {2021}, author = {Wang, Y and Wang, L and Zheng, R and Zhao, X and Liu, M}, title = {Latency-Optimal Computational Offloading Strategy for Sensitive Tasks in Smart Homes.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {7}, pages = {}, pmid = {33800530}, issn = {1424-8220}, support = {61976243//National Natural Science Foundation of China/ ; 61971458//National Natural Science Foundation of China/ ; }, abstract = {In smart homes, the computational offloading technology of edge cloud computing (ECC) can effectively deal with the large amount of computation generated by smart devices. In this paper, we propose a computational offloading strategy for minimizing delay based on the back-pressure algorithm (BMDCO) to get the offloading decision and the number of tasks that can be offloaded. Specifically, we first construct a system with multiple local smart device task queues and multiple edge processor task queues. Then, we formulate an offloading strategy to minimize the queue length of tasks in each time slot by minimizing the Lyapunov drift optimization problem, so as to realize the stability of queues and improve the offloading performance. 
In addition, we give a theoretical analysis on the stability of the BMDCO algorithm by deducing the upper bound of all queues in this system. The simulation results show the stability of the proposed algorithm, and demonstrate that the BMDCO algorithm is superior to other alternatives. Compared with other algorithms, this algorithm can effectively reduce the computation delay.}, } @article {pmid33800262, year = {2021}, author = {Agapiou, A}, title = {Multi-Temporal Change Detection Analysis of Vertical Sprawl over Limassol City Centre and Amathus Archaeological Site in Cyprus during 2015-2020 Using the Sentinel-1 Sensor and the Google Earth Engine Platform.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {5}, pages = {}, pmid = {33800262}, issn = {1424-8220}, support = {EXCELLENCE/0918/0052//Republic of Cyprus and the Structural Funds of the European Union in Cyprus/ ; }, abstract = {Urban sprawl can negatively impact the archaeological record of an area. In order to study the urbanisation process and its patterns, satellite images were used in the past to identify land-use changes and detect individual buildings and constructions. However, this approach involves the acquisition of high-resolution satellite images, the cost of which is increases according to the size of the area under study, as well as the time interval of the analysis. In this paper, we implemented a quick, automatic and low-cost exploration of large areas, for addressing this purpose, aiming to provide at a medium resolution of an overview of the landscape changes. This study focuses on using radar Sentinel-1 images to monitor and detect multi-temporal changes during the period 2015-2020 in Limassol, Cyprus. In addition, the big data cloud platform, Google Earth Engine, was used to process the data. 
Three different change detection methods were implemented in this platform as follows:
A promising and straightforward resource management approach and several experimental cases are used to evaluate the efficiency and effectiveness. Preliminary results are presented as performance evaluations to verify the proposed approach's suitability for edge and core cloud computing environments. The proposed orchestrator significantly enables the network slicing services and efficiently enhances the clients' satisfaction of high availability.}, } @article {pmid33797700, year = {2021}, author = {Bentes, PCL and Nadal, J}, title = {A telediagnosis assistance system for multiple-lead electrocardiography.}, journal = {Physical and engineering sciences in medicine}, volume = {44}, number = {2}, pages = {473-485}, pmid = {33797700}, issn = {2662-4737}, mesh = {Cloud Computing ; Electrocardiography ; Humans ; *Myocardial Ischemia ; Signal Processing, Computer-Assisted ; *Telemedicine ; }, abstract = {The diffusion of telemedicine opens-up a new perspective for the development of technologies furthered by Biomedical Engineering. In particular, herein we deal with those related to telediagnosis through multiple-lead electrocardiographic signals. This study focuses on the proof-of-concept of an internet-based telemedicine system as a use case that attests to the feasibility for the development, within the university environment, of techniques for remote processing of biomedical signals for adjustable detection of myocardial ischemia episodes. At each signal lead, QRS complexes are detected and delimited with the J-point marking. The same procedure to detect the complex is used to identify the respective T wave, then the area over the ST segment is applied to detect ischemia-related elevations. The entire system is designed on web-based telemedicine services using multiuser, remote access technologies, and database. The measurements for sensitivity and precision had their respective averages calculated at 11.79 and 24.21% for the leads of lower noise. 
The evaluations regarding the aspects of user friendliness and the usefulness of the application, resulted in 88.57 and 89.28% of broad or total acceptance, respectively. They are robust enough to enable scalability and can be offered by cloud computing, besides enabling the development of new biomedical signal processing techniques within the concept of distance services, using a modular architecture with collaborative bias.}, } @article {pmid33790958, year = {2021}, author = {Deepika, J and Rajan, C and Senthil, T}, title = {Security and Privacy of Cloud- and IoT-Based Medical Image Diagnosis Using Fuzzy Convolutional Neural Network.}, journal = {Computational intelligence and neuroscience}, volume = {2021}, number = {}, pages = {6615411}, pmid = {33790958}, issn = {1687-5273}, mesh = {Cloud Computing ; *Computer Security ; Confidentiality ; Humans ; Neural Networks, Computer ; *Privacy ; }, abstract = {In recent times, security in cloud computing has become a significant part in healthcare services specifically in medical data storage and disease prediction. A large volume of data are produced in the healthcare environment day by day due to the development in the medical devices. Thus, cloud computing technology is utilised for storing, processing, and handling these large volumes of data in a highly secured manner from various attacks. This paper focuses on disease classification by utilising image processing with secured cloud computing environment using an extended zigzag image encryption scheme possessing a greater tolerance to different data attacks. Secondly, a fuzzy convolutional neural network (FCNN) algorithm is proposed for effective classification of images. The decrypted images are used for classification of cancer levels with different layers of training. After classification, the results are transferred to the concern doctors and patients for further treatment process. Here, the experimental process is carried out by utilising the standard dataset. 
The results from the experiment concluded that the proposed algorithm shows better performance than the other existing algorithms and can be effectively utilised for the medical image diagnosis.}, } @article {pmid33783626, year = {2021}, author = {Floreano, IX and de Moraes, LAF}, title = {Land use/land cover (LULC) analysis (2009-2019) with Google Earth Engine and 2030 prediction using Markov-CA in the Rondônia State, Brazil.}, journal = {Environmental monitoring and assessment}, volume = {193}, number = {4}, pages = {239}, pmid = {33783626}, issn = {1573-2959}, mesh = {Agriculture ; Brazil ; *Conservation of Natural Resources ; Ecosystem ; *Environmental Monitoring ; }, abstract = {The Amazonian biome is important not only for South America but also for the entire planet, providing essential environmental services. The state of Rondônia ranks third in deforestation rates in the Brazilian Legal Amazon (BLA) political division. This study aims to evaluate the land use/land cover (LULC) changes over the past ten years (2009-2019), as well as, to predict the LULC in the next 10 years, using TerrSet 18.3 software, in the state of Rondônia, Brazil. The machine learning algorithms within the Google Earth Engine cloud-based platform employed a Random Forest classifier in image classifications. The Markov-CA deep learning algorithm predicted future LULC changes by comparing scenarios of one and three transitions. The results showed a reduction in forested areas of about 15.7% between 2009 and 2019 in the Rondônia state. According to the predictive model, by 2030, around 30% of the remaining forests will be logged, most likely converted into occupied areas. 
The results reinforce the importance of measures and policies integrated with investments in research and satellite monitoring to reduce deforestation in the Brazilian Amazon and ensure the continuity of the Amazonian role in halting climate change.}, } @article {pmid33775559, year = {2021}, author = {Wimberly, MC and de Beurs, KM and Loboda, TV and Pan, WK}, title = {Satellite Observations and Malaria: New Opportunities for Research and Applications.}, journal = {Trends in parasitology}, volume = {37}, number = {6}, pages = {525-537}, pmid = {33775559}, issn = {1471-5007}, support = {P2C HD065563/HD/NICHD NIH HHS/United States ; R01 AI079411/AI/NIAID NIH HHS/United States ; }, mesh = {*Environmental Monitoring/instrumentation/methods ; Humans ; Malaria/*prevention & control ; Remote Sensing Technology/*instrumentation ; Research/*trends ; *Satellite Imagery ; }, abstract = {Satellite remote sensing provides a wealth of information about environmental factors that influence malaria transmission cycles and human populations at risk. Long-term observations facilitate analysis of climate-malaria relationships, and high-resolution data can be used to assess the effects of agriculture, urbanization, deforestation, and water management on malaria. New sources of very-high-resolution satellite imagery and synthetic aperture radar data will increase the precision and frequency of observations. Cloud computing platforms for remote sensing data combined with analysis-ready datasets and high-level data products have made satellite remote sensing more accessible to nonspecialists. 
Further collaboration between the malaria and remote sensing communities is needed to develop and implement useful geospatial data products that will support global efforts toward malaria control, elimination, and eradication.}, } @article {pmid33770943, year = {2021}, author = {Li, C and Bao, K and Qin, S and Guan, K and Xu, G and Su, J}, title = {Grating-enabled high-speed high-efficiency surface-illuminated silicon photodiodes.}, journal = {Optics express}, volume = {29}, number = {3}, pages = {3458-3464}, doi = {10.1364/OE.412412}, pmid = {33770943}, issn = {1094-4087}, abstract = {High-speed, high-efficiency silicon photodetectors play important roles in the optical communication links that are used increasingly in data centers to handle the increasing volumes of data traffic and higher bandwidths required as use of big data and cloud computing continues to grow exponentially. Monolithic integration of the optical components with signal processing electronics on a single silicon chip is of paramount importance in the drive to reduce costs and improve performance. Here we report grating-enhanced light absorption in a silicon photodiode. The absorption efficiency is determined theoretically to be as high as 77% at 850 nm for the optimal structure, which has a thin intrinsic absorption layer with a thickness of 220 nm. The fabricated devices demonstrate a high bandwidth of 11.3 GHz and improved radio-frequency output power of more than 14 dB, thus making them suitable for use in data center optical communications.}, } @article {pmid33763309, year = {2021}, author = {Schoenbachler, JL and Hughey, JJ}, title = {pmparser and PMDB: resources for large-scale, open studies of the biomedical literature.}, journal = {PeerJ}, volume = {9}, number = {}, pages = {e11071}, pmid = {33763309}, issn = {2167-8359}, support = {R35 GM124685/GM/NIGMS NIH HHS/United States ; }, abstract = {PubMed is an invaluable resource for the biomedical community. 
Although PubMed is freely available, the existing API is not designed for large-scale analyses and the XML structure of the underlying data is inconvenient for complex queries. We developed an R package called pmparser to convert the data in PubMed to a relational database. Our implementation of the database, called PMDB, currently contains data on over 31 million PubMed Identifiers (PMIDs) and is updated regularly. Together, pmparser and PMDB can enable large-scale, reproducible, and transparent analyses of the biomedical literature. pmparser is licensed under GPL-2 and available at https://pmparser.hugheylab.org. PMDB is available in both PostgreSQL (DOI 10.5281/zenodo.4008109) and Google BigQuery (https://console.cloud.google.com/bigquery?project=pmdb-bq&d=pmdb).}, } @article {pmid33763195, year = {2021}, author = {Yao, L and Shang, D and Zhao, H and Hu, S}, title = {Medical Equipment Comprehensive Management System Based on Cloud Computing and Internet of Things.}, journal = {Journal of healthcare engineering}, volume = {2021}, number = {}, pages = {6685456}, pmid = {33763195}, issn = {2040-2309}, mesh = {Algorithms ; *Cloud Computing ; Hospitals ; Internet ; *Internet of Things ; }, abstract = {The continuous progress in modern medicine is not only the level of medical technology, but also various high-tech medical auxiliary equipment. With the rapid development of hospital information construction, medical equipment plays a very important role in the diagnosis, treatment, and prognosis observation of the disease. However, the continuous growth of the types and quantity of medical equipment has caused considerable difficulties in the management of hospital equipment. 
In order to improve the efficiency of medical equipment management in hospital, based on cloud computing and the Internet of Things, this paper develops a comprehensive management system of medical equipment and uses the improved particle swarm optimization algorithm and chicken swarm algorithm to help the system reasonably achieve dynamic task scheduling. The purpose of this paper is to develop a comprehensive intelligent management system to master the procurement, maintenance, and use of all medical equipment in the hospital, so as to maximize the scientific management of medical equipment in the hospital. Scientific Management. It is very necessary to develop a preventive maintenance plan for medical equipment. From the experimental data, it can be seen that when the system simultaneously accesses 100 simulated users online, the corresponding time for submitting the equipment maintenance application form is 1228 ms, and the accuracy rate is 99.8%. When there are 1000 simulated online users, the corresponding time for submitting the equipment maintenance application form is 5123 ms, and the correct rate is 99.4%. On the whole, the medical equipment management information system has excellent performance in stress testing. 
It not only predicts the initial performance requirements, but also provides a large amount of data support for equipment management and maintenance.}, } @article {pmid33751044, year = {2022}, author = {Caufield, JH and Sigdel, D and Fu, J and Choi, H and Guevara-Gonzalez, V and Wang, D and Ping, P}, title = {Cardiovascular informatics: building a bridge to data harmony.}, journal = {Cardiovascular research}, volume = {118}, number = {3}, pages = {732-745}, pmid = {33751044}, issn = {1755-3245}, support = {R01 HL146739/HL/NHLBI NIH HHS/United States ; R35 HL135772/HL/NHLBI NIH HHS/United States ; T32 HL139450/HL/NHLBI NIH HHS/United States ; }, mesh = {*Artificial Intelligence ; *Cardiovascular Diseases/diagnosis/therapy ; Cloud Computing ; Humans ; Informatics ; Machine Learning ; }, abstract = {The search for new strategies for better understanding cardiovascular (CV) disease is a constant one, spanning multitudinous types of observations and studies. A comprehensive characterization of each disease state and its biomolecular underpinnings relies upon insights gleaned from extensive information collection of various types of data. Researchers and clinicians in CV biomedicine repeatedly face questions regarding which types of data may best answer their questions, how to integrate information from multiple datasets of various types, and how to adapt emerging advances in machine learning and/or artificial intelligence to their needs in data processing. Frequently lauded as a field with great practical and translational potential, the interface between biomedical informatics and CV medicine is challenged with staggeringly massive datasets. Successful application of computational approaches to decode these complex and gigantic amounts of information becomes an essential step toward realizing the desired benefits. 
In this review, we examine recent efforts to adapt informatics strategies to CV biomedical research: automated information extraction and unification of multifaceted -omics data. We discuss how and why this interdisciplinary space of CV Informatics is particularly relevant to and supportive of current experimental and clinical research. We describe in detail how open data sources and methods can drive discovery while demanding few initial resources, an advantage afforded by widespread availability of cloud computing-driven platforms. Subsequently, we provide examples of how interoperable computational systems facilitate exploration of data from multiple sources, including both consistently formatted structured data and unstructured data. Taken together, these approaches for achieving data harmony enable molecular phenotyping of CV diseases and unification of CV knowledge.}, } @article {pmid33748749, year = {2021}, author = {Ogle, C and Reddick, D and McKnight, C and Biggs, T and Pauly, R and Ficklin, SP and Feltus, FA and Shannigrahi, S}, title = {Named Data Networking for Genomics Data Management and Integrated Workflows.}, journal = {Frontiers in big data}, volume = {4}, number = {}, pages = {582468}, pmid = {33748749}, issn = {2624-909X}, abstract = {Advanced imaging and DNA sequencing technologies now enable the diverse biology community to routinely generate and analyze terabytes of high resolution biological data. The community is rapidly heading toward the petascale in single investigator laboratory settings. As evidence, the single NCBI SRA central DNA sequence repository contains over 45 petabytes of biological data. Given the geometric growth of this and other genomics repositories, an exabyte of mineable biological data is imminent. 
The challenges of effectively utilizing these datasets are enormous as they are not only large in size but also stored in various geographically distributed repositories such as National Center for Biotechnology Information (NCBI), DNA Data Bank of Japan (DDBJ), European Bioinformatics Institute (EBI), and NASA's GeneLab. In this work, we first systematically point out the data-management challenges of the genomics community. We then introduce Named Data Networking (NDN), a novel but well-researched Internet architecture that is capable of solving these challenges at the network layer.
We have loaded the NDN testbed with these pre-processed genomes that can be accessed over NDN and used by anyone interested in those datasets. Finally, we discuss our continued effort in integrating NDN with cloud computing platforms, such as the Pacific Research Platform (PRP). The reader should note that the goal of this paper is to introduce NDN to the genomics community and discuss NDN's properties that can benefit the genomics community. We do not present an extensive performance evaluation of NDN-we are working on extending and evaluating our pilot deployment and will present systematic results in a future work.}, } @article {pmid33740542, year = {2021}, author = {Guo, J and Chen, S and Tian, S and Liu, K and Ni, J and Zhao, M and Kang, Y and Ma, X and Guo, J}, title = {5G-enabled ultra-sensitive fluorescence sensor for proactive prognosis of COVID-19.}, journal = {Biosensors & bioelectronics}, volume = {181}, number = {}, pages = {113160}, pmid = {33740542}, issn = {1873-4235}, mesh = {*Biosensing Techniques ; COVID-19/*diagnosis ; *Computer Systems ; Fluorescence ; Humans ; *Immunoassay ; Prognosis ; SARS-CoV-2 ; }, abstract = {The severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2) is spreading around the globe since December 2019. There is an urgent need to develop sensitive and online methods for on-site diagnosing and monitoring of suspected COVID-19 patients. With the huge development of Internet of Things (IoT), the impact of Internet of Medical Things (IoMT) provides an impressive solution to this problem. In this paper, we proposed a 5G-enabled fluorescence sensor for quantitative detection of spike protein and nucleocapsid protein of SARS-CoV-2 by using mesoporous silica encapsulated up-conversion nanoparticles (UCNPs@mSiO2) labeled lateral flow immunoassay (LFIA). The sensor can detect spike protein (SP) with a detection of limit (LOD) 1.6 ng/mL and nucleocapsid protein (NP) with an LOD of 2.2 ng/mL. 
The feasibility of the sensor in clinical use was further demonstrated by utilizing virus culture as real clinical samples. Moreover, the proposed fluorescence sensor is IoMT enabled, which is accessible to edge hardware devices (personal computers, 5G smartphones, IPTV, etc.) through Bluetooth. Medical data can be transmitted to the fog layer of the network and 5G cloud server with ultra-low latency and high reliably for edge computing and big data analysis. Furthermore, a COVID-19 monitoring module working with the proposed the system is developed on a smartphone application (App), which endows patients and their families to record their medical data and daily conditions remotely, releasing the burdens of going to central hospitals. We believe that the proposed system will be highly practical in the future treatment and prevention of COVID-19 and other mass infectious diseases.}, } @article {pmid33739401, year = {2021}, author = {Blamey, B and Toor, S and Dahlö, M and Wieslander, H and Harrison, PJ and Sintorn, IM and Sabirsh, A and Wählby, C and Spjuth, O and Hellander, A}, title = {Rapid development of cloud-native intelligent data pipelines for scientific data streams using the HASTE Toolkit.}, journal = {GigaScience}, volume = {10}, number = {3}, pages = {}, pmid = {33739401}, issn = {2047-217X}, mesh = {*Biological Science Disciplines ; Diagnostic Imaging ; *Software ; }, abstract = {BACKGROUND: Large streamed datasets, characteristic of life science applications, are often resource-intensive to process, transport and store. We propose a pipeline model, a design pattern for scientific pipelines, where an incoming stream of scientific data is organized into a tiered or ordered "data hierarchy". We introduce the HASTE Toolkit, a proof-of-concept cloud-native software toolkit based on this pipeline model, to partition and prioritize data streams to optimize use of limited computing resources.

FINDINGS: In our pipeline model, an "interestingness function" assigns an interestingness score to data objects in the stream, inducing a data hierarchy. From this score, a "policy" guides decisions on how to prioritize computational resource use for a given object. The HASTE Toolkit is a collection of tools to adopt this approach. We evaluate with 2 microscopy imaging case studies. The first is a high content screening experiment, where images are analyzed in an on-premise container cloud to prioritize storage and subsequent computation. The second considers edge processing of images for upload into the public cloud for real-time control of a transmission electron microscope.

CONCLUSIONS: Through our evaluation, we created smart data pipelines capable of effective use of storage, compute, and network resources, enabling more efficient data-intensive experiments. We note a beneficial separation between scientific concerns of data priority, and the implementation of this behaviour for different resources in different deployment contexts. The toolkit allows intelligent prioritization to be `bolted on' to new and existing systems - and is intended for use with a range of technologies in different deployment scenarios.}, } @article {pmid33737518, year = {2021}, author = {Kumar, D}, title = {Urban objects detection from C-band synthetic aperture radar (SAR) satellite images through simulating filter properties.}, journal = {Scientific reports}, volume = {11}, number = {1}, pages = {6241}, pmid = {33737518}, issn = {2045-2322}, abstract = {Satellite-based remote sensing has a key role in the monitoring earth features, but due to flaws like cloud penetration capability and selective duration for remote sensing in traditional remote sensing methods, now the attention has shifted towards the use of alternative methods such as microwave or radar sensing technology. Microwave remote sensing utilizes synthetic aperture radar (SAR) technology for remote sensing and it can operate in all weather conditions. Previous researchers have reported about effects of SAR pre-processing for urban objects detection and mapping. Preparing high accuracy urban maps are critical to disaster planning and response efforts, thus result from this study can help to users on the required pre-processing steps and its effects. 
Owing to the induced errors (such as calibration, geometric, speckle noise) in the radar images, these images are affected by several distortions, therefore these distortions need to be processed before any applications, as it causes issues in image interpretation and these can destroy valuable information about shapes, size, pattern and tone of various desired objects. The present work aims to utilize the sentinel-1 SAR datasets for urban studies (i.e. urban object detection through simulation of filter properties). The work uses C-band SAR datasets acquired from Sentinel-1A/B sensor, and the Google Earth datasets to validate the recognized objects. It was observed that the Refined-Lee filter performed well to provide detailed information about the various urban objects. It was established that the attempted approach cannot be generalised as one suitable method for sensing or identifying accurate urban objects from the C-band SAR images. Hence some more datasets in different polarisation combinations are required to be attempted.}, } @article {pmid33733530, year = {2021}, author = {Chandak, T and Wong, CF}, title = {EDock-ML: A web server for using ensemble docking with machine learning to aid drug discovery.}, journal = {Protein science : a publication of the Protein Society}, volume = {30}, number = {5}, pages = {1087-1097}, pmid = {33733530}, issn = {1469-896X}, support = {R15 CA224033/CA/NCI NIH HHS/United States ; }, mesh = {*Databases, Chemical ; *Drug Discovery ; *Internet ; *Machine Learning ; *Molecular Docking Simulation ; *Software ; }, abstract = {EDock-ML is a web server that facilitates the use of ensemble docking with machine learning to help decide whether a compound is worthwhile to be considered further in a drug discovery process. Ensemble docking provides an economical way to account for receptor flexibility in molecular docking. 
Machine learning improves the use of the resulting docking scores to evaluate whether a compound is likely to be useful. EDock-ML takes a bottom-up approach in which machine-learning models are developed one protein at a time to improve predictions for the proteins included in its database. Because the machine-learning models are intended to be used without changing the docking and model parameters with which the models were trained, novice users can use it directly without worrying about what parameters to choose. A user simply submits a compound specified by an ID from the ZINC database (Sterling, T.; Irwin, J. J., J Chem Inf Model 2015, 55[11], 2,324-2,337.) or upload a file prepared by a chemical drawing program and receives an output helping the user decide the likelihood of the compound to be active or inactive for a drug target. EDock-ML can be accessed freely at edock-ml.umsl.edu.}, } @article {pmid33732040, year = {2021}, author = {Ali, MA}, title = {Phylotranscriptomic analysis of Dillenia indica L. (Dilleniales, Dilleniaceae) and its systematics implication.}, journal = {Saudi journal of biological sciences}, volume = {28}, number = {3}, pages = {1557-1560}, pmid = {33732040}, issn = {1319-562X}, abstract = {The recent massive development in the next-generation sequencing platforms and bioinformatics tools including cloud based computing have proven extremely useful in understanding the deeper-level phylogenetic relationships of angiosperms. The present phylotranscriptomic analyses address the poorly known evolutionary relationships of the order Dilleniales to order of the other angiosperms using the minimum evolution method. 
The analyses revealed the nesting of the representative taxon of Dilleniales in the MPT but distinct from the representative of the order Santalales, Caryophyllales, Asterales, Cornales, Ericales, Lamiales, Saxifragales, Fabales, Malvales, Vitales and Berberidopsidales.}, } @article {pmid33727760, year = {2021}, author = {Bandara, E and Liang, X and Foytik, P and Shetty, S and Hall, C and Bowden, D and Ranasinghe, N and De Zoysa, K}, title = {A blockchain empowered and privacy preserving digital contact tracing platform.}, journal = {Information processing & management}, volume = {58}, number = {4}, pages = {102572}, pmid = {33727760}, issn = {0306-4573}, abstract = {The spread of the COVID-19 virus continues to increase fatality rates and exhaust the capacity of healthcare providers. Efforts to prevent transmission of the virus among humans remains a high priority. The current efforts to quarantine involve social distancing, monitoring and tracking the infected patients. However, the spread of the virus is too rapid to be contained only by manual and inefficient human contact tracing activities. To address this challenge, we have developed Connect, a blockchain empowered digital contact tracing platform that can leverage information on positive cases and notify people in their immediate proximity which would thereby reduce the rate at which the infection could spread. This would particularly be effective if sufficient people use the platform and benefit from the targeted recommendations. The recommendations would be made in a privacy-preserving fashion and contain the spread of the virus without the need for an extended period of potential lockdown. Connect is an identity wallet platform which will keep user digital identities and user activity trace data on a blockchain platform using Self-Sovereign Identity(SSI) proofs. 
User activities include the places he/she has travelled, the country of origin he/she came from, travel and dispatch updates from the airport etc. With these activity trace records, Connect platform can easily identify suspected patients who may be infected with the COVID-19 virus and take precautions before spreading it. By storing digital identities and activity trace records on blockchain-based SSI platform, Connect addresses the common issues in centralized cloud-based storage platforms (e.g. lack of data immutability, lack of traceability).}, } @article {pmid33724836, year = {2021}, author = {Olivella, R and Chiva, C and Serret, M and Mancera, D and Cozzuto, L and Hermoso, A and Borràs, E and Espadas, G and Morales, J and Pastor, O and Solé, A and Ponomarenko, J and Sabidó, E}, title = {QCloud2: An Improved Cloud-based Quality-Control System for Mass-Spectrometry-based Proteomics Laboratories.}, journal = {Journal of proteome research}, volume = {20}, number = {4}, pages = {2010-2013}, doi = {10.1021/acs.jproteome.0c00853}, pmid = {33724836}, issn = {1535-3907}, mesh = {*Cloud Computing ; Laboratories ; Mass Spectrometry ; *Proteomics ; Quality Control ; Reproducibility of Results ; Software ; }, abstract = {QCloud is a cloud-based system to support proteomics laboratories in daily quality assessment using a user-friendly interface, easy setup, and automated data processing. Since its release, QCloud has facilitated automated quality control for proteomics experiments in many laboratories. QCloud provides a quick and effortless evaluation of instrument performance that helps to overcome many analytical challenges derived from clinical and translational research. Here we present an improved version of the system, QCloud2. This new version includes enhancements in the scalability and reproducibility of the quality-control pipelines, and it features an improved front end for data visualization, user management, and chart annotation. 
The QCloud2 system also includes programmatic access and a standalone local version.}, } @article {pmid33719569, year = {2021}, author = {Tanwar, AS and Evangelatos, N and Venne, J and Ogilvie, LA and Satyamoorthy, K and Brand, A}, title = {Global Open Health Data Cooperatives Cloud in an Era of COVID-19 and Planetary Health.}, journal = {Omics : a journal of integrative biology}, volume = {25}, number = {3}, pages = {169-175}, doi = {10.1089/omi.2020.0134}, pmid = {33719569}, issn = {1557-8100}, mesh = {*Big Data ; COVID-19/*epidemiology/virology ; *Cloud Computing ; Delivery of Health Care ; *Global Health ; High-Throughput Nucleotide Sequencing ; Humans ; *Information Dissemination ; *International Cooperation ; *SARS-CoV-2/genetics ; }, abstract = {Big data in both the public domain and the health care industry are growing rapidly, for example, with broad availability of next-generation sequencing and large-scale phenomics datasets on patient-reported outcomes. In parallel, we are witnessing new research approaches that demand sharing of data for the benefit of planetary society. Health data cooperatives (HDCs) is one such approach, where health data are owned and governed collectively by citizens who take part in the HDCs. Data stored in HDCs should remain readily available for translation to public health practice but at the same time, governed in a critically informed manner to ensure data integrity, veracity, and privacy, to name a few pressing concerns. As a solution, we suggest that data generated from high-throughput omics research and phenomics can be stored in an open cloud platform so that researchers around the globe can share health data and work collaboratively. We describe here the Global Open Health Data Cooperatives Cloud (GOHDCC) as a proposed cloud platform-based model for the sharing of health data between different HDCCs around the globe. 
GOHDCC's main objective is to share health data on a global scale for robust and responsible global science, research, and development. GOHDCC is a citizen-oriented model cooperatively governed by citizens. The model essentially represents a global sharing platform that could benefit all stakeholders along the health care value chain.}, } @article {pmid33713354, year = {2021}, author = {Paredes-Pacheco, J and López-González, FJ and Silva-Rodríguez, J and Efthimiou, N and Niñerola-Baizán, A and Ruibal, Á and Roé-Vellvé, N and Aguiar, P}, title = {SimPET-An open online platform for the Monte Carlo simulation of realistic brain PET data. Validation for [18] F-FDG scans.}, journal = {Medical physics}, volume = {48}, number = {5}, pages = {2482-2493}, pmid = {33713354}, issn = {2473-4209}, support = {FPU16/05108//Ministerio de Educación, Cultura y Deporte (MECD)/ ; FPU17/04470//Ministerio de Educación, Cultura y Deporte (MECD)/ ; EAPA_791/2018//European Commission (EC)/ ; RYC-2015/17430//MEC | Consejo Superior de Investigaciones Científicas (CSIC)/ ; }, mesh = {Algorithms ; Brain/diagnostic imaging ; *Fluorodeoxyglucose F18 ; Humans ; Image Processing, Computer-Assisted ; Monte Carlo Method ; *Positron Emission Tomography Computed Tomography ; Positron-Emission Tomography ; }, abstract = {PURPOSE: SimPET (www.sim-pet.org) is a free cloud-based platform for the generation of realistic brain positron emission tomography (PET) data. In this work, we introduce the key features of the platform. In addition, we validate the platform by performing a comparison between simulated healthy brain FDG-PET images and real healthy subject data for three commercial scanners (GE Advance NXi, GE Discovery ST, and Siemens Biograph mCT).

METHODS: The platform provides a graphical user interface to a set of automatic scripts taking care of the code execution for the phantom generation, simulation (SimSET), and tomographic image reconstruction (STIR). We characterize the performance using activity and attenuation maps derived from PET/CT and MRI data of 25 healthy subjects acquired with a GE Discovery ST. We then use the created maps to generate synthetic data for the GE Discovery ST, the GE Advance NXi, and the Siemens Biograph mCT. The validation was carried out by evaluating Bland-Altman differences between real and simulated images for each scanner. In addition, SPM voxel-wise comparison was performed to highlight regional differences. Examples for amyloid PET and for the generation of ground-truth pathological patients are included.

RESULTS: The platform can be efficiently used for generating realistic simulated FDG-PET images in a reasonable amount of time. The validation showed small differences between SimPET and acquired FDG-PET images, with errors below 10% for 98.09% (GE Discovery ST), 95.09% (GE Advance NXi), and 91.35% (Siemens Biograph mCT) of the voxels. Nevertheless, our SPM analysis showed significant regional differences between the simulated images and real healthy patients, and thus, the use of the platform for converting control subject databases between different scanners requires further investigation.

CONCLUSIONS: The presented platform can potentially allow scientists in clinical and research settings to perform MC simulation experiments without the need for high-end hardware or advanced computing knowledge and in a reasonable amount of time.}, } @article {pmid33711538, year = {2021}, author = {Wang, X and Jiang, X and Vaidya, J}, title = {Efficient verification for outsourced genome-wide association studies.}, journal = {Journal of biomedical informatics}, volume = {117}, number = {}, pages = {103714}, pmid = {33711538}, issn = {1532-0480}, support = {R01 GM114612/GM/NIGMS NIH HHS/United States ; R01 GM118574/GM/NIGMS NIH HHS/United States ; R35 GM134927/GM/NIGMS NIH HHS/United States ; U01 TR002062/TR/NCATS NIH HHS/United States ; }, mesh = {Algorithms ; *Cloud Computing ; *Genome-Wide Association Study ; Humans ; Phenotype ; Polymorphism, Single Nucleotide ; }, abstract = {With cloud computing is being widely adopted in conducting genome-wide association studies (GWAS), how to verify the integrity of outsourced GWAS computation remains to be accomplished. Here, we propose two novel algorithms to generate synthetic SNPs that are indistinguishable from real SNPs. The first method creates synthetic SNPs based on the phenotype vector, while the second approach creates synthetic SNPs based on real SNPs that are most similar to the phenotype vector. The time complexity of the first approach and the second approach is Om and Omlogn[2], respectively, where m is the number of subjects while n is the number of SNPs. Furthermore, through a game theoretic analysis, we demonstrate that it is possible to incentivize honest behavior by the server by coupling appropriate payoffs with randomized verification. 
We conduct extensive experiments of our proposed methods, and the results show that beyond a formal adversarial model, when only a few synthetic SNPs are generated and mixed into the real data they cannot be distinguished from the real SNPs even by a variety of predictive machine learning models. We demonstrate that the proposed approach can ensure that logistic regression for GWAS can be outsourced in an efficient and trustworthy way.}, } @article {pmid33693476, year = {2021}, author = {Bahmani, A and Xing, Z and Krishnan, V and Ray, U and Mueller, F and Alavi, A and Tsao, PS and Snyder, MP and Pan, C}, title = {Hummingbird: efficient performance prediction for executing genomic applications in the cloud.}, journal = {Bioinformatics (Oxford, England)}, volume = {37}, number = {17}, pages = {2537-2543}, pmid = {33693476}, issn = {1367-4811}, support = {P50 HG007735/HG/NHGRI NIH HHS/United States ; RM1 HG007735/HG/NHGRI NIH HHS/United States ; U24 HG009397/HG/NHGRI NIH HHS/United States ; }, abstract = {MOTIVATION: A major drawback of executing genomic applications on cloud computing facilities is the lack of tools to predict which instance type is the most appropriate, often resulting in an over- or under- matching of resources. Determining the right configuration before actually running the applications will save money and time. Here, we introduce Hummingbird, a tool for predicting performance of computing instances with varying memory and CPU on multiple cloud platforms.

RESULTS: Our experiments on three major genomic data pipelines, including GATK HaplotypeCaller, GATK Mutect2 and ENCODE ATAC-seq, showed that Hummingbird was able to address applications in command line specified in JSON format or workflow description language (WDL) format, and accurately predicted the fastest, the cheapest and the most cost-efficient compute instances in an economic manner.

Hummingbird is available as an open source tool at: https://github.com/StanfordBioinformatics/Hummingbird.

SUPPLEMENTARY INFORMATION: Supplementary data are available at Bioinformatics online.}, } @article {pmid33693426, year = {2020}, author = {Wang, M and Yang, T and Flechas, MA and Harris, P and Hawks, B and Holzman, B and Knoepfel, K and Krupa, J and Pedro, K and Tran, N}, title = {GPU-Accelerated Machine Learning Inference as a Service for Computing in Neutrino Experiments.}, journal = {Frontiers in big data}, volume = {3}, number = {}, pages = {604083}, pmid = {33693426}, issn = {2624-909X}, abstract = {Machine learning algorithms are becoming increasingly prevalent and performant in the reconstruction of events in accelerator-based neutrino experiments. These sophisticated algorithms can be computationally expensive. At the same time, the data volumes of such experiments are rapidly increasing. The demand to process billions of neutrino events with many machine learning algorithm inferences creates a computing challenge. We explore a computing model in which heterogeneous computing with GPU coprocessors is made available as a web service. The coprocessors can be efficiently and elastically deployed to provide the right amount of computing for a given processing task. With our approach, Services for Optimized Network Inference on Coprocessors (SONIC), we integrate GPU acceleration specifically for the ProtoDUNE-SP reconstruction chain without disrupting the native computing workflow. With our integrated framework, we accelerate the most time-consuming task, track and particle shower hit identification, by a factor of 17. This results in a factor of 2.7 reduction in the total processing time when compared with CPU-only production. 
For this particular task, only 1 GPU is required for every 68 CPU threads, providing a cost-effective solution.}, } @article {pmid33693420, year = {2020}, author = {Qayyum, A and Ijaz, A and Usama, M and Iqbal, W and Qadir, J and Elkhatib, Y and Al-Fuqaha, A}, title = {Securing Machine Learning in the Cloud: A Systematic Review of Cloud Machine Learning Security.}, journal = {Frontiers in big data}, volume = {3}, number = {}, pages = {587139}, pmid = {33693420}, issn = {2624-909X}, abstract = {With the advances in machine learning (ML) and deep learning (DL) techniques, and the potency of cloud computing in offering services efficiently and cost-effectively, Machine Learning as a Service (MLaaS) cloud platforms have become popular. In addition, there is increasing adoption of third-party cloud services for outsourcing training of DL models, which requires substantial costly computational resources (e.g., high-performance graphics processing units (GPUs)). Such widespread usage of cloud-hosted ML/DL services opens a wide range of attack surfaces for adversaries to exploit the ML/DL system to achieve malicious goals. In this article, we conduct a systematic evaluation of literature of cloud-hosted ML/DL models along both the important dimensions-attacks and defenses-related to their security. Our systematic review identified a total of 31 related articles out of which 19 focused on attack, six focused on defense, and six focused on both attack and defense. Our evaluation reveals that there is an increasing interest from the research community on the perspective of attacking and defending different attacks on Machine Learning as a Service platforms. 
In addition, we identify the limitations and pitfalls of the analyzed articles and highlight open research issues that require further investigation.}, } @article {pmid33691446, year = {2021}, author = {Cai, Y and Zeng, M and Chen, YZ}, title = {The pharmacological mechanism of Huashi Baidu Formula for the treatment of COVID-19 by combined network pharmacology and molecular docking.}, journal = {Annals of palliative medicine}, volume = {10}, number = {4}, pages = {3864-3895}, doi = {10.21037/apm-20-1759}, pmid = {33691446}, issn = {2224-5839}, mesh = {*COVID-19 ; *Drugs, Chinese Herbal/pharmacology/therapeutic use ; Humans ; Molecular Docking Simulation ; SARS-CoV-2 ; }, abstract = {BACKGROUND: Huashi Baidu Formula (HSBDF) is a traditional Chinese medicine formula consisting of fourteen parts, which has been proven effective for treating coronavirus disease 2019 (COVID-19) clinically. However, the therapeutic mechanism of the effect of HSBDF on COVID-19 remains unclear.

METHODS: The components and action targets of HSBDF were searched in the TCMSP, YaTCM, PubChem, and TargetNet databases. Disease targets related to ACE2 were screened in single-cell sequence data of colon epithelial cells from other reports. The therapeutic targets of HSBDF for COVID-19 were obtained by integrated analysis, and the protein-protein interaction was analyzed using the STRING database. The Gene Ontology (GO) and Kyoto Encyclopedia of Genes and Genomes (KEGG) processes were analyzed using the OmicsBean and Metascape databases. The communication between networks [component-target (C-T) network, component-target-pathway (C-T-P) network, herb-target (H-T) network, target-pathway (T-P) network, and meridian-tropism (M-T) network] was constructed by Cytoscape software. The Cloud computing molecular docking platform was used to verify the molecular docking.

RESULTS: A total of 223 active ingredients and 358 targets of HSBDF were obtained.

CONCLUSIONS: HSBDF might act on SARS-CoV-2 through multiple components, targets, and pathways. Here we reveal preliminary results of the mechanism of action of HSBDF on SARS-CoV-2, providing a theoretical basis for future clinical applications.}, } @article {pmid33686319, year = {2021}, author = {Qi, Q and Tao, F and Cheng, Y and Cheng, J and Nee, AYC}, title = {New IT driven rapid manufacturing for emergency response.}, journal = {Journal of manufacturing systems}, volume = {60}, number = {}, pages = {928-935}, pmid = {33686319}, issn = {1878-6642}, abstract = {COVID-19, which is rampant around the world, has seriously disrupted people's normal work and living. To respond to public urgent needs such as COVID-19, emergency supplies are essential. However, due to the special requirements of supplies, when an emergency occurs, the supply reserve mostly cannot cope with the high demand. Given the importance of emergency supplies in public emergencies, rapid response manufacturing of emergency supplies is a necessity. The faster emergency supplies and facilities are manufactured, the more likely the pandemic can be controlled and the more human lives are saved. Besides, new generation information technology represented by cloud computing, IoT, big data, AI, etc. is rapidly developing and can be widely used to address such situations. Therefore, rapid response manufacturing enabled by New IT is presented to quickly meet emergency demands. And some policy suggestions are presented.}, } @article {pmid33681063, year = {2020}, author = {Jha, RR and Verma, RK and Kishore, A and Rana, RK and Barnwal, RK and Singh, HK and Kumar, D}, title = {Mapping fear among doctors manning screening clinics for COVID19. 
Results from cloud based survey in Eastern parts of India.}, journal = {Journal of family medicine and primary care}, volume = {9}, number = {12}, pages = {6194-6200}, pmid = {33681063}, issn = {2249-4863}, abstract = {BACKGROUND: As the number of cases of COVID19 from novel corona virus 2019 rises so are the number of deaths ensuing from it. Doctors have been in front in these calamitous times across the world. India has less number of doctors so doctors are overwhelmed with more number of patients to cater. Thereby they are also fearing that they will be exposed much as they often work in limited resource settings.

METHODS: An on line survey was to include doctors from eastern states in India for measuring the reasons of their fear and suggest possible solutions based on the results achieved thus. After IEC clearance a semi-structured anonymous questionnaire was sent on google forms as links on known to doctors, working in screening OPDs or flu clinics especially for COVID-19.

RESULTS: Out of 59 Doctors majority were provided with sanitizers for practicing hand hygiene. Gloves were provided everywhere but masks particularly N95 and Triple Layer surgical masks were not there for all. Training was not given universally. Fear was dependent on age in our sample.

CONCLUSION: Training and strict adherence to infection control measures along with resources can help in removing the fear.}, } @article {pmid33676373, year = {2021}, author = {Augustyn, DR and Wyciślik, Ł and Mrozek, D}, title = {Perspectives of using Cloud computing in integrative analysis of multi-omics data.}, journal = {Briefings in functional genomics}, volume = {20}, number = {4}, pages = {198-206}, doi = {10.1093/bfgp/elab007}, pmid = {33676373}, issn = {2041-2657}, support = {02/020/RGPL9/0184//Rector of the Silesian University of Technology, Gliwice, Poland/ ; 02/100/BK_21/0008//Statutory Research funds of Department of Applied Informatics, Silesian University of Technology, Gliwice, Poland/ ; }, mesh = {*Cloud Computing ; Data Analysis ; *Models, Theoretical ; }, abstract = {Integrative analysis of multi-omics data is usually computationally demanding. It frequently requires building complex, multi-step analysis pipelines, applying dedicated techniques for data processing and combining several data sources. These efforts lead to a better understanding of life processes, current health state or the effects of therapeutic activities. However, many omics data analysis solutions focus only on a selected problem, disease, types of data or organisms. Moreover, they are implemented for general-purpose scientific computational platforms that most often do not easily scale the calculations natively. These features are not conducive to advances in understanding genotype-phenotypic relationships. Fortunately, with new technological paradigms, including Cloud computing, virtualization and containerization, these functionalities could be orchestrated for easy scaling and building independent analysis pipelines for omics data. Therefore, solutions can be re-used for purposes that they were not primarily designed. This paper shows perspectives of using Cloud computing advances and containerization approach for such a purpose. 
We first review how the Cloud computing model is utilized in multi-omics data analysis and show weak points of the adopted solutions. Then, we introduce containerization concepts, which allow both scaling and linking of functional services designed for various purposes. Finally, on the Bioconductor software package example, we disclose a verified concept model of a universal solution that exhibits the potentials for performing integrative analysis of multiple omics data sources.}, } @article {pmid33672768, year = {2021}, author = {Hossain, MD and Sultana, T and Hossain, MA and Hossain, MI and Huynh, LNT and Park, J and Huh, EN}, title = {Fuzzy Decision-Based Efficient Task Offloading Management Scheme in Multi-Tier MEC-Enabled Networks.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {4}, pages = {}, pmid = {33672768}, issn = {1424-8220}, support = {No.2017-0-00294//Institute for Information & Communications Technology Planning & Evaluation/ ; }, abstract = {Multi-access edge computing (MEC) is a new leading technology for meeting the demands of key performance indicators (KPIs) in 5G networks. However, in a rapidly changing dynamic environment, it is hard to find the optimal target server for processing offloaded tasks because we do not know the end users' demands in advance. Therefore, quality of service (QoS) deteriorates because of increasing task failures and long execution latency from congestion. To reduce latency and avoid task failures from resource-constrained edge servers, vertical offloading between mobile devices with local-edge collaboration or with local edge-remote cloud collaboration have been proposed in previous studies. However, they ignored the nearby edge server in the same tier that has excess computing resources. 
Therefore, this paper introduces a fuzzy decision-based cloud-MEC collaborative task offloading management system called FTOM, which takes advantage of powerful remote cloud-computing capabilities and utilizes neighboring edge servers. The main objective of the FTOM scheme is to select the optimal target node for task offloading based on server capacity, latency sensitivity, and the network's condition. Our proposed scheme can make dynamic decisions where local or nearby MEC servers are preferred for offloading delay-sensitive tasks, and delay-tolerant high resource-demand tasks are offloaded to a remote cloud server. Simulation results affirm that our proposed FTOM scheme significantly improves the rate of successfully executing offloaded tasks by approximately 68.5%, and reduces task completion time by 66.6%, when compared with a local edge offloading (LEO) scheme. The improved and reduced rates are 32.4% and 61.5%, respectively, when compared with a two-tier edge orchestration-based offloading (TTEO) scheme. They are 8.9% and 47.9%, respectively, when compared with a fuzzy orchestration-based load balancing (FOLB) scheme, approximately 3.2% and 49.8%, respectively, when compared with a fuzzy workload orchestration-based task offloading (WOTO) scheme, and approximately 38.6%% and 55%, respectively, when compared with a fuzzy edge-orchestration based collaborative task offloading (FCTO) scheme.}, } @article {pmid33671542, year = {2021}, author = {Choi, J and Ahn, S}, title = {Optimal Service Provisioning for the Scalable Fog/Edge Computing Environment.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {4}, pages = {}, pmid = {33671542}, issn = {1424-8220}, support = {NRF-2018R1D1A1B07047339//National Research Foundation of Korea/ ; }, abstract = {In recent years, we observed the proliferation of cloud data centers (CDCs) and the Internet of Things (IoT). 
Cloud computing based on CDCs has the drawback of unpredictable response times due to variant delays between service requestors (IoT devices and end devices) and CDCs. This deficiency of cloud computing is especially problematic in providing IoT services with strict timing requirements and as a result, gives birth to fog/edge computing (FEC) whose responsiveness is achieved by placing service images near service requestors. In FEC, the computing nodes located close to service requestors are called fog/edge nodes (FENs). In addition, for an FEN to execute a specific service, it has to be provisioned with the corresponding service image. Most of the previous work on the service provisioning in the FEC environment deals with determining an appropriate FEN satisfying the requirements like delay, CPU and storage from the perspective of one or more service requests. In this paper, we determined how to optimally place service images in consideration of the pre-obtained service demands which may be collected during the prior time interval. The proposed FEC environment is scalable in the sense that the resources of FENs are effectively utilized thanks to the optimal provisioning of services on FENs. We propose two approaches to provision service images on FENs. 
In order to validate the performance of the proposed mechanisms, intensive simulations were carried out for various service demand scenarios.}, } @article {pmid33671281, year = {2021}, author = {Adnan, M and Iqbal, J and Waheed, A and Amin, NU and Zareei, M and Goudarzi, S and Umer, A}, title = {On the Design of Efficient Hierarchic Architecture for Software Defined Vehicular Networks.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {4}, pages = {}, pmid = {33671281}, issn = {1424-8220}, support = {grant ID: GGPM-2020-029 and grant ID: PP-FTSM-2020//The Ministry of Higher Education Malaysia and Universiti Kebangsaan Malaysia/ ; }, abstract = {Modern vehicles are equipped with various sensors, onboard units, and devices such as Application Unit (AU) that support routing and communication. In VANETs, traffic management and Quality of Service (QoS) are the main research dimensions to be considered while designing VANETs architectures. To cope with the issues of QoS faced by the VANETs, we design an efficient SDN-based architecture where we focus on the QoS of VANETs. In this paper, QoS is achieved by a priority-based scheduling algorithm in which we prioritize traffic flow messages in the safety queue and non-safety queue. In the safety queue, the messages are prioritized based on deadline and size using the New Deadline and Size of data method (NDS) with constrained location and deadline. In contrast, the non-safety queue is prioritized based on First Come First Serve (FCFS) method. For the simulation of our proposed scheduling algorithm, we use a well-known cloud computing framework CloudSim toolkit. 
The simulation results of safety messages show better performance than non-safety messages in terms of execution time.}, } @article {pmid33671142, year = {2021}, author = {Fang, J and Shi, J and Lu, S and Zhang, M and Ye, Z}, title = {An Efficient Computation Offloading Strategy with Mobile Edge Computing for IoT.}, journal = {Micromachines}, volume = {12}, number = {2}, pages = {}, pmid = {33671142}, issn = {2072-666X}, support = {61202076//National Natural Science Foundation of China/ ; 4192007//Beijing Natural Science Foundation/ ; }, abstract = {With the rapid development of mobile cloud computing (MCC), the Internet of Things (IoT), and artificial intelligence (AI), user equipment (UEs) are facing explosive growth. In order to effectively solve the problem that UEs may face with insufficient capacity when dealing with computationally intensive and delay sensitive applications, we take Mobile Edge Computing (MEC) of the IoT as the starting point and study the computation offloading strategy of UEs. First, we model the application generated by UEs as a directed acyclic graph (DAG) to achieve fine-grained task offloading scheduling, which makes the parallel processing of tasks possible and speeds up the execution efficiency. Then, we propose a multi-population cooperative elite algorithm (MCE-GA) based on the standard genetic algorithm, which can solve the offloading problem for tasks with dependency in MEC to minimize the execution delay and energy consumption of applications. Experimental results show that MCE-GA has better performance compared to the baseline algorithms. 
To be specific, the overhead reduction by MCE-GA can be up to 72.4%, 38.6%, and 19.3%, respectively, which proves the effectiveness and reliability of MCE-GA.}, } @article {pmid33670040, year = {2021}, author = {Shang, M and Luo, J}, title = {The Tapio Decoupling Principle and Key Strategies for Changing Factors of Chinese Urban Carbon Footprint Based on Cloud Computing.}, journal = {International journal of environmental research and public health}, volume = {18}, number = {4}, pages = {}, pmid = {33670040}, issn = {1660-4601}, mesh = {Carbon/analysis ; *Carbon Footprint ; China ; Cities ; Cloud Computing ; Economic Development ; *Ecosystem ; }, abstract = {The expansion of Xi'an City has caused the consumption of energy and land resources, leading to serious environmental pollution problems. For this purpose, this study was carried out to measure the carbon carrying capacity, net carbon footprint and net carbon footprint pressure index of Xi'an City, and to characterize the carbon sequestration capacity of Xi'an ecosystem, thereby laying a foundation for developing comprehensive and reasonable low-carbon development measures. This study expects to provide a reference for China to develop a low-carbon economy through Tapio decoupling principle. The decoupling relationship between CO2 and driving factors was explored through Tapio decoupling model. The time-series data was used to calculate the carbon footprint. The auto-encoder in deep learning technology was combined with the parallel algorithm in cloud computing. A general multilayer perceptron neural network realized by a parallel BP learning algorithm was proposed based on Map-Reduce on a cloud computing cluster. A partial least squares (PLS) regression model was constructed to analyze driving factors. 
The results show that in terms of city size, the variable importance in projection (VIP) output of the urbanization rate has a strong inhibitory effect on carbon footprint growth, and the VIP value of permanent population ranks the last; in terms of economic development, the impact of fixed asset investment and added value of the secondary industry on carbon footprint ranks third and fourth. As a result, the marginal effect of carbon footprint is greater than that of economic growth after economic growth reaches a certain stage, revealing that the driving forces and mechanisms can promote the growth of urban space.}, } @article {pmid33668282, year = {2021}, author = {Goyal, S and Bhushan, S and Kumar, Y and Rana, AUHS and Bhutta, MR and Ijaz, MF and Son, Y}, title = {An Optimized Framework for Energy-Resource Allocation in A Cloud Environment based on the Whale Optimization Algorithm.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {5}, pages = {}, pmid = {33668282}, issn = {1424-8220}, support = {2020R1C1C1003425//National Research Foundation of Korea/ ; }, abstract = {Cloud computing offers the services to access, manipulate and configure data online over the web. The cloud term refers to an internet network which is remotely available and accessible at anytime from anywhere. Cloud computing is undoubtedly an innovation as the investment in the real and physical infrastructure is much greater than the cloud technology investment. The present work addresses the issue of power consumption done by cloud infrastructure. As there is a need for algorithms and techniques that can reduce energy consumption and schedule resource for the effectiveness of servers. Load balancing is also a significant part of cloud technology that enables the balanced distribution of load among multiple servers to fulfill users' growing demand. 
The present work used various optimization algorithms such as particle swarm optimization (PSO), cat swarm optimization (CSO), BAT, cuckoo search algorithm (CSA) optimization algorithm and the whale optimization algorithm (WOA) for balancing the load, energy efficiency, and better resource scheduling to make an efficient cloud environment. In the case of seven servers and eight server's settings, the results revealed that whale optimization algorithm outperformed other algorithms in terms of response time, energy consumption, execution time and throughput.}, } @article {pmid33664984, year = {2020}, author = {Stevens, L and Kao, D and Hall, J and Görg, C and Abdo, K and Linstead, E}, title = {ML-MEDIC: A Preliminary Study of an Interactive Visual Analysis Tool Facilitating Clinical Applications of Machine Learning for Precision Medicine.}, journal = {Applied sciences (Basel, Switzerland)}, volume = {10}, number = {9}, pages = {}, pmid = {33664984}, issn = {2076-3417}, support = {T15 LM009451/LM/NLM NIH HHS/United States ; }, abstract = {Accessible interactive tools that integrate machine learning methods with clinical research and reduce the programming experience required are needed to move science forward. Here, we present Machine Learning for Medical Exploration and Data-Inspired Care (ML-MEDIC), a point-and-click, interactive tool with a visual interface for facilitating machine learning and statistical analyses in clinical research. We deployed ML-MEDIC in the American Heart Association (AHA) Precision Medicine Platform to provide secure internet access and facilitate collaboration. ML-MEDIC's efficacy for facilitating the adoption of machine learning was evaluated through two case studies in collaboration with clinical domain experts. 
A domain expert review was also conducted to obtain an impression of the usability and potential limitations.}, } @article {pmid33664272, year = {2021}, author = {Shiff, S and Helman, D and Lensky, IM}, title = {Worldwide continuous gap-filled MODIS land surface temperature dataset.}, journal = {Scientific data}, volume = {8}, number = {1}, pages = {74}, pmid = {33664272}, issn = {2052-4463}, support = {203-1184-19//Ministry of Agriculture and Rural Development (Israeli Ministry of Agriculture and Rural Development)/ ; }, abstract = {Satellite land surface temperature (LST) is vital for climatological and environmental studies. However, LST datasets are not continuous in time and space mainly due to cloud cover. Here we combine LST with Climate Forecast System Version 2 (CFSv2) modeled temperatures to derive a continuous gap filled global LST dataset at a spatial resolution of 1 km. Temporal Fourier analysis is used to derive the seasonality (climatology) on a pixel-by-pixel basis, for LST and CFSv2 temperatures. Gaps are filled by adding the CFSv2 temperature anomaly to climatological LST. The accuracy is evaluated in nine regions across the globe using cloud-free LST (mean values: R[2] = 0.93, Root Mean Square Error (RMSE) = 2.7 °C, Mean Absolute Error (MAE) = 2.1 °C). The provided dataset contains day, night, and daily mean LST for the Eastern Mediterranean. 
We provide a Google Earth Engine code and a web app that generates gap filled LST in any part of the world, alongside a pixel-based evaluation of the data in terms of MAE, RMSE and Pearson's r.}, } @article {pmid35782189, year = {2021}, author = {Liu, J and Miao, F and Yin, L and Pang, Z and Li, Y}, title = {A Noncontact Ballistocardiography-Based IoMT System for Cardiopulmonary Health Monitoring of Discharged COVID-19 Patients.}, journal = {IEEE internet of things journal}, volume = {8}, number = {21}, pages = {15807-15817}, pmid = {35782189}, issn = {2327-4662}, abstract = {We developed a ballistocardiography (BCG)-based Internet-of-Medical-Things (IoMT) system for remote monitoring of cardiopulmonary health. The system composes of BCG sensor, edge node, and cloud platform. To improve computational efficiency and system stability, the system adopted collaborative computing between edge nodes and cloud platforms. Edge nodes undertake signal processing tasks, namely approximate entropy for signal quality assessment, a lifting wavelet scheme for separating the BCG and respiration signal, and the lightweight BCG and respiration signal peaks detection. Heart rate variability (HRV), respiratory rate variability (RRV) analysis and other intelligent computing are performed on cloud platform. In experiments with 25 participants, the proposed method achieved a mean absolute error (MAE)±standard deviation of absolute error (SDAE) of 9.6±8.2 ms for heartbeat intervals detection, and a MAE±SDAE of 22.4±31.1 ms for respiration intervals detection. To study the recovery of cardiopulmonary function in patients with coronavirus disease 2019 (COVID-19), this study recruited 186 discharged patients with COVID-19 and 186 control volunteers. The results indicate that the recovery performance of the respiratory rhythm is better than the heart rhythm among discharged patients with COVID-19. 
This reminds the patients to be aware of the risk of cardiovascular disease after recovering from COVID-19. Therefore, our remote monitoring system has the ability to play a major role in the follow up and management of discharged patients with COVID-19.}, } @article {pmid33657217, year = {2021}, author = {Figueroa, CA and Aguilera, A and Chakraborty, B and Modiri, A and Aggarwal, J and Deliu, N and Sarkar, U and Jay Williams, J and Lyles, CR}, title = {Adaptive learning algorithms to optimize mobile applications for behavioral health: guidelines for design decisions.}, journal = {Journal of the American Medical Informatics Association : JAMIA}, volume = {28}, number = {6}, pages = {1225-1234}, pmid = {33657217}, issn = {1527-974X}, support = {R01 HS025429/HS/AHRQ HHS/United States ; }, mesh = {Algorithms ; Humans ; Machine Learning ; *Mobile Applications ; Reproducibility of Results ; *Telemedicine ; }, abstract = {OBJECTIVE: Providing behavioral health interventions via smartphones allows these interventions to be adapted to the changing behavior, preferences, and needs of individuals. This can be achieved through reinforcement learning (RL), a sub-area of machine learning. However, many challenges could affect the effectiveness of these algorithms in the real world. We provide guidelines for decision-making.

MATERIALS AND METHODS: Using thematic analysis, we describe challenges, considerations, and solutions for algorithm design decisions in a collaboration between health services researchers, clinicians, and data scientists. We use the design process of an RL algorithm for a mobile health study "DIAMANTE" for increasing physical activity in underserved patients with diabetes and depression. Over the 1.5-year project, we kept track of the research process using collaborative cloud Google Documents, Whatsapp messenger, and video teleconferencing. We discussed, categorized, and coded critical challenges. We grouped challenges to create thematic topic process domains.

RESULTS: Nine challenges emerged, which we divided into 3 major themes: 1. Choosing the model for decision-making, including appropriate contextual and reward variables; 2. Data handling/collection, such as how to deal with missing or incorrect data in real-time; 3. Weighing the algorithm performance vs effectiveness/implementation in real-world settings.

CONCLUSION: The creation of effective behavioral health interventions does not depend only on final algorithm performance. Many decisions in the real world are necessary to formulate the design of problem parameters to which an algorithm is applied. Researchers must document and evaluate these considerations and decisions before and during the intervention period, to increase transparency, accountability, and reproducibility.

TRIAL REGISTRATION: clinicaltrials.gov, NCT03490253.}, } @article {pmid33656996, year = {2022}, author = {Huang, Q and Yue, W and Yang, Y and Chen, L}, title = {P2GT: Fine-Grained Genomic Data Access Control With Privacy-Preserving Testing in Cloud Computing.}, journal = {IEEE/ACM transactions on computational biology and bioinformatics}, volume = {19}, number = {4}, pages = {2385-2398}, doi = {10.1109/TCBB.2021.3063388}, pmid = {33656996}, issn = {1557-9964}, mesh = {Algorithms ; *Cloud Computing ; Computer Security ; Genomics ; *Privacy ; }, abstract = {With the rapid development of bioinformatics and the availability of genetic sequencing technologies, genomic data has been used to facilitate personalized medicine. Cloud computing, features as low cost, rich storage and rapid processing can precisely respond to the challenges brought by the emergence of massive genomic data. Considering the security of cloud platform and the privacy of genomic data, we first introduce P2GT which utilizes key-policy attribute-based encryption to realize genomic data access control with unbounded attributes, and employs equality test algorithm to achieve personalized medicine test by matching digitized single nucleotide polymorphisms (SNPs) directly on the users' ciphertext without encrypting multiple times. We then propose an enhanced scheme P2GT+, which adopts identity-based encryption with equality test supporting flexible joint authorization to realize privacy-preserving paternity test, genetic compatibility test and disease susceptibility test over the encrypted SNPs with P2GT. We prove the security of proposed schemes and conduct extensive experiments with the 1,000 Genomes dataset. 
The results show that P2GT and P2GT+ are practical and scalable enough to meet the privacy-preserving and authorized genetic testing requirements in cloud computing.}, } @article {pmid33656352, year = {2021}, author = {Elgendy, IA and Muthanna, A and Hammoudeh, M and Shaiba, H and Unal, D and Khayyat, M}, title = {Advanced Deep Learning for Resource Allocation and Security Aware Data Offloading in Industrial Mobile Edge Computing.}, journal = {Big data}, volume = {9}, number = {4}, pages = {265-278}, doi = {10.1089/big.2020.0284}, pmid = {33656352}, issn = {2167-647X}, mesh = {Algorithms ; Cloud Computing ; Computer Security ; *Deep Learning ; Resource Allocation ; }, abstract = {The Internet of Things (IoT) is permeating our daily lives through continuous environmental monitoring and data collection. The promise of low latency communication, enhanced security, and efficient bandwidth utilization lead to the shift from mobile cloud computing to mobile edge computing. In this study, we propose an advanced deep reinforcement resource allocation and security-aware data offloading model that considers the constrained computation and radio resources of industrial IoT devices to guarantee efficient sharing of resources between multiple users. This model is formulated as an optimization problem with the goal of decreasing energy consumption and computation delay. This type of problem is non-deterministic polynomial time-hard due to the curse-of-dimensionality challenge, thus, a deep learning optimization approach is presented to find an optimal solution. In addition, a 128-bit Advanced Encryption Standard-based cryptographic approach is proposed to satisfy the data security requirements. Experimental evaluation results show that the proposed model can reduce offloading overhead in terms of energy and time by up to 64.7% in comparison with the local execution approach. 
It also outperforms the full offloading scenario by up to 13.2%, where it can select some computation tasks to be offloaded while optimally rejecting others. Finally, it is adaptable and scalable for a large number of mobile devices.}, } @article {pmid33655263, year = {2021}, author = {Machi, D and Bhattacharya, P and Hoops, S and Chen, J and Mortveit, H and Venkatramanan, S and Lewis, B and Wilson, M and Fadikar, A and Maiden, T and Barrett, CL and Marathe, MV}, title = {Scalable Epidemiological Workflows to Support COVID-19 Planning and Response.}, journal = {medRxiv : the preprint server for health sciences}, volume = {}, number = {}, pages = {}, doi = {10.1101/2021.02.23.21252325}, pmid = {33655263}, abstract = {The COVID-19 global outbreak represents the most significant epidemic event since the 1918 influenza pandemic. Simulations have played a crucial role in supporting COVID-19 planning and response efforts. Developing scalable workflows to provide policymakers quick responses to important questions pertaining to logistics, resource allocation, epidemic forecasts and intervention analysis remains a challenging computational problem. In this work, we present scalable high performance computing-enabled workflows for COVID-19 pandemic planning and response. The scalability of our methodology allows us to run fine-grained simulations daily, and to generate county-level forecasts and other counter-factual analysis for each of the 50 states (and DC), 3140 counties across the USA. Our workflows use a hybrid cloud/cluster system utilizing a combination of local and remote cluster computing facilities, and using over 20,000 CPU cores running for 6-9 hours every day to meet this objective. Our state (Virginia), state hospital network, our university, the DOD and the CDC use our models to guide their COVID-19 planning and response efforts. 
We began executing these pipelines March 25, 2020, and have delivered and briefed weekly updates to these stakeholders for over 30 weeks without interruption.}, } @article {pmid33644298, year = {2021}, author = {Abbasi, WA and Abbas, SA and Andleeb, S and Ul Islam, G and Ajaz, SA and Arshad, K and Khalil, S and Anjam, A and Ilyas, K and Saleem, M and Chughtai, J and Abbas, A}, title = {COVIDC: An expert system to diagnose COVID-19 and predict its severity using chest CT scans: Application in radiology.}, journal = {Informatics in medicine unlocked}, volume = {23}, number = {}, pages = {100540}, pmid = {33644298}, issn = {2352-9148}, abstract = {Early diagnosis of Coronavirus disease 2019 (COVID-19) is significantly important, especially in the absence or inadequate provision of a specific vaccine, to stop the surge of this lethal infection by advising quarantine. This diagnosis is challenging as most of the patients having COVID-19 infection stay asymptomatic while others showing symptoms are hard to distinguish from patients having different respiratory infections such as severe flu and Pneumonia. Due to cost and time-consuming wet-lab diagnostic tests for COVID-19, there is an utmost requirement for some alternate, non-invasive, rapid, and discounted automatic screening system. A chest CT scan can effectively be used as an alternative modality to detect and diagnose the COVID-19 infection. In this study, we present an automatic COVID-19 diagnostic and severity prediction system called COVIDC (COVID-19 detection using CT scans) that uses deep feature maps from the chest CT scans for this purpose. Our newly proposed system not only detects COVID-19 but also predicts its severity by using a two-phase classification approach (COVID vs non-COVID, and COVID-19 severity) with deep feature maps and different shallow supervised classification algorithms such as SVMs and random forest to handle data scarcity. 
We performed a stringent COVIDC performance evaluation not only through 10-fold cross-validation and an external validation dataset but also in a real setting under the supervision of an experienced radiologist. In all the evaluation settings, COVIDC outperformed all the existing state-of-the-art methods designed to detect COVID-19 with an F1 score of 0.94 on the validation dataset and justified its use to diagnose COVID-19 effectively in the real setting by classifying correctly 9 out of 10 COVID-19 CT scans. We made COVIDC openly accessible through a cloud-based webserver and python code available at https://sites.google.com/view/wajidarshad/software and https://github.com/wajidarshad/covidc.}, } @article {pmid33643498, year = {2021}, author = {Smidt, HJ and Jokonya, O}, title = {The challenge of privacy and security when using technology to track people in times of COVID-19 pandemic.}, journal = {Procedia computer science}, volume = {181}, number = {}, pages = {1018-1026}, pmid = {33643498}, issn = {1877-0509}, abstract = {Since the start of the Coronavirus disease 2019 (COVID-19) governments and health authorities across the world have found it very difficult to control infections. Digital technologies such as artificial intelligence (AI), big data, cloud computing, blockchain and 5G have effectively improved the efficiency of efforts in epidemic monitoring, virus tracking, prevention, control and treatment. Surveillance to halt COVID-19 has raised privacy concerns, as many governments are willing to overlook privacy implications to save lives. The purpose of this paper is to conduct a focused Systematic Literature Review (SLR), to explore the potential benefits and implications of using digital technologies such as AI, big data and cloud to track COVID-19 amongst people in different societies. 
The aim is to highlight the risks of security and privacy to personal data when using technology to track COVID-19 in societies and identify ways to govern these risks. The paper uses the SLR approach to examine 40 articles published during 2020, ultimately down selecting to the most relevant 24 studies. In this SLR approach we adopted the following steps; formulated the problem, searched the literature, gathered information from studies, evaluated the quality of studies, analysed and integrated the outcomes of studies while concluding by interpreting the evidence and presenting the results. Papers were classified into different categories such as technology use, impact on society and governance. The study highlighted the challenge for government to balance the need of what is good for public health versus individual privacy and freedoms. The findings revealed that although the use of technology helps governments and health agencies reduce the spread of the COVID-19 virus, government surveillance to halt it has sparked privacy concerns. We suggest some requirements for government policy to be ethical and capable of commanding the trust of the public and present some research questions for future research.}, } @article {pmid33633531, year = {2021}, author = {Brivio, S and Ly, DRB and Vianello, E and Spiga, S}, title = {Non-linear Memristive Synaptic Dynamics for Efficient Unsupervised Learning in Spiking Neural Networks.}, journal = {Frontiers in neuroscience}, volume = {15}, number = {}, pages = {580909}, pmid = {33633531}, issn = {1662-4548}, abstract = {Spiking neural networks (SNNs) are a computational tool in which the information is coded into spikes, as in some parts of the brain, differently from conventional neural networks (NNs) that compute over real-numbers. Therefore, SNNs can implement intelligent information extraction in real-time at the edge of data acquisition and correspond to a complementary solution to conventional NNs working for cloud-computing. 
Both NN classes face hardware constraints due to limited computing parallelism and separation of logic and memory. Emerging memory devices, like resistive switching memories, phase change memories, or memristive devices in general are strong candidates to remove these hurdles for NN applications. The well-established training procedures of conventional NNs helped in defining the desiderata for memristive device dynamics implementing synaptic units. The generally agreed requirements are a linear evolution of memristive conductance upon stimulation with train of identical pulses and a symmetric conductance change for conductance increase and decrease. Conversely, little work has been done to understand the main properties of memristive devices supporting efficient SNN operation. The reason lies in the lack of a background theory for their training. As a consequence, requirements for NNs have been taken as a reference to develop memristive devices for SNNs. In the present work, we show that, for efficient CMOS/memristive SNNs, the requirements for synaptic memristive dynamics are very different from the needs of a conventional NN. System-level simulations of a SNN trained to classify hand-written digit images through a spike timing dependent plasticity protocol are performed considering various linear and non-linear plausible synaptic memristive dynamics. We consider memristive dynamics bounded by artificial hard conductance values and limited by the natural dynamics evolution toward asymptotic values (soft-boundaries). We quantitatively analyze the impact of resolution and non-linearity properties of the synapses on the network training and classification performance. Finally, we demonstrate that the non-linear synapses with hard boundary values enable higher classification performance and realize the best trade-off between classification accuracy and required training time. 
With reference to the obtained results, we discuss how memristive devices with non-linear dynamics constitute a technologically convenient solution for the development of on-line SNN training.}, } @article {pmid33625229, year = {2021}, author = {Bai, J and Bandla, C and Guo, J and Vera Alvarez, R and Bai, M and Vizcaíno, JA and Moreno, P and Grüning, B and Sallou, O and Perez-Riverol, Y}, title = {BioContainers Registry: Searching Bioinformatics and Proteomics Tools, Packages, and Containers.}, journal = {Journal of proteome research}, volume = {20}, number = {4}, pages = {2056-2061}, pmid = {33625229}, issn = {1535-3907}, support = {/WT_/Wellcome Trust/United Kingdom ; 208391/WT_/Wellcome Trust/United Kingdom ; 208391/Z/17/Z/WT_/Wellcome Trust/United Kingdom ; }, mesh = {*Computational Biology ; *Proteomics ; Registries ; Reproducibility of Results ; Software ; }, abstract = {BioContainers is an open-source project that aims to create, store, and distribute bioinformatics software containers and packages. The BioContainers community has developed a set of guidelines to standardize software containers including the metadata, versions, licenses, and software dependencies. BioContainers supports multiple packaging and container technologies such as Conda, Docker, and Singularity. The BioContainers provide over 9000 bioinformatics tools, including more than 200 proteomics and mass spectrometry tools. Here we introduce the BioContainers Registry and Restful API to make containerized bioinformatics tools more findable, accessible, interoperable, and reusable (FAIR). The BioContainers Registry provides a fast and convenient way to find and retrieve bioinformatics tool packages and containers. 
By doing so, it will increase the use of bioinformatics packages and containers while promoting replicability and reproducibility in research.}, } @article {pmid33621175, year = {2021}, author = {Katakol, S and Elbarashy, B and Herranz, L and van de Weijer, J and Lopez, AM}, title = {Distributed Learning and Inference With Compressed Images.}, journal = {IEEE transactions on image processing : a publication of the IEEE Signal Processing Society}, volume = {30}, number = {}, pages = {3069-3083}, doi = {10.1109/TIP.2021.3058545}, pmid = {33621175}, issn = {1941-0042}, abstract = {Modern computer vision requires processing large amounts of data, both while training the model and/or during inference, once the model is deployed. Scenarios where images are captured and processed in physically separated locations are increasingly common (e.g. autonomous vehicles, cloud computing, smartphones). In addition, many devices suffer from limited resources to store or transmit data (e.g. storage space, channel capacity). In these scenarios, lossy image compression plays a crucial role to effectively increase the number of images collected under such constraints. However, lossy compression entails some undesired degradation of the data that may harm the performance of the downstream analysis task at hand, since important semantic information may be lost in the process. Moreover, we may only have compressed images at training time but are able to use original images at inference time (i.e. test), or vice versa, and in such a case, the downstream model suffers from covariate shift. In this paper, we analyze this phenomenon, with a special focus on vision-based perception for autonomous driving as a paradigmatic scenario. We see that loss of semantic information and covariate shift do indeed exist, resulting in a drop in performance that depends on the compression rate. 
In order to address the problem, we propose dataset restoration, based on image restoration with generative adversarial networks (GANs). Our method is agnostic to both the particular image compression method and the downstream task; and has the advantage of not adding additional cost to the deployed models, which is particularly important in resource-limited devices. The presented experiments focus on semantic segmentation as a challenging use case, cover a broad range of compression rates and diverse datasets, and show how our method is able to significantly alleviate the negative effects of compression on the downstream visual task.}, } @article {pmid33611874, year = {2021}, author = {Seong, Y and You, SC and Ostropolets, A and Rho, Y and Park, J and Cho, J and Dymshyts, D and Reich, CG and Heo, Y and Park, RW}, title = {Incorporation of Korean Electronic Data Interchange Vocabulary into Observational Medical Outcomes Partnership Vocabulary.}, journal = {Healthcare informatics research}, volume = {27}, number = {1}, pages = {29-38}, pmid = {33611874}, issn = {2093-3681}, support = {//Health Insurance Review and Assessment Service/ ; }, abstract = {OBJECTIVES: We incorporated the Korean Electronic Data Interchange (EDI) vocabulary into Observational Medical Outcomes Partnership (OMOP) vocabulary using a semi-automated process. The goal of this study was to improve the Korean EDI as a standard medical ontology in Korea.

METHODS: We incorporated the EDI vocabulary into OMOP vocabulary through four main steps. First, we improved the current classification of EDI domains and separated medical services into procedures and measurements. Second, each EDI concept was assigned a unique identifier and validity dates. Third, we built a vertical hierarchy between EDI concepts, fully describing child concepts through relationships and attributes and linking them to parent terms. Finally, we added an English definition for each EDI concept. We translated the Korean definitions of EDI concepts using Google.Cloud.Translation.V3, using a client library and manual translation. We evaluated the EDI using 11 auditing criteria for controlled vocabularies.

RESULTS: We incorporated 313,431 concepts from the EDI to the OMOP Standardized Vocabularies. For 10 of the 11 auditing criteria, EDI showed a better quality index within the OMOP vocabulary than in the original EDI vocabulary.

CONCLUSIONS: The incorporation of the EDI vocabulary into the OMOP Standardized Vocabularies allows better standardization to facilitate network research. Our research provides a promising model for mapping Korean medical information into a global standard terminology system, although a comprehensive mapping of official vocabulary remains to be done in the future.}, } @article {pmid33602102, year = {2022}, author = {Rao, PMM and Singh, SK and Khamparia, A and Bhushan, B and Podder, P}, title = {Multi-Class Breast Cancer Classification Using Ensemble of Pretrained models and Transfer Learning.}, journal = {Current medical imaging}, volume = {18}, number = {4}, pages = {409-416}, doi = {10.2174/1573405617666210218101418}, pmid = {33602102}, issn = {1573-4056}, mesh = {Breast ; *Breast Neoplasms/diagnostic imaging ; Female ; Humans ; Machine Learning ; Neural Networks, Computer ; }, abstract = {AIMS: Early detection of breast cancer has reduced many deaths. Earlier CAD systems used to be the second opinion for radiologists and clinicians. Machine learning and deep learning have brought tremendous changes in medical diagnosis and imagining.

BACKGROUND: Breast cancer is the most commonly occurring cancer in women and it is the second most common cancer overall. According to the 2018 statistics, there were over 2 million cases all over the world. Belgium and Luxembourg have the highest rate of cancer.

OBJECTIVE: A method for breast cancer detection has been proposed using Ensemble learning. 2-class and 8-class classification is performed.

METHODS: To deal with imbalanced classification, the authors have proposed an ensemble of pretrained models.

RESULTS: 98.5% training accuracy and 89% of test accuracy are achieved on 8-class classification. Moreover, 99.1% and 98% train and test accuracy are achieved on 2-class classification.

CONCLUSION: It is found that there are high misclassifications in class DC when compared to the other classes; this is due to the imbalance in the dataset. In the future, one can increase the size of the datasets or use different methods. To implement this research work, the authors have used 2 Nvidia Tesla V100 GPUs on the Google Cloud Platform.}, } @article {pmid33600344, year = {2021}, author = {R Niakan Kalhori, S and Bahaadinbeigy, K and Deldar, K and Gholamzadeh, M and Hajesmaeel-Gohari, S and Ayyoubzadeh, SM}, title = {Digital Health Solutions to Control the COVID-19 Pandemic in Countries With High Disease Prevalence: Literature Review.}, journal = {Journal of medical Internet research}, volume = {23}, number = {3}, pages = {e19473}, pmid = {33600344}, issn = {1438-8871}, mesh = {Humans ; *COVID-19/epidemiology/prevention & control ; *Infection Control/methods ; Information Technology/standards ; *Pandemics/prevention & control ; Prevalence ; SARS-CoV-2/isolation & purification ; *Telemedicine/organization & administration ; }, abstract = {BACKGROUND: COVID-19, the disease caused by the novel coronavirus SARS-CoV-2, has become a global pandemic, affecting most countries worldwide. Digital health information technologies can be applied in three aspects, namely digital patients, digital devices, and digital clinics, and could be useful in fighting the COVID-19 pandemic.

OBJECTIVE: Recent reviews have examined the role of digital health in controlling COVID-19 to identify the potential of digital health interventions to fight the disease. However, this study aims to review and analyze the digital technology that is being applied to control the COVID-19 pandemic in the 10 countries with the highest prevalence of the disease.

METHODS: For this review, the Google Scholar, PubMed, Web of Science, and Scopus databases were searched in August 2020 to retrieve publications from December 2019 to March 15, 2020. Furthermore, the Google search engine was used to identify additional applications of digital health for COVID-19 pandemic control.

RESULTS: We included 32 papers in this review that reported 37 digital health applications for COVID-19 control. The most common digital health projects to address COVID-19 were telemedicine visits (11/37, 30%). Digital learning packages for informing people about the disease, geographic information systems and quick response code applications for real-time case tracking, and cloud- or mobile-based systems for self-care and patient tracking were in the second rank of digital tool applications (all 7/37, 19%). The projects were deployed in various European countries and in the United States, Australia, and China.

CONCLUSIONS: Considering the potential of available information technologies worldwide in the 21st century, particularly in developed countries, it appears that more digital health products with a higher level of intelligence capability remain to be applied for the management of pandemics and health-related crises.}, } @article {pmid33591447, year = {2022}, author = {Jheng, YC and Wang, YP and Lin, HE and Sung, KY and Chu, YC and Wang, HS and Jiang, JK and Hou, MC and Lee, FY and Lu, CL}, title = {A novel machine learning-based algorithm to identify and classify lesions and anatomical landmarks in colonoscopy images.}, journal = {Surgical endoscopy}, volume = {36}, number = {1}, pages = {640-650}, pmid = {33591447}, issn = {1432-2218}, mesh = {Algorithms ; *Artificial Intelligence ; *Colonic Polyps/diagnostic imaging ; Colonoscopy/methods ; Humans ; Machine Learning ; }, abstract = {OBJECTIVES: Computer-aided diagnosis (CAD)-based artificial intelligence (AI) has been shown to be highly accurate for detecting and characterizing colon polyps. However, the application of AI to identify normal colon landmarks and differentiate multiple colon diseases has not yet been established. We aimed to develop a convolutional neural network (CNN)-based algorithm (GUTAID) to recognize different colon lesions and anatomical landmarks.

METHODS: Colonoscopic images were obtained to train and validate the AI classifiers. An independent dataset was collected for verification. The architecture of GUTAID contains two major sub-models: the Normal, Polyp, Diverticulum, Cecum and CAncer (NPDCCA) and Narrow-Band Imaging for Adenomatous/Hyperplastic polyps (NBI-AH) models. The development of GUTAID was based on the 16-layer Visual Geometry Group (VGG16) architecture and implemented on Google Cloud Platform.

RESULTS: In total, 7838 colonoscopy images were used for developing and validating the AI model. An additional 1273 images were independently applied to verify the GUTAID. The accuracy for GUTAID in detecting various colon lesions/landmarks is 93.3% for polyps, 93.9% for diverticula, 91.7% for cecum, 97.5% for cancer, and 83.5% for adenomatous/hyperplastic polyps.

CONCLUSIONS: A CNN-based algorithm (GUTAID) to identify colonic abnormalities and landmarks was successfully established with high accuracy. This GUTAID system can further characterize polyps for optical diagnosis. We demonstrated that AI classification methodology is feasible to identify multiple and different colon diseases.}, } @article {pmid33583322, year = {2021}, author = {Hacking, S and Bijol, V}, title = {Deep learning for the classification of medical kidney disease: a pilot study for electron microscopy.}, journal = {Ultrastructural pathology}, volume = {45}, number = {2}, pages = {118-127}, doi = {10.1080/01913123.2021.1882628}, pmid = {33583322}, issn = {1521-0758}, mesh = {Artificial Intelligence ; *Deep Learning ; Humans ; *Kidney Diseases/diagnosis ; Microscopy, Electron ; Pilot Projects ; }, abstract = {Artificial intelligence (AI) is a new frontier and often enigmatic for medical professionals. Cloud computing could open up the field of computer vision to a wider medical audience and deep learning on the cloud allows one to design, develop, train and deploy applications with ease. In the field of histopathology, the implementation of various applications in AI has been successful for whole slide images rich in biological diversity. However, the analysis of other tissue medias, including electron microscopy, is yet to be explored. The present study aims to evaluate deep learning for the classification of medical kidney disease on electron microscopy images: amyloidosis, diabetic glomerulosclerosis, membranous nephropathy, membranoproliferative glomerulonephritis (MPGN), and thin basement membrane disease (TBMD). We found good overall classification with the MedKidneyEM-v1 Classifier and when looking at normal and diseased kidneys, the average area under the curve for precision and recall was 0.841. The average area under the curve for precision and recall on the disease only cohort was 0.909. 
Digital pathology will shape a new era for medical kidney disease and the present study demonstrates the feasibility of deep learning for electron microscopy. Future approaches could be used by renal pathologists to improve diagnostic concordance, determine therapeutic strategies, and optimize patient outcomes in a true clinical environment.}, } @article {pmid33572132, year = {2021}, author = {Tradacete, M and Santos, C and Jiménez, JA and Rodríguez, FJ and Martín, P and Santiso, E and Gayo, M}, title = {Turning Base Transceiver Stations into Scalable and Controllable DC Microgrids Based on a Smart Sensing Strategy.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {4}, pages = {}, pmid = {33572132}, issn = {1424-8220}, support = {RTC-2017-6231-3//Ministerio de Ciencia, Innovación y Universidades/ ; P2018/EMT-4366//Dirección General de Universidades e Investigación/ ; }, abstract = {This paper describes a practical approach to the transformation of Base Transceiver Stations (BTSs) into scalable and controllable DC Microgrids in which an energy management system (EMS) is developed to maximize the economic benefit. The EMS strategy focuses on efficiently managing a Battery Energy Storage System (BESS) along with photovoltaic (PV) energy generation, and non-critical load-shedding. The EMS collects data such as real-time energy consumption and generation, and environmental parameters such as temperature, wind speed and irradiance, using a smart sensing strategy whereby measurements can be recorded and computing can be performed both locally and in the cloud. Within the Spanish electricity market and applying a two-tariff pricing, annual savings per installed battery power of 16.8 euros/kW are achieved. The system has the advantage that it can be applied to both new and existing installations, providing a two-way connection to the electricity grid, PV generation, smart measurement systems and the necessary management software. 
All these functions are integrated in a flexible and low cost HW/SW architecture. Finally, the whole system is validated through real tests carried out on a pilot plant and under different weather conditions.}, } @article {pmid33569265, year = {2021}, author = {St-Onge, C and Benmakrelouf, S and Kara, N and Tout, H and Edstrom, C and Rabipour, R}, title = {Generic SDE and GA-based workload modeling for cloud systems.}, journal = {Journal of cloud computing (Heidelberg, Germany)}, volume = {10}, number = {1}, pages = {6}, pmid = {33569265}, issn = {2192-113X}, abstract = {Workload models are typically built based on user and application behavior in a system, limiting them to specific domains. Undoubtedly, such a practice creates a dilemma in a cloud computing (cloud) environment, where a wide range of heterogeneous applications are running and many users have access to these resources. The workload model in such an infrastructure must adapt to the evolution of the system configuration parameters, such as job load fluctuation. The aim of this work is to propose an approach that generates generic workload models (1) which are independent of user behavior and the applications running in the system, and can fit any workload domain and type, (2) model sharp workload variations that are most likely to appear in cloud environments, and (3) with high degree of fidelity with respect to observed data, within a short execution time. We propose two approaches for workload estimation, the first being a Hull-White and Genetic Algorithm (GA) combination, while the second is a Support Vector Regression (SVR) and Kalman-filter combination. Thorough experiments are conducted on real CPU and throughput datasets from virtualized IP Multimedia Subsystem (IMS), Web and cloud environments to study the efficiency of both propositions. 
The results show a higher accuracy for the Hull-White-GA approach with marginal overhead over the SVR-Kalman-Filter combination.}, } @article {pmid33568057, year = {2021}, author = {Gangiredla, J and Rand, H and Benisatto, D and Payne, J and Strittmatter, C and Sanders, J and Wolfgang, WJ and Libuit, K and Herrick, JB and Prarat, M and Toro, M and Farrell, T and Strain, E}, title = {GalaxyTrakr: a distributed analysis tool for public health whole genome sequence data accessible to non-bioinformaticians.}, journal = {BMC genomics}, volume = {22}, number = {1}, pages = {114}, pmid = {33568057}, issn = {1471-2164}, mesh = {Computational Biology ; High-Throughput Nucleotide Sequencing ; Humans ; *Metagenomics ; *Public Health ; Whole Genome Sequencing ; }, abstract = {BACKGROUND: Processing and analyzing whole genome sequencing (WGS) is computationally intense: a single Illumina MiSeq WGS run produces ~ 1 million 250-base-pair reads for each of 24 samples. This poses significant obstacles for smaller laboratories, or laboratories not affiliated with larger projects, which may not have dedicated bioinformatics staff or computing power to effectively use genomic data to protect public health. Building on the success of the cloud-based Galaxy bioinformatics platform (http://galaxyproject.org), already known for its user-friendliness and powerful WGS analytical tools, the Center for Food Safety and Applied Nutrition (CFSAN) at the U.S. Food and Drug Administration (FDA) created a customized 'instance' of the Galaxy environment, called GalaxyTrakr (https://www.galaxytrakr.org), for use by laboratory scientists performing food-safety regulatory research. 
The goal was to enable laboratories outside of the FDA internal network to (1) perform quality assessments of sequence data, (2) identify links between clinical isolates and positive food/environmental samples, including those at the National Center for Biotechnology Information sequence read archive (https://www.ncbi.nlm.nih.gov/sra/), and (3) explore new methodologies such as metagenomics. GalaxyTrakr hosts a variety of free and adaptable tools and provides the data storage and computing power to run the tools. These tools support coordinated analytic methods and consistent interpretation of results across laboratories. Users can create and share tools for their specific needs and use sequence data generated locally and elsewhere.

RESULTS: In its first full year (2018), GalaxyTrakr processed over 85,000 jobs and went from 25 to 250 users, representing 53 different public and state health laboratories, academic institutions, international health laboratories, and federal organizations. By mid-2020, it has grown to 600 registered users and processed over 450,000 analytical jobs. To illustrate how laboratories are making use of this resource, we describe how six institutions use GalaxyTrakr to quickly analyze and review their data. Instructions for participating in GalaxyTrakr are provided.

CONCLUSIONS: GalaxyTrakr advances food safety by providing reliable and harmonized WGS analyses for public health laboratories and promoting collaboration across laboratories with differing resources. Anticipated enhancements to this resource will include workflows for additional foodborne pathogens, viruses, and parasites, as well as new tools and services.}, } @article {pmid33558984, year = {2021}, author = {Kumar, R and Al-Turjman, F and Anand, L and Kumar, A and Magesh, S and Vengatesan, K and Sitharthan, R and Rajesh, M}, title = {Genomic sequence analysis of lung infections using artificial intelligence technique.}, journal = {Interdisciplinary sciences, computational life sciences}, volume = {13}, number = {2}, pages = {192-200}, pmid = {33558984}, issn = {1867-1462}, mesh = {Genomics ; Lung ; Sequence Analysis ; *Support Vector Machine ; }, abstract = {Attributable to the modernization of Artificial Intelligence (AI) procedures in healthcare services, various developments including Support Vector Machine (SVM), and profound learning. For example, Convolutional Neural systems (CNN) have prevalently engaged in a significant job of various classificational investigation in lung malignant growth, and different infections. In this paper, Parallel based SVM (P-SVM) and IoT has been utilized to examine the ideal order of lung infections caused by genomic sequence. The proposed method develops a new methodology to locate the ideal characterization of lung sicknesses and determine its growth in its early stages, to control the growth and prevent lung sickness. Further, in the investigation, the P-SVM calculation has been created for arranging high-dimensional distinctive lung ailment datasets. The data used in the assessment has been fetched from real-time data through cloud and IoT. 
The acquired outcome demonstrates that the developed P-SVM calculation has 83% higher accuracy and 88% precision in characterization with ideal informational collections when contrasted with other learning methods.}, } @article {pmid33557230, year = {2021}, author = {Jensen, JN and Hannemose, M and Bærentzen, JA and Wilm, J and Frisvad, JR and Dahl, AB}, title = {Surface Reconstruction from Structured Light Images Using Differentiable Rendering.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {4}, pages = {}, pmid = {33557230}, issn = {1424-8220}, support = {8057-00011B//Innovationsfonden/ ; }, abstract = {When 3D scanning objects, the objective is usually to obtain a continuous surface. However, most surface scanning methods, such as structured light scanning, yield a point cloud. Obtaining a continuous surface from a point cloud requires a subsequent surface reconstruction step, which is directly affected by any error from the computation of the point cloud. In this work, we propose a one-step approach in which we compute the surface directly from structured light images. Our method minimizes the least-squares error between photographs and renderings of a triangle mesh, where the vertex positions of the mesh are the parameters of the minimization problem. To ensure fast iterations during optimization, we use differentiable rendering, which computes images and gradients in a single pass. We present simulation experiments demonstrating that our method for computing a triangle mesh has several advantages over approaches that rely on an intermediate point cloud. Our method can produce accurate reconstructions when initializing the optimization from a sphere. We also show that our method is good at reconstructing sharp edges and that it is robust with respect to image noise. 
In addition, our method can improve the output from other reconstruction algorithms if we use these for initialization.}, } @article {pmid33557132, year = {2021}, author = {Lahoura, V and Singh, H and Aggarwal, A and Sharma, B and Mohammed, MA and Damaševičius, R and Kadry, S and Cengiz, K}, title = {Cloud Computing-Based Framework for Breast Cancer Diagnosis Using Extreme Learning Machine.}, journal = {Diagnostics (Basel, Switzerland)}, volume = {11}, number = {2}, pages = {}, pmid = {33557132}, issn = {2075-4418}, abstract = {Globally, breast cancer is one of the most significant causes of death among women. Early detection accompanied by prompt treatment can reduce the risk of death due to breast cancer. Currently, machine learning in cloud computing plays a pivotal role in disease diagnosis, but predominantly among the people living in remote areas where medical facilities are scarce. Diagnosis systems based on machine learning act as secondary readers and assist radiologists in the proper diagnosis of diseases, whereas cloud-based systems can support telehealth services and remote diagnostics. Techniques based on artificial neural networks (ANN) have attracted many researchers to explore their capability for disease diagnosis. Extreme learning machine (ELM) is one of the variants of ANN that has a huge potential for solving various classification problems. The framework proposed in this paper amalgamates three research domains: Firstly, ELM is applied for the diagnosis of breast cancer. Secondly, to eliminate insignificant features, the gain ratio feature selection method is employed. Lastly, a cloud computing-based system for remote diagnosis of breast cancer using ELM is proposed. The performance of the cloud-based ELM is compared with some state-of-the-art technologies for disease diagnosis. The results achieved on the Wisconsin Diagnostic Breast Cancer (WBCD) dataset indicate that the cloud-based ELM technique outperforms other results. 
The best performance results of ELM were found for both the standalone and cloud environments, which were compared. The important findings of the experimental results indicate that the accuracy achieved is 0.9868, the recall is 0.9130, the precision is 0.9054, and the F1-score is 0.8129.}, } @article {pmid33552932, year = {2021}, author = {Ahmad, S and Mehfuz, S and Beg, J and Ahmad Khan, N and Husain Khan, A}, title = {Fuzzy Cloud Based COVID-19 Diagnosis Assistant for identifying affected cases globally using MCDM.}, journal = {Materials today. Proceedings}, volume = {}, number = {}, pages = {}, doi = {10.1016/j.matpr.2021.01.240}, pmid = {33552932}, issn = {2214-7853}, abstract = {The COVID-19, Coronavirus Disease 2019, emerged as a hazardous disease that led to many causalities across the world. Early detection of COVID-19 in patients and proper treatment along with awareness can help to contain COVID-19. Proposed Fuzzy Cloud-Based (FCB) COVID-19 Diagnosis Assistant aims to identify the patients as confirmed, suspects, or suspicious of COVID-19. It categorized the patients into four categories as mild, moderate, severe, or critical. As patients register themselves online on the FCB COVID-19 DA in real-time, it creates the database for the same. This database helps to improve diagnostic accuracy as it contains the latest updates from real-world cases data. A team of doctors, experts, consultants are integrated with the FCB COVID-19 DA for better consultation and prevention. 
The ultimate aim of this proposed theory of FCB COVID-19 DA is to take control of COVID-19 pandemic and de-accelerate its rate of transmission among the society.}, } @article {pmid33546394, year = {2021}, author = {Alsharif, M and Rawat, DB}, title = {Study of Machine Learning for Cloud Assisted IoT Security as a Service.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {4}, pages = {}, pmid = {33546394}, issn = {1424-8220}, support = {001-2020//Data Science and Cybersecurity Center/ ; }, abstract = {Machine learning (ML) has been emerging as a viable solution for intrusion detection systems (IDS) to secure IoT devices against different types of attacks. ML based IDS (ML-IDS) normally detect network traffic anomalies caused by known attacks as well as newly introduced attacks. Recent research focuses on the functionality metrics of ML techniques, depicting their prediction effectiveness, but overlooked their operational requirements. ML techniques are resource-demanding that require careful adaptation to fit the limited computing resources of a large sector of their operational platform, namely, embedded systems. In this paper, we propose cloud-based service architecture for managing ML models that best fit different IoT device operational configurations for security. 
An IoT device may benefit from such a service by offloading to the cloud heavy-weight activities such as feature selection, model building, training, and validation, thus reducing its IDS maintenance workload at the IoT device and get the security model back from the cloud as a service.}, } @article {pmid33546287, year = {2021}, author = {Meyer, H and Wei, P and Jiang, X}, title = {Intelligent Video Highlights Generation with Front-Camera Emotion Sensing.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {4}, pages = {}, pmid = {33546287}, issn = {1424-8220}, support = {CNS-1815274//National Science Foundation/ ; CNS-1704899//National Science Foundation/ ; CNS-11943396//National Science Foundation/ ; CNS-1837022//National Science Foundation/ ; }, abstract = {In this paper, we present HOMER, a cloud-based system for video highlight generation which enables the automated, relevant, and flexible segmentation of videos. Our system outperforms state-of-the-art solutions by fusing internal video content-based features with the user's emotion data. While current research mainly focuses on creating video summaries without the use of affective data, our solution achieves the subjective task of detecting highlights by leveraging human emotions. In two separate experiments, including videos filmed with a dual camera setup, and home videos randomly picked from Microsoft's Video Titles in the Wild (VTW) dataset, HOMER demonstrates an improvement of up to 38% in F1-score from baseline, while not requiring any external hardware. 
We demonstrated both the portability and scalability of HOMER through the implementation of two smartphone applications.}, } @article {pmid33545122, year = {2021}, author = {Alshehri, M and Bhardwaj, A and Kumar, M and Mishra, S and Gyani, J}, title = {Cloud and IoT based smart architecture for desalination water treatment.}, journal = {Environmental research}, volume = {195}, number = {}, pages = {110812}, doi = {10.1016/j.envres.2021.110812}, pmid = {33545122}, issn = {1096-0953}, mesh = {Models, Theoretical ; Seawater ; *Solar Energy ; Sunlight ; *Water Purification ; }, abstract = {Increasing water demand and the deteriorating environment has continuously stressed the requirement for new technology and methods to attain optimized use of resources and desalination management, converting seawater into pure drinking water. In this age, the Internet of Things use allows us to optimize a series of previously complicated processes to perform and required enormous resources. One of these is optimizing the management of water treatment. This research presents an implementable water treatment model and suggests smart environment that can control water treatment plants. The proposed system gathers data and analysing to provide the most efficient approach for water desalination operations. The desalination framework integrates smart enabling technologies such as Cloud Portal, Network communication, Internet of Things, Sensors powered by solar energy with ancient water purification as part of seawater's desalination project. The proposed framework incorporates the new-age technologies, which are essential for efficient and effective operations of desalination systems. The implemented desalination dual membrane framework uses solar energy for purifying saline water using ancient methods to produce clean water for drinking and irrigation. 
The desalination produced 0.47 m3/l of freshwater from a saline concentration of 10 g/l, consuming 8.31 KWh/m3 energy for production from the prototype implementation, which makes desalination process cost effective.}, } @article {pmid33544692, year = {2021}, author = {Vahidy, F and Jones, SL and Tano, ME and Nicolas, JC and Khan, OA and Meeks, JR and Pan, AP and Menser, T and Sasangohar, F and Naufal, G and Sostman, D and Nasir, K and Kash, BA}, title = {Rapid Response to Drive COVID-19 Research in a Learning Health Care System: Rationale and Design of the Houston Methodist COVID-19 Surveillance and Outcomes Registry (CURATOR).}, journal = {JMIR medical informatics}, volume = {9}, number = {2}, pages = {e26773}, pmid = {33544692}, issn = {2291-9694}, abstract = {BACKGROUND: The COVID-19 pandemic has exacerbated the challenges of meaningful health care digitization. The need for rapid yet validated decision-making requires robust data infrastructure. Organizations with a focus on learning health care (LHC) systems tend to adapt better to rapidly evolving data needs. Few studies have demonstrated a successful implementation of data digitization principles in an LHC context across health care systems during the COVID-19 pandemic.

OBJECTIVE: We share our experience and provide a framework for assembling and organizing multidisciplinary resources, structuring and regulating research needs, and developing a single source of truth (SSoT) for COVID-19 research by applying fundamental principles of health care digitization, in the context of LHC systems across a complex health care organization.

METHODS: Houston Methodist (HM) comprises eight tertiary care hospitals and an expansive primary care network across Greater Houston, Texas. During the early phase of the pandemic, institutional leadership envisioned the need to streamline COVID-19 research and established the retrospective research task force (RRTF). We describe an account of the structure, functioning, and productivity of the RRTF. We further elucidate the technical and structural details of a comprehensive data repository-the HM COVID-19 Surveillance and Outcomes Registry (CURATOR). We particularly highlight how CURATOR conforms to standard health care digitization principles in the LHC context.

RESULTS: The HM COVID-19 RRTF comprises expertise in epidemiology, health systems, clinical domains, data sciences, information technology, and research regulation. The RRTF initially convened in March 2020 to prioritize and streamline COVID-19 observational research; to date, it has reviewed over 60 protocols and made recommendations to the institutional review board (IRB). The RRTF also established the charter for CURATOR, which in itself was IRB-approved in April 2020. CURATOR is a relational structured query language database that is directly populated with data from electronic health records, via largely automated extract, transform, and load procedures. The CURATOR design enables longitudinal tracking of COVID-19 cases and controls before and after COVID-19 testing. CURATOR has been set up following the SSoT principle and is harmonized across other COVID-19 data sources. CURATOR eliminates data silos by leveraging unique and disparate big data sources for COVID-19 research and provides a platform to capitalize on institutional investment in cloud computing. It currently hosts deeply phenotyped sociodemographic, clinical, and outcomes data of approximately 200,000 individuals tested for COVID-19. It supports more than 30 IRB-approved protocols across several clinical domains and has generated numerous publications from its core and associated data sources.

CONCLUSIONS: A data-driven decision-making strategy is paramount to the success of health care organizations. Investment in cross-disciplinary expertise, health care technology, and leadership commitment are key ingredients to foster an LHC system. Such systems can mitigate the effects of ongoing and future health care catastrophes by providing timely and validated decision support.}, } @article {pmid33537385, year = {2021}, author = {Filippucci, M and Miccolis, S and Castagnozzi, A and Cecere, G and de Lorenzo, S and Donvito, G and Falco, L and Michele, M and Nicotri, S and Romeo, A and Selvaggi, G and Tallarico, A}, title = {Seismicity of the Gargano promontory (Southern Italy) after 7 years of local seismic network operation: Data release of waveforms from 2013 to 2018.}, journal = {Data in brief}, volume = {35}, number = {}, pages = {106783}, pmid = {33537385}, issn = {2352-3409}, abstract = {The University of Bari (Italy), in cooperation with the National Institute of Geophysics and Volcanology (INGV) (Italy), has installed the OTRIONS micro-earthquake network to better understand the active tectonics of the Gargano promontory (Southern Italy). The OTRIONS network operates since 2013 and consists of 12 short period, 3 components, seismic stations located in the Apulian territory (Southern Italy). This data article releases the waveform database collected from 2013 to 2018 and describes the characteristics of the local network in the current configuration. At the end of 2018, we implemented a cloud infrastructure to make more robust the acquisition and storage system of the network through a collaboration with the RECAS-Bari computing centre of the University of Bari (Italy) and of the National Institute of Nuclear Physics (Italy). 
Thanks to this implementation, waveforms recorded after the beginning of 2019 and the station metadata are accessible through the European Integrated Data Archive (EIDA, https://www.orfeus-eu.org/data/eida/nodes/INGV/).}, } @article {pmid33535432, year = {2021}, author = {Li, Z and Peng, E}, title = {Software-Defined Optimal Computation Task Scheduling in Vehicular Edge Networking.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {3}, pages = {}, pmid = {33535432}, issn = {1424-8220}, support = {BK20201415//Natural Science Foundation of Jiangsu Province/ ; 2017YFB1400703, 2020YFB1005503//National Key Research and Development Project/ ; U1736216, 61702233//National Natural Science Foundation of China/ ; }, abstract = {With the development of smart vehicles and various vehicular applications, Vehicular Edge Computing (VEC) paradigm has attracted from academic and industry. Compared with the cloud computing platform, VEC has several new features, such as the higher network bandwidth and the lower transmission delay. Recently, vehicular computation-intensive task offloading has become a new research field for the vehicular edge computing networks. However, dynamic network topology and the bursty computation tasks offloading, which causes to the computation load unbalancing for the VEC networking. To solve this issue, this paper proposed an optimal control-based computing task scheduling algorithm. Then, we introduce software defined networking/OpenFlow framework to build a software-defined vehicular edge networking structure. The proposed algorithm can obtain global optimum results and achieve the load-balancing by the virtue of the global load status information. Besides, the proposed algorithm has strong adaptiveness in dynamic network environments by automatic parameter tuning. 
Experimental results show that the proposed algorithm can effectively improve the utilization of computation resources and meet the requirements of computation and transmission delay for various vehicular tasks.}, } @article {pmid33532168, year = {2020}, author = {Uslu, BÇ and Okay, E and Dursun, E}, title = {Analysis of factors affecting IoT-based smart hospital design.}, journal = {Journal of cloud computing (Heidelberg, Germany)}, volume = {9}, number = {1}, pages = {67}, pmid = {33532168}, issn = {2192-113X}, abstract = {Currently, rapidly developing digital technological innovations affect and change the integrated information management processes of all sectors. The high efficiency of these innovations has inevitably pushed the health sector into a digital transformation process to optimize the technologies and methodologies used to optimize healthcare management systems. In this transformation, the Internet of Things (IoT) technology plays an important role, which enables many devices to connect and work together. IoT allows systems to work together using sensors, connection methods, internet protocols, databases, cloud computing, and analytic as infrastructure. In this respect, it is necessary to establish the necessary technical infrastructure and a suitable environment for the development of smart hospitals. This study points out the optimization factors, challenges, available technologies, and opportunities, as well as the system architecture that come about by employing IoT technology in smart hospital environments. In order to do that, the required technical infrastructure is divided into five layers and the system infrastructure, constraints, and methods needed in each layer are specified, which also includes the smart hospital's dimensions and extent of intelligent computing and real-time big data analytic. 
As a result of the study, the deficiencies that may arise in each layer for the smart hospital design model and the factors that should be taken into account to eliminate them are explained. It is expected to provide a road map to managers, system developers, and researchers interested in optimization of the design of the smart hospital system.}, } @article {pmid33532167, year = {2020}, author = {Nguyen, V and Khanh, TT and Nguyen, TDT and Hong, CS and Huh, EN}, title = {Flexible computation offloading in a fuzzy-based mobile edge orchestrator for IoT applications.}, journal = {Journal of cloud computing (Heidelberg, Germany)}, volume = {9}, number = {1}, pages = {66}, pmid = {33532167}, issn = {2192-113X}, abstract = {In the Internet of Things (IoT) era, the capacity-limited Internet and uncontrollable service delays for various new applications, such as video streaming analysis and augmented reality, are challenges. Cloud computing systems, also known as a solution that offloads energy-consuming computation of IoT applications to a cloud server, cannot meet the delay-sensitive and context-aware service requirements. To address this issue, an edge computing system provides timely and context-aware services by bringing the computations and storage closer to the user. The dynamic flow of requests that can be efficiently processed is a significant challenge for edge and cloud computing systems. To improve the performance of IoT systems, the mobile edge orchestrator (MEO), which is an application placement controller, was designed by integrating end mobile devices with edge and cloud computing systems. In this paper, we propose a flexible computation offloading method in a fuzzy-based MEO for IoT applications in order to improve the efficiency in computational resource management. 
Considering the network, computation resources, and task requirements, a fuzzy-based MEO allows edge workload orchestration actions to decide whether to offload a mobile user to local edge, neighboring edge, or cloud servers. Additionally, increasing packet sizes will affect the failed-task ratio when the number of mobile devices increases. To reduce failed tasks because of transmission collisions and to improve service times for time-critical tasks, we define a new input crisp value, and a new output decision for a fuzzy-based MEO. Using the EdgeCloudSim simulator, we evaluate our proposal with four benchmark algorithms in augmented reality, healthcare, compute-intensive, and infotainment applications. Simulation results show that our proposal provides better results in terms of WLAN delay, service times, the number of failed tasks, and VM utilization.}, } @article {pmid33531307, year = {2022}, author = {Tan, H and Wang, Y and Wu, M and Huang, Z and Miao, Z}, title = {Distributed Group Coordination of Multiagent Systems in Cloud Computing Systems Using a Model-Free Adaptive Predictive Control Strategy.}, journal = {IEEE transactions on neural networks and learning systems}, volume = {33}, number = {8}, pages = {3461-3473}, doi = {10.1109/TNNLS.2021.3053016}, pmid = {33531307}, issn = {2162-2388}, abstract = {This article studies the group coordinated control problem for distributed nonlinear multiagent systems (MASs) with unknown dynamics. Cloud computing systems are employed to divide agents into groups and establish networked distributed multigroup-agent systems (ND-MGASs). To achieve the coordination of all agents and actively compensate for communication network delays, a novel networked model-free adaptive predictive control (NMFAPC) strategy combining networked predictive control theory with model-free adaptive control method is proposed. 
In the NMFAPC strategy, each nonlinear agent is described as a time-varying data model, which only relies on the system measurement data for adaptive learning. To analyze the system performance, a simultaneous analysis method for stability and consensus of ND-MGASs is presented. Finally, the effectiveness and practicability of the proposed NMFAPC strategy are verified by numerical simulations and experimental examples. The achievement also provides a solution for the coordination of large-scale nonlinear MASs.}, } @article {pmid33525106, year = {2020}, author = {Su, K and Zhang, X and Liu, Q and Xiao, B}, title = {Strategies of similarity propagation in web service recommender systems.}, journal = {Mathematical biosciences and engineering : MBE}, volume = {18}, number = {1}, pages = {530-550}, doi = {10.3934/mbe.2021029}, pmid = {33525106}, issn = {1551-0018}, abstract = {Recently, web service recommender systems have attracted much attention due to the popularity of Service-Oriented Computing and Cloud Computing. Memory-based collaborative filtering approaches which mainly rely on the similarity calculation are widely studied to realize the recommendation. In these research works, the similarity between two users is computed based on the QoS data of their commonly-invoked services and the similarity between two services is computed based on the common users who invoked them. However, most approaches ignore that the similarity calculation is not always accurate under a sparse data condition. To address this problem, we propose a similarity propagation method to accurately evaluate the similarities between users or services. Similarity propagation means that "if A and B are similar, and B and C are similar, then A and C will be similar to some extent". Firstly, the similarity graph of users or services is constructed according to the QoS data. Then, the similarity propagation paths between two nodes on the similarity graph are discovered. 
Finally, the similarity along each propagation path is measured and the indirect similarity between two users or services is evaluated by aggregating the similarities of different paths connecting them. Comprehensive experiments on real-world datasets demonstrate that our similarity propagation method can outstandingly improve the QoS prediction accuracy of memory-based collaborative filtering approaches.}, } @article {pmid33525084, year = {2020}, author = {Jiang, W and Ye, X and Chen, R and Su, F and Lin, M and Ma, Y and Zhu, Y and Huang, S}, title = {Wearable on-device deep learning system for hand gesture recognition based on FPGA accelerator.}, journal = {Mathematical biosciences and engineering : MBE}, volume = {18}, number = {1}, pages = {132-153}, doi = {10.3934/mbe.2021007}, pmid = {33525084}, issn = {1551-0018}, mesh = {*Deep Learning ; Equipment Design ; Gestures ; Humans ; Neural Networks, Computer ; *Wearable Electronic Devices ; }, abstract = {Gesture recognition is critical in the field of Human-Computer Interaction, especially in healthcare, rehabilitation, sign language translation, etc. Conventionally, the gesture recognition data collected by the inertial measurement unit (IMU) sensors is relayed to the cloud or a remote device with higher computing power to train models. However, it is not convenient for remote follow-up treatment of movement rehabilitation training. In this paper, based on a field-programmable gate array (FPGA) accelerator and the Cortex-M0 IP core, we propose a wearable deep learning system that is capable of locally processing data on the end device. With a pre-stage processing module and serial-parallel hybrid method, the device is of low-power and low-latency at the micro control unit (MCU) level, however, it meets or exceeds the performance of single board computers (SBC). For example, its performance is more than twice as much of Cortex-A53 (which is usually used in Raspberry Pi). 
Moreover, a convolutional neural network (CNN) and a multilayer perceptron neural network (NN) is used in the recognition model to extract features and classify gestures, which helps achieve a high recognition accuracy at 97%. Finally, this paper offers a software-hardware co-design method that is worth referencing for the design of edge devices in other scenarios.}, } @article {pmid35782175, year = {2021}, author = {Tai, Y and Gao, B and Li, Q and Yu, Z and Zhu, C and Chang, V}, title = {Trustworthy and Intelligent COVID-19 Diagnostic IoMT Through XR and Deep-Learning-Based Clinic Data Access.}, journal = {IEEE internet of things journal}, volume = {8}, number = {21}, pages = {15965-15976}, pmid = {35782175}, issn = {2327-4662}, abstract = {This article presents a novel extended reality (XR) and deep-learning-based Internet-of-Medical-Things (IoMT) solution for the COVID-19 telemedicine diagnostic, which systematically combines virtual reality/augmented reality (AR) remote surgical plan/rehearse hardware, customized 5G cloud computing and deep learning algorithms to provide real-time COVID-19 treatment scheme clues. Compared to existing perception therapy techniques, our new technique can significantly improve performance and security. The system collected 25 clinic data from the 347 positive and 2270 negative COVID-19 patients in the Red Zone by 5G transmission. After that, a novel auxiliary classifier generative adversarial network-based intelligent prediction algorithm is conducted to train the new COVID-19 prediction model. Furthermore, The Copycat network is employed for the model stealing and attack for the IoMT to improve the security performance. To simplify the user interface and achieve an excellent user experience, we combined the Red Zone's guiding images with the Green Zone's view through the AR navigate clue by using 5G. 
The XR surgical plan/rehearse framework is designed, including all COVID-19 surgical requisite details that were developed with a real-time response guaranteed. The accuracy, recall, F1-score, and area under the ROC curve (AUC) area of our new IoMT were 0.92, 0.98, 0.95, and 0.98, respectively, which outperforms the existing perception techniques with significantly higher accuracy performance. The model stealing also has excellent performance, with the AUC area of 0.90 in Copycat slightly lower than the original model. This study suggests a new framework in the COVID-19 diagnostic integration and opens the new research about the integration of XR and deep learning for IoMT implementation.}, } @article {pmid33513299, year = {2021}, author = {Neely, BA}, title = {Cloudy with a Chance of Peptides: Accessibility, Scalability, and Reproducibility with Cloud-Hosted Environments.}, journal = {Journal of proteome research}, volume = {20}, number = {4}, pages = {2076-2082}, pmid = {33513299}, issn = {1535-3907}, support = {9999-NIST/ImNIST/Intramural NIST DOC/United States ; }, mesh = {Animals ; Computational Biology ; Peptides ; *Proteomics ; Reproducibility of Results ; *Software ; }, abstract = {Cloud-hosted environments offer known benefits when computational needs outstrip affordable local workstations, enabling high-performance computation without a physical cluster. What has been less apparent, especially to novice users, is the transformative potential for cloud-hosted environments to bridge the digital divide that exists between poorly funded and well-resourced laboratories, and to empower modern research groups with remote personnel and trainees. Using cloud-based proteomic bioinformatic pipelines is not predicated on analyzing thousands of files, but instead can be used to improve accessibility during remote work, extreme weather, or working with under-resourced remote trainees. 
The general benefits of cloud-hosted environments also allow for scalability and encourage reproducibility. Since one possible hurdle to adoption is awareness, this paper is written with the nonexpert in mind. The benefits and possibilities of using a cloud-hosted environment are emphasized by describing how to setup an example workflow to analyze a previously published label-free data-dependent acquisition mass spectrometry data set of mammalian urine. Cost and time of analysis are compared using different computational tiers, and important practical considerations are described. Overall, cloud-hosted environments offer the potential to solve large computational problems, but more importantly can enable and accelerate research in smaller research groups with inadequate infrastructure and suboptimal local computational resources.}, } @article {pmid33511996, year = {2021}, author = {Alvarez, RV and Mariño-Ramírez, L and Landsman, D}, title = {Transcriptome annotation in the cloud: complexity, best practices, and cost.}, journal = {GigaScience}, volume = {10}, number = {2}, pages = {}, pmid = {33511996}, issn = {2047-217X}, mesh = {Cloud Computing ; Computational Biology ; Databases, Factual ; *Software ; *Transcriptome ; Workflow ; }, abstract = {BACKGROUND: The NIH Science and Technology Research Infrastructure for Discovery, Experimentation, and Sustainability (STRIDES) initiative provides NIH-funded researchers cost-effective access to commercial cloud providers, such as Amazon Web Services (AWS) and Google Cloud Platform (GCP). These cloud providers represent an alternative for the execution of large computational biology experiments like transcriptome annotation, which is a complex analytical process that requires the interrogation of multiple biological databases with several advanced computational tools. 
The core components of annotation pipelines published since 2012 are BLAST sequence alignments using annotated databases of both nucleotide and protein sequences almost exclusively with networked on-premises compute systems.

FINDINGS: We compare multiple BLAST sequence alignments using AWS and GCP. We prepared several Jupyter Notebooks with all the code required to submit computing jobs to the batch system on each cloud provider. We consider the effect of the number of query transcripts in input files on cost and processing time. We tested compute instances with 16, 32, and 64 vCPUs on each cloud provider. Four classes of timing results were collected: the total run time, the time for transferring the BLAST databases to the instance local solid-state disk drive, the time to execute the CWL script, and the time for the creation, set-up, and release of an instance. This study aims to establish an estimate of the cost and compute time needed for the execution of multiple BLAST runs in a cloud environment.

CONCLUSIONS: We demonstrate that public cloud providers are a practical alternative for the execution of advanced computational biology experiments at low cost. Using our cloud recipes, the BLAST alignments required to annotate a transcriptome with ∼500,000 transcripts can be processed in <2 hours with a compute cost of ∼$200-$250. In our opinion, for BLAST-based workflows, the choice of cloud platform is not dependent on the workflow but, rather, on the specific details and requirements of the cloud provider. These choices include the accessibility for institutional use, the technical knowledge required for effective use of the platform services, and the availability of open source frameworks such as APIs to deploy the workflow.}, } @article {pmid33511044, year = {2020}, author = {Liu, X and Kar, B and Montiel Ishino, FA and Zhang, C and Williams, F}, title = {Assessing the Reliability of Relevant Tweets and Validation Using Manual and Automatic Approaches for Flood Risk Communication.}, journal = {ISPRS international journal of geo-information}, volume = {9}, number = {9}, pages = {}, pmid = {33511044}, issn = {2220-9964}, support = {ZIA MD000015/ImNIH/Intramural NIH HHS/United States ; }, abstract = {While Twitter has been touted as a preeminent source of up-to-date information on hazard events, the reliability of tweets is still a concern. Our previous publication extracted relevant tweets containing information about the 2013 Colorado flood event and its impacts. Using the relevant tweets, this research further examined the reliability (accuracy and trueness) of the tweets by examining the text and image content and comparing them to other publicly available data sources. Both manual identification of text information and automated (Google Cloud Vision, application programming interface (API)) extraction of images were implemented to balance accurate information verification and efficient processing time. 
The results showed that both the text and images contained useful information about damaged/flooded roads/streets. This information will help emergency response coordination efforts and informed allocation of resources when enough tweets contain geocoordinates or location/venue names. This research will identify reliable crowdsourced risk information to facilitate near real-time emergency response through better use of crowdsourced risk communication platforms.}, } @article {pmid33507965, year = {2021}, author = {Gaw, LY and Richards, DR}, title = {Development of spontaneous vegetation on reclaimed land in Singapore measured by NDVI.}, journal = {PloS one}, volume = {16}, number = {1}, pages = {e0245220}, pmid = {33507965}, issn = {1932-6203}, mesh = {Cities/history ; *Ecosystem ; History, 20th Century ; History, 21st Century ; *Plants ; Singapore ; *Urbanization ; }, abstract = {Population and economic growth in Asia has led to increased urbanisation. Urbanisation has many detrimental impacts on ecosystems, especially when expansion is unplanned. Singapore is a city-state that has grown rapidly since independence, both in population and land area. However, Singapore aims to develop as a 'City in Nature', and urban greenery is integral to the landscape. While clearing some areas of forest for urban sprawl, Singapore has also reclaimed land from the sea to expand its coastline. Reclaimed land is usually designated for future urban development, but must first be left for many years to stabilise. During the period of stabilisation, pioneer plant species establish, growing into novel forest communities. The rate of this spontaneous vegetation development has not been quantified. This study tracks the temporal trends of normalized difference vegetation index (NDVI), as a proxy of vegetation maturity, on reclaimed land sensed using LANDSAT images. Google Earth Engine was used to mosaic cloud-free annual LANDSAT images of Singapore from 1988 to 2015. 
Singapore's median NDVI increased by 0.15 from 0.47 to 0.62 over the study period, while its land area grew by 71 km2. Five reclaimed sites with spontaneous vegetation development showed variable vegetation covers, ranging from 6% to 43% vegetated cover in 2015. On average, spontaneous vegetation takes 16.9 years to develop to a maturity of 0.7 NDVI, but this development is not linear and follows a quadratic trajectory. Patches of spontaneous vegetation on isolated reclaimed lands are unlikely to remain forever since they are in areas slated for future development. In the years that these patches exist, they have potential to increase urban greenery, support biodiversity, and provide a host of ecosystem services. With this knowledge on spontaneous vegetation development trajectories, urban planners can harness the resource when planning future developments.}, } @article {pmid33504314, year = {2021}, author = {Raza, K and Singh, NK}, title = {A Tour of Unsupervised Deep Learning for Medical Image Analysis.}, journal = {Current medical imaging}, volume = {17}, number = {9}, pages = {1059-1077}, doi = {10.2174/1573405617666210127154257}, pmid = {33504314}, issn = {1573-4056}, mesh = {Algorithms ; *Deep Learning ; Humans ; Image Processing, Computer-Assisted ; Protein Structure, Secondary ; Unsupervised Machine Learning ; }, abstract = {BACKGROUND: Interpretation of medical images for the diagnosis and treatment of complex diseases from high-dimensional and heterogeneous data remains a key challenge in transforming healthcare. In the last few years, both supervised and unsupervised deep learning achieved promising results in the area of medical image analysis. Several reviews on supervised deep learning are published, but hardly any rigorous review on unsupervised deep learning for medical image analysis is available.

OBJECTIVE: The objective of this review is to systematically present various unsupervised deep learning models, tools, and benchmark datasets applied to medical image analysis. Some of the discussed models are autoencoders and their variants, Restricted Boltzmann Machines (RBM), Deep Belief Networks (DBN), Deep Boltzmann Machine (DBM), and Generative Adversarial Network (GAN). Future research opportunities and challenges of unsupervised deep learning techniques for medical image analysis are also discussed.

CONCLUSION: Currently, interpretation of medical images for diagnostic purposes is usually performed by human experts that may be replaced by computer-aided diagnosis due to advancement in machine learning techniques, including deep learning, and the availability of cheap computing infrastructure through cloud computing. Both supervised and unsupervised machine learning approaches are widely applied in medical image analysis, each of them having certain pros and cons. Since human supervisions are not always available or are inadequate or biased, therefore, unsupervised learning algorithms give a big hope with lots of advantages for biomedical image analysis.}, } @article {pmid33501269, year = {2020}, author = {Akbar, A and Lewis, PR and Wanner, E}, title = {A Self-Aware and Scalable Solution for Efficient Mobile-Cloud Hybrid Robotics.}, journal = {Frontiers in robotics and AI}, volume = {7}, number = {}, pages = {102}, pmid = {33501269}, issn = {2296-9144}, abstract = {Backed by the virtually unbounded resources of the cloud, battery-powered mobile robotics can also benefit from cloud computing, meeting the demands of even the most computationally and resource-intensive tasks. However, many existing mobile-cloud hybrid (MCH) robotic tasks are inefficient in terms of optimizing trade-offs between simultaneously conflicting objectives, such as minimizing both battery power consumption and network usage. To tackle this problem we propose a novel approach that can be used not only to instrument an MCH robotic task but also to search for its efficient configurations representing compromise solution between the objectives. We introduce a general-purpose MCH framework to measure, at runtime, how well the tasks meet these two objectives. 
The framework employs these efficient configurations to make decisions at runtime, which are based on: (1) changing of the environment (i.e., WiFi signal level variation), and (2) itself in a changing environment (i.e., actual observed packet loss in the network). Also, we introduce a novel search-based multi-objective optimization (MOO) algorithm, which works in two steps to search for efficient configurations of MCH applications. Analysis of our results shows that: (i) using self-adaptive and self-aware decisions, an MCH foraging task performed by a battery-powered robot can achieve better optimization in a changing environment than using static offloading or running the task only on the robot. However, a self-adaptive decision would fall behind when the change in the environment happens within the system. In such a case, a self-aware system can perform well, in terms of minimizing the two objectives. (ii) The Two-Step algorithm can search for better quality configurations for MCH robotic tasks of having a size from small to medium scale, in terms of the total number of their offloadable modules.}, } @article {pmid33500600, year = {2021}, author = {Sood, SK and Rawat, KS}, title = {A scientometric analysis of ICT-assisted disaster management.}, journal = {Natural hazards (Dordrecht, Netherlands)}, volume = {106}, number = {3}, pages = {2863-2881}, pmid = {33500600}, issn = {0921-030X}, abstract = {In recent years, natural and manmade disasters such as floods, earthquakes, wildfires, and tsunamis have occurred with human losses and environmental deterioration. Henceforth, to reduce the damage caused by these catastrophic events, the administration and government need to track victims and perform synchronized relief efforts on time at the disaster sites. The promising technologies of Internet communication technology (ICT), like the Internet of things, cloud computing, and data analytics, can assist various phases of disaster management. 
Moreover, the role of higher education spans all stages of disaster management: preparedness, response, and recovery. As educational and research contributions, higher educational institutes are essentially involved in all the disaster management stages to contribute to society broadly. Henceforth, the scientific analysis of disaster management literature is required to analyze the overall structure and developments in this domain. This study presents a scientometric analysis that evaluates the ICT-assisted disaster management research over the last 15 years (2005-2020). It presents various empirical ways to analyze the evolution, status, and result of ICT-assisted in disaster management research. This study provides extensive insight into the publication growth, citation analysis, collaboration, and keyword co-occurrence analysis for technological trends of the ICT-assisted disaster management research. It identifies key journals, countries, and organizations that significantly contributed to this research domain. Overall, this study presents various patterns, research trends, and collaborations as the basic structure for future research in this field.}, } @article {pmid33498910, year = {2021}, author = {Chen, S and Li, Q and Zhou, M and Abusorrah, A}, title = {Recent Advances in Collaborative Scheduling of Computing Tasks in an Edge Computing Paradigm.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {3}, pages = {}, pmid = {33498910}, issn = {1424-8220}, abstract = {In edge computing, edge devices can offload their overloaded computing tasks to an edge server. This can give full play to an edge server's advantages in computing and storage, and efficiently execute computing tasks. However, if they together offload all the overloaded computing tasks to an edge server, it can be overloaded, thereby resulting in the high processing delay of many computing tasks and unexpectedly high energy consumption. 
On the other hand, the resources in idle edge devices may be wasted and resource-rich cloud centers may be underutilized. Therefore, it is essential to explore a computing task collaborative scheduling mechanism with an edge server, a cloud center and edge devices according to task characteristics, optimization objectives and system status. It can help one realize efficient collaborative scheduling and precise execution of all computing tasks. This work analyzes and summarizes the edge computing scenarios in an edge computing paradigm. It then classifies the computing tasks in edge computing scenarios. Next, it formulates the optimization problem of computation offloading for an edge computing system. According to the problem formulation, the collaborative scheduling methods of computing tasks are then reviewed. Finally, future research issues for advanced collaborative scheduling in the context of edge computing are indicated.}, } @article {pmid33478558, year = {2021}, author = {Sun, S and Xie, Z and Yu, K and Jiang, B and Zheng, S and Pan, X}, title = {COVID-19 and healthcare system in China: challenges and progression for a sustainable future.}, journal = {Globalization and health}, volume = {17}, number = {1}, pages = {14}, pmid = {33478558}, issn = {1744-8603}, support = {201810343007//National College Students Innovation and Entrepreneurship Training Program/International ; S20190024//Wenzhou Municipal Science and Technology Bureau/International ; }, mesh = {Artificial Intelligence ; COVID-19/*epidemiology/*prevention & control ; China/epidemiology ; Disaster Planning/*organization & administration ; Disease Outbreaks/*prevention & control/statistics & numerical data ; Government ; Health Personnel/organization & administration ; Humans ; }, abstract = {With the ongoing COVID-19 outbreak, healthcare systems across the world have been pushed to the brink. 
The approach of traditional healthcare systems to disaster preparedness and prevention has demonstrated intrinsic problems, such as failure to detect early the spread of the virus, public hospitals being overwhelmed, a dire shortage of personal protective equipment, and exhaustion of healthcare workers. Consequently, this situation resulted in manpower and resource costs, leading to the widespread and exponential rise of infected cases at the early stage of the epidemic. To limit the spread of infection, the Chinese government adopted innovative, specialized, and advanced systems, including empowered Fangcang and Internet hospitals, as well as high technologies such as 5G, big data analysis, cloud computing, and artificial intelligence. The efficient use of these new forces helped China win its fight against the virus. As the rampant spread of the virus continues outside China, these new forces need to be integrated into the global healthcare system to combat the disease. Global healthcare system integrated with new forces is essential not only for COVID-19 but also for unknown infections in the future.}, } @article {pmid33477963, year = {2021}, author = {Pan, SH and Wang, SC}, title = {Optimal Consensus with Dual Abnormality Mode of Cellular IoT Based on Edge Computing.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {2}, pages = {}, pmid = {33477963}, issn = {1424-8220}, abstract = {The continuous development of fifth-generation (5G) networks is the main driving force for the growth of Internet of Things (IoT) applications. It is expected that the 5G network will greatly expand the applications of the IoT, thereby promoting the operation of cellular networks, the security and network challenges of the IoT, and pushing the future of the Internet to the edge. Because the IoT can make anything in anyplace be connected together at any time, it can provide ubiquitous services. 
With the establishment and use of 5G wireless networks, the cellular IoT (CIoT) will be developed and applied. In order to provide more reliable CIoT applications, a reliable network topology is very important. Reaching a consensus is one of the most important issues in providing a highly reliable CIoT design. Therefore, it is necessary to reach a consensus so that even if some components in the system are abnormal, the application in the system can still execute correctly in CIoT. In this study, a protocol of consensus is discussed in CIoT with dual abnormality mode that combines dormant abnormality and malicious abnormality. The protocol proposed in this research not only allows all normal components in CIoT to reach a consensus with the minimum times of data exchange, but also allows the maximum number of dormant and malicious abnormal components in CIoT. In the meantime, the protocol can make all normal components in CIoT satisfy the constraints of reaching consensus: Termination, Agreement, and Integrity.}, } @article {pmid33466730, year = {2021}, author = {Farid, F and Elkhodr, M and Sabrina, F and Ahamed, F and Gide, E}, title = {A Smart Biometric Identity Management Framework for Personalised IoT and Cloud Computing-Based Healthcare Services.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {2}, pages = {}, pmid = {33466730}, issn = {1424-8220}, mesh = {Biometry ; *Cloud Computing ; Computer Security ; Delivery of Health Care ; Humans ; *Internet of Things ; }, abstract = {This paper proposes a novel identity management framework for Internet of Things (IoT) and cloud computing-based personalized healthcare systems. The proposed framework uses multimodal encrypted biometric traits to perform authentication. It employs a combination of centralized and federated identity access techniques along with biometric based continuous authentication. 
The framework uses a fusion of electrocardiogram (ECG) and photoplethysmogram (PPG) signals when performing authentication. In addition to relying on the unique identification characteristics of the users' biometric traits, the security of the framework is empowered by the use of Homomorphic Encryption (HE). The use of HE allows patients' data to stay encrypted when being processed or analyzed in the cloud. Thus, providing not only a fast and reliable authentication mechanism, but also closing the door to many traditional security attacks. The framework's performance was evaluated and validated using a machine learning (ML) model that tested the framework using a dataset of 25 users in seating positions. Compared to using just ECG or PPG signals, the results of using the proposed fused-based biometric framework showed that it was successful in identifying and authenticating all 25 users with 100% accuracy. Hence, offering some significant improvements to the overall security and privacy of personalized healthcare systems.}, } @article {pmid33466338, year = {2021}, author = {Raghavan, A and Demircioglu, MA and Taeihagh, A}, title = {Public Health Innovation through Cloud Adoption: A Comparative Analysis of Drivers and Barriers in Japan, South Korea, and Singapore.}, journal = {International journal of environmental research and public health}, volume = {18}, number = {1}, pages = {}, pmid = {33466338}, issn = {1660-4601}, mesh = {*Cloud Computing ; Delivery of Health Care/*methods ; *Government ; Humans ; Japan ; *Public Health ; Republic of Korea ; Singapore ; }, abstract = {Governments are increasingly using cloud computing to reduce cost, increase access, improve quality, and create innovations in healthcare. Existing literature is primarily based on successful examples from developed western countries, and there is a lack of similar evidence from Asia. 
With a population close to 4.5 billion people, Asia faces healthcare challenges that pose an immense burden on economic growth and policymaking. Cloud computing in healthcare can potentially help increase the quality of healthcare delivery and reduce the economic burden, enabling governments to address healthcare challenges effectively and within a short timeframe. Advanced Asian countries such as Japan, South Korea, and Singapore provide successful examples of how cloud computing can be used to develop nationwide databases of electronic health records; real-time health monitoring for the elderly population; genetic database to support advanced research and cancer treatment; telemedicine; and health cities that drive the economy through medical industry, tourism, and research. This article examines these countries and identifies the drivers and barriers of cloud adoption in healthcare and makes policy recommendations to enable successful public health innovations through cloud adoption.}, } @article {pmid33465776, year = {2021}, author = {Anselmo, C and Attili, M and Horton, R and Kappe, B and Schulman, J and Baird, P}, title = {Hey You, Get On the Cloud: Safe and Compliant Use of Cloud Computing with Medical Devices.}, journal = {Biomedical instrumentation & technology}, volume = {55}, number = {1}, pages = {1-15}, pmid = {33465776}, issn = {0899-8205}, mesh = {*Cloud Computing ; *Internet ; }, } @article {pmid33456318, year = {2021}, author = {Patel, YS and Malwi, Z and Nighojkar, A and Misra, R}, title = {Truthful online double auction based dynamic resource provisioning for multi-objective trade-offs in IaaS clouds.}, journal = {Cluster computing}, volume = {24}, number = {3}, pages = {1855-1879}, pmid = {33456318}, issn = {1386-7857}, abstract = {Auction designs have recently been adopted for static and dynamic resource provisioning in IaaS clouds, such as Microsoft Azure and Amazon EC2. 
However, the existing mechanisms are mostly restricted to simple auctions, single-objective, offline setting, one-sided interactions either among cloud users or cloud service providers (CSPs), and possible misreports of cloud user's private information. This paper proposes a more realistic scenario of online auctioning for IaaS clouds, with the unique characteristics of elasticity for time-varying arrival of cloud user requests under the time-based server maintenance in cloud data centers. We propose an online truthful double auction technique for balancing the multi-objective trade-offs between energy, revenue, and performance in IaaS clouds, consisting of a weighted bipartite matching based winning-bid determination algorithm for resource allocation and a Vickrey-Clarke-Groves (VCG) driven algorithm for payment calculation of winning bids. Through rigorous theoretical analysis and extensive trace-driven simulation studies exploiting Google cluster workload traces, we demonstrate that our mechanism significantly improves the performance while promising truthfulness, heterogeneity, economic efficiency, individual rationality, and has a polynomial-time computational complexity.}, } @article {pmid33451105, year = {2021}, author = {Lee, YL and Arizky, SN and Chen, YR and Liang, D and Wang, WJ}, title = {High-Availability Computing Platform with Sensor Fault Resilience.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {2}, pages = {}, pmid = {33451105}, issn = {1424-8220}, support = {Next Generation IOT key Technologies and Application Systems In-depth Development Project (2/4)//Institute for Information Industry, Taiwan/ ; 108-2221-E- 008 -032 -MY3//Ministry of Science and Technology, Taiwan/ ; }, abstract = {Modern computing platforms usually use multiple sensors to report system information. In order to achieve high availability (HA) for the platform, the sensors can be used to efficiently detect system faults that make a cloud service not live. 
However, a sensor may fail and disable HA protection. In this case, human intervention is needed, either to change the original fault model or to fix the sensor fault. Therefore, this study proposes an HA mechanism that can continuously provide HA to a cloud system based on dynamic fault model reconstruction. We have implemented the proposed HA mechanism on a four-layer OpenStack cloud system and tested the performance of the proposed mechanism for all possible sets of sensor faults. For each fault model, we inject possible system faults and measure the average fault detection time. The experimental result shows that the proposed mechanism can accurately detect and recover an injected system fault with disabled sensors. In addition, the system fault detection time increases as the number of sensor faults increases, until the HA mechanism is degraded to a one-system-fault model, which is the worst case as the system layer heartbeating.}, } @article {pmid33451012, year = {2021}, author = {Lozano Domínguez, JM and Mateo Sanguino, TJ}, title = {Walking Secure: Safe Routing Planning Algorithm and Pedestrian's Crossing Intention Detector Based on Fuzzy Logic App.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {2}, pages = {}, pmid = {33451012}, issn = {1424-8220}, support = {"Industrialization of a Road Signaling Autonomous System for Smart Pedestrian Crosswalks" (ref. 5947)//Ministry of Economy and Knowledge of the Andalusian Government, Spain/ ; "Improvement of Road Safety Through an Intelligent Service Platform for Pedestrians, Sensors and Environment" (ref. UHU-1260596)//Ministry of Economy and Knowledge of the Andalusian Government, Spain/ ; }, abstract = {Improving road safety through artificial intelligence is now crucial to achieving more secure smart cities. 
With this objective, a mobile app based on the integration of the smartphone sensors and a fuzzy logic strategy to determine the pedestrian's crossing intention around crosswalks is presented. The app developed also allows the calculation, tracing and guidance of safe routes thanks to an optimization algorithm that includes pedestrian areas on the paths generated over the whole city through a cloud database (i.e., zebra crossings, pedestrian streets and walkways). The experimentation carried out consisted in testing the fuzzy logic strategy with a total of 31 volunteers crossing and walking around a crosswalk. For that, the fuzzy logic approach was subjected to a total of 3120 samples generated by the volunteers. It has been proven that a smartphone can be successfully used as a crossing intention detector system with an accuracy of 98.63%, obtaining a true positive rate of 98.27% and a specificity of 99.39% according to a receiver operating characteristic analysis. Finally, a total of 30 routes were calculated by the proposed algorithm and compared with Google Maps considering the values of time, distance and safety along the routes. As a result, the routes generated by the proposed algorithm were safer than the routes obtained with Google Maps, achieving an increase in the use of safe pedestrian areas of at least 183%.}, } @article {pmid33433860, year = {2021}, author = {Singh, K and Malhotra, J}, title = {Cloud based ensemble machine learning approach for smart detection of epileptic seizures using higher order spectral analysis.}, journal = {Physical and engineering sciences in medicine}, volume = {44}, number = {1}, pages = {313-324}, pmid = {33433860}, issn = {2662-4737}, mesh = {*Cloud Computing ; Electroencephalography ; *Epilepsy/diagnosis ; Humans ; Machine Learning ; Seizures ; }, abstract = {The present paper proposes a smart framework for detection of epileptic seizures using the concepts of IoT technologies, cloud computing and machine learning. 
This framework processes the acquired scalp EEG signals by Fast Walsh Hadamard transform. Then, the transformed frequency-domain signals are examined using higher-order spectral analysis to extract amplitude and entropy-based statistical features. The extracted features have been selected by means of correlation-based feature selection algorithm to achieve more real-time classification with reduced complexity and delay. Finally, the samples containing selected features have been fed to ensemble machine learning techniques for classification into several classes of EEG states, viz. normal, interictal and ictal. The employed techniques include Dagging, Bagging, Stacking, MultiBoost AB and AdaBoost M1 algorithms in integration with C4.5 decision tree algorithm as the base classifier. The results of the ensemble techniques are also compared with standalone C4.5 decision tree and SVM algorithms. The performance analysis through simulation results reveals that the ensemble of AdaBoost M1 and C4.5 decision tree algorithms with higher-order spectral features is an adequate technique for automated detection of epileptic seizures in real-time. 
This technique achieves 100% classification accuracy, sensitivity and specificity values with optimally small classification time.}, } @article {pmid33430386, year = {2021}, author = {Li, D and Xu, S and Li, P}, title = {Deep Reinforcement Learning-Empowered Resource Allocation for Mobile Edge Computing in Cellular V2X Networks.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {2}, pages = {}, pmid = {33430386}, issn = {1424-8220}, support = {61571038//National Natural Science Foundation of China/ ; 61931001//National Natural Science Foundation of China/ ; 2020D06//National Mobile Communications Research Laboratory, Southeast University/ ; }, abstract = {With the rapid development of vehicular networks, vehicle-to-everything (V2X) communications have huge number of tasks to be calculated, which brings challenges to the scarce network resources. Cloud servers can alleviate the terrible situation regarding the lack of computing abilities of vehicular user equipment (VUE), but the limited resources, the dynamic environment of vehicles, and the long distances between the cloud servers and VUE induce some potential issues, such as extra communication delay and energy consumption. Fortunately, mobile edge computing (MEC), a promising computing paradigm, can ameliorate the above problems by enhancing the computing abilities of VUE through allocating the computational resources to VUE. In this paper, we propose a joint optimization algorithm based on a deep reinforcement learning algorithm named the double deep Q network (double DQN) to minimize the cost constituted of energy consumption, the latency of computation, and communication with the proper policy. The proposed algorithm is more suitable for dynamic scenarios and requires low-latency vehicular scenarios in the real world. 
Compared with other reinforcement learning algorithms, the algorithm we proposed improves the performance in terms of convergence, defined cost, and speed by around 30%, 15%, and 17%.}, } @article {pmid33426574, year = {2021}, author = {Santos, JA and Inácio, PRM and Silva, BMC}, title = {Towards the Use of Blockchain in Mobile Health Services and Applications.}, journal = {Journal of medical systems}, volume = {45}, number = {2}, pages = {17}, pmid = {33426574}, issn = {1573-689X}, mesh = {*Blockchain ; Computer Security ; Electronic Health Records ; Health Services ; Humans ; *Telemedicine ; }, abstract = {With the advent of cryptocurrencies and blockchain, the growth and adaptation of cryptographic features and capabilities were quickly extended to new and underexplored areas, such as healthcare. Currently, blockchain is being implemented mainly as a mechanism to secure Electronic Health Records (EHRs). However, new studies have shown that this technology can be a powerful tool in empowering patients to control their own health data, as well as for enabling a fool-proof health data history and establishing medical responsibility. Additionally, with the proliferation of mobile health (m-Health) sustained on service-oriented architectures, the adaptation of blockchain mechanisms into m-Health applications creates the possibility for a more decentralized and available healthcare service. Hence, this paper presents a review of the current security best practices for m-Health and the most used and widely known implementations of the blockchain protocol, including blockchain technologies in m-Health. The main goal of this comprehensive review is to further discuss and elaborate on identified open-issues and potential use cases regarding the uses of blockchain in this area. 
Finally, the paper presents the major findings, challenges and advantages on future blockchain implementations for m-Health services and applications.}, } @article {pmid33422469, year = {2021}, author = {Mennen, AC and Turk-Browne, NB and Wallace, G and Seok, D and Jaganjac, A and Stock, J and deBettencourt, MT and Cohen, JD and Norman, KA and Sheline, YI}, title = {Cloud-Based Functional Magnetic Resonance Imaging Neurofeedback to Reduce the Negative Attentional Bias in Depression: A Proof-of-Concept Study.}, journal = {Biological psychiatry. Cognitive neuroscience and neuroimaging}, volume = {6}, number = {4}, pages = {490-497}, pmid = {33422469}, issn = {2451-9030}, support = {S10 OD023495/OD/NIH HHS/United States ; T32 MH065214/MH/NIMH NIH HHS/United States ; UL1 TR001863/TR/NCATS NIH HHS/United States ; }, mesh = {*Attentional Bias ; Cloud Computing ; Depression ; *Depressive Disorder, Major/therapy ; Humans ; Magnetic Resonance Imaging ; *Neurofeedback ; }, abstract = {Individuals with depression show an attentional bias toward negatively valenced stimuli and thoughts. In this proof-of-concept study, we present a novel closed-loop neurofeedback procedure intended to remediate this bias. Internal attentional states were detected in real time by applying machine learning techniques to functional magnetic resonance imaging data on a cloud server; these attentional states were externalized using a visual stimulus that the participant could learn to control. We trained 15 participants with major depressive disorder and 12 healthy control participants over 3 functional magnetic resonance imaging sessions. Exploratory analysis showed that participants with major depressive disorder were initially more likely than healthy control participants to get stuck in negative attentional states, but this diminished with neurofeedback training relative to controls. Depression severity also decreased from pre- to posttraining. 
These results demonstrate that our method is sensitive to the negative attentional bias in major depressive disorder and showcase the potential of this novel technique as a treatment that can be evaluated in future clinical trials.}, } @article {pmid33417125, year = {2021}, author = {Nowakowski, K and Carvalho, P and Six, JB and Maillet, Y and Nguyen, AT and Seghiri, I and M'Pemba, L and Marcille, T and Ngo, ST and Dao, TT}, title = {Human locomotion with reinforcement learning using bioinspired reward reshaping strategies.}, journal = {Medical & biological engineering & computing}, volume = {59}, number = {1}, pages = {243-256}, pmid = {33417125}, issn = {1741-0444}, mesh = {Adult ; *Artificial Intelligence ; Humans ; Learning ; Locomotion ; *Reinforcement, Psychology ; Reward ; }, abstract = {Recent learning strategies such as reinforcement learning (RL) have favored the transition from applied artificial intelligence to general artificial intelligence. One of the current challenges of RL in healthcare relates to the development of a controller to teach a musculoskeletal model to perform dynamic movements. Several solutions have been proposed. However, there is still a lack of investigations exploring the muscle control problem from a biomechanical point of view. Moreover, no studies using biological knowledge to develop plausible motor control models for pathophysiological conditions make use of reward reshaping. Consequently, the objective of the present work was to design and evaluate specific bioinspired reward function strategies for human locomotion learning within an RL framework. The deep deterministic policy gradient (DDPG) method for a single-agent RL problem was applied. A 3D musculoskeletal model (8 DoF and 22 muscles) of a healthy adult was used. A virtual interactive environment was developed and simulated using opensim-rl library. Three reward functions were defined for walking, forward, and side falls. 
The training process was performed with Google Cloud Compute Engine. The obtained outcomes were compared to the NIPS 2017 challenge outcomes, experimental observations, and literature data. Regarding learning to walk, simulated musculoskeletal models were able to walk from 18 to 20.5 m for the best solutions. A compensation strategy of muscle activations was revealed. Soleus, tibia anterior, and vastii muscles are main actors of the simple forward fall. A higher intensity of muscle activations was also noted after the fall. All kinematics and muscle patterns were consistent with experimental observations and literature data. Regarding the side fall, an intensive level of muscle activation on the expected fall side to unbalance the body was noted. The obtained outcomes suggest that computational and human resources as well as biomechanical knowledge are needed together to develop and evaluate an efficient and robust RL solution. As perspectives, current solutions will be extended to a larger parameter space in 3D. Furthermore, a stochastic reinforcement learning model will be investigated in the future in scope with the uncertainties of the musculoskeletal model and associated environment to provide a general artificial intelligence solution for human locomotion learning. Graphical abstract.}, } @article {pmid33414916, year = {2021}, author = {Chen, Y and Yan, W and Xie, Z and Guo, W and Lu, D and Lv, Z and Zhang, X}, title = {Comparative analysis of target gene exon sequencing by cognitive technology using a next generation sequencing platform in patients with lung cancer.}, journal = {Molecular and clinical oncology}, volume = {14}, number = {2}, pages = {36}, pmid = {33414916}, issn = {2049-9450}, abstract = {Next generation sequencing (NGS) technology is an increasingly important clinical tool for therapeutic decision-making. 
However, interpretation of NGS data presents challenges at the point of care, due to limitations in understanding the clinical importance of gene variants and efficiently translating results into actionable information for the clinician. The present study compared two approaches for annotating and reporting actionable genes and gene mutations from tumor samples: The traditional approach of manual curation, annotation and reporting using an experienced molecular tumor bioinformationist; and a cloud-based cognitive technology, with the goal to detect gene mutations of potential significance in Chinese patients with lung cancer. Data from 285 gene-targeted exon sequencing previously conducted on 115 patient tissue samples between 2014 and 2016 and subsequently manually annotated and evaluated by the Guangdong Lung Cancer Institute (GLCI) research team were analyzed by the Watson for Genomics (WfG) cognitive genomics technology. A comparative analysis of the annotation results of the two methods was conducted to identify quantitative and qualitative differences in the mutations generated. The complete congruence rate of annotation results between WfG analysis and the GLCI bioinformatician was 43.48%. In 65 (56.52%) samples, WfG analysis identified and interpreted, on average, 1.54 more mutation sites in each sample than the manual GLCI review. These mutation sites were located on 27 genes, including EP300, ARID1A, STK11 and DNMT3A. Mutations in the EP300 gene were most prevalent, and present in 30.77% samples. The Tumor Mutation Burden (TMB) interpreted by WfG analysis (1.82) was significantly higher than the TMB (0.73) interpreted by GLCI review. Compared with manual curation by a bioinformatician, WfG analysis provided comprehensive insights and additional genetic alterations to inform clinical therapeutic strategies for patients with lung cancer. 
These findings suggest the valuable role of cognitive computing to increase efficiency in the comprehensive detection and interpretation of genetic alterations which may inform opportunities for targeted cancer therapies.}, } @article {pmid33411624, year = {2021}, author = {Rajendran, S and Obeid, JS and Binol, H and D Agostino, R and Foley, K and Zhang, W and Austin, P and Brakefield, J and Gurcan, MN and Topaloglu, U}, title = {Cloud-Based Federated Learning Implementation Across Medical Centers.}, journal = {JCO clinical cancer informatics}, volume = {5}, number = {}, pages = {1-11}, pmid = {33411624}, issn = {2473-4276}, support = {P30 CA012197/CA/NCI NIH HHS/United States ; P30 DK123704/DK/NIDDK NIH HHS/United States ; UL1 TR001420/TR/NCATS NIH HHS/United States ; UL1 TR001450/TR/NCATS NIH HHS/United States ; }, mesh = {*Cloud Computing ; Humans ; *Information Dissemination ; Machine Learning ; Neural Networks, Computer ; *Privacy ; }, abstract = {PURPOSE: Building well-performing machine learning (ML) models in health care has always been exigent because of the data-sharing concerns, yet ML approaches often require larger training samples than is afforded by one institution. This paper explores several federated learning implementations by applying them in both a simulated environment and an actual implementation using electronic health record data from two academic medical centers on a Microsoft Azure Cloud Databricks platform.

MATERIALS AND METHODS: Using two separate cloud tenants, ML models were created, trained, and exchanged from one institution to another via a GitHub repository. Federated learning processes were applied to both artificial neural networks (ANNs) and logistic regression (LR) models on the horizontal data sets that are varying in count and availability. Incremental and cyclic federated learning models have been tested in simulation and real environments.

RESULTS: The cyclically trained ANN showed a 3% increase in performance, a significant improvement across most attempts (P < .05). Single weight neural network models showed improvement in some cases. However, LR models did not show much improvement after federated learning processes. The specific process that improved the performance differed based on the ML model and how federated learning was implemented. Moreover, we have confirmed that the order of the institutions during the training did influence the overall performance increase.

CONCLUSION: Unlike previous studies, our work has shown the implementation and effectiveness of federated learning processes beyond simulation. Additionally, we have identified different federated learning models that have achieved statistically significant performances. More work is needed to achieve effective federated learning processes in biomedicine, while preserving the security and privacy of the data.}, } @article {pmid33411623, year = {2021}, author = {Jones, DE and Alimi, TO and Pordell, P and Tangka, FK and Blumenthal, W and Jones, SF and Rogers, JD and Benard, VB and Richardson, LC}, title = {Pursuing Data Modernization in Cancer Surveillance by Developing a Cloud-Based Computing Platform: Real-Time Cancer Case Collection.}, journal = {JCO clinical cancer informatics}, volume = {5}, number = {}, pages = {24-29}, pmid = {33411623}, issn = {2473-4276}, mesh = {Automation ; Centers for Disease Control and Prevention, U.S. ; *Cloud Computing ; Computer Systems ; Data Collection/*methods ; Data Management/*methods ; Epidemiological Monitoring ; Health Policy ; Humans ; Neoplasms/*epidemiology ; Registries ; United States ; }, abstract = {Cancer surveillance is a field focused on collection of data to evaluate the burden of cancer and apply public health strategies to prevent and control cancer in the community. A key challenge facing the cancer surveillance community is the number of manual tasks required to collect cancer surveillance data, thereby resulting in possible delays in analysis and use of the information. To modernize and automate cancer data collection and reporting, the Centers for Disease Control and Prevention is planning, developing, and piloting a cancer surveillance cloud-based computing platform (CS-CBCP) with standardized electronic reporting from laboratories and health-care providers. 
With this system, automation of the cancer case collection process and access to real-time cancer case data can be achieved, which could not be done before. Furthermore, the COVID-19 pandemic has illustrated the importance of continuity of operations plans, and the CS-CBCP has the potential to provide such a platform suitable for remote operations of central cancer registries.}, } @article {pmid33409205, year = {2020}, author = {Chattopadhyay, T and Mondal, H and Mondal, S and Dutta, R and Saha, K and Das, D}, title = {Prescription digitization, online preservation, and retrieval on a smartphone.}, journal = {Journal of family medicine and primary care}, volume = {9}, number = {10}, pages = {5295-5302}, pmid = {33409205}, issn = {2249-4863}, abstract = {BACKGROUND: Medical records are important documents that should be stored for at least 3 years after the commencement of the treatment of an adult patient in India. In a health care facility, patients' data is saved in an online or offline retrieval system. However, in the case of the primary care physician, the data is not commonly kept in an easily retrievable system.

AIM: To test the feasibility of using a set of free web-based services in digitization, preservation, and retrieval of prescriptions on a smartphone by primary care physicians.

METHODS: This study was conducted with 12 primary care physicians. They were provided hands-on guides on creating an online form for uploading a prescription and using an application for retrieval of the prescription on a smartphone. Their feedback on the training material was collected by a telephonic survey, which had a 10-point Likert-type response option. Then, an in-depth interview was conducted to ascertain their perception of the tutorial and the process of the digitization and retrieval system.

RESULTS: All of the participants were able to create an online form on their smartphone. They uploaded their prescription and associated data and were able to retrieve it. The physicians opined positively on the "cost of the system," "portability" on a smartphone and ease of the "tutorial". They opined negatively on the "limited storage," chances of "loss of data," and "time constraints" for entry of the patients' data.

CONCLUSION: Free web-based and smartphone applications can be used by a primary care physician for personal storage and retrieval of prescriptions. The simple tutorial presented in this article would help many primary care physicians in resource-limited settings.}, } @article {pmid33408373, year = {2021}, author = {Feldmann, J and Youngblood, N and Karpov, M and Gehring, H and Li, X and Stappers, M and Le Gallo, M and Fu, X and Lukashchuk, A and Raja, AS and Liu, J and Wright, CD and Sebastian, A and Kippenberg, TJ and Pernice, WHP and Bhaskaran, H}, title = {Parallel convolutional processing using an integrated photonic tensor core.}, journal = {Nature}, volume = {589}, number = {7840}, pages = {52-58}, pmid = {33408373}, issn = {1476-4687}, support = {/ERC_/European Research Council/International ; }, abstract = {With the proliferation of ultrahigh-speed mobile networks and internet-connected devices, along with the rise of artificial intelligence (AI)[1], the world is generating exponentially increasing amounts of data that need to be processed in a fast and efficient way. Highly parallelized, fast and scalable hardware is therefore becoming progressively more important[2]. Here we demonstrate a computationally specific integrated photonic hardware accelerator (tensor core) that is capable of operating at speeds of trillions of multiply-accumulate operations per second (10[12] MAC operations per second or tera-MACs per second). The tensor core can be considered as the optical analogue of an application-specific integrated circuit (ASIC). It achieves parallelized photonic in-memory computing using phase-change-material memory arrays and photonic chip-based optical frequency combs (soliton microcombs[3]). The computation is reduced to measuring the optical transmission of reconfigurable and non-resonant passive components and can operate at a bandwidth exceeding 14 gigahertz, limited only by the speed of the modulators and photodetectors. 
Given recent advances in hybrid integration of soliton microcombs at microwave line rates[3-5], ultralow-loss silicon nitride waveguides[6,7], and high-speed on-chip detectors and modulators, our approach provides a path towards full complementary metal-oxide-semiconductor (CMOS) wafer-scale integration of the photonic tensor core. Although we focus on convolutional processing, more generally our results indicate the potential of integrated photonics for parallel, fast, and efficient computational hardware in data-heavy AI applications such as autonomous driving, live video processing, and next-generation cloud computing services.}, } @article {pmid33407445, year = {2021}, author = {Bertuccio, S and Tardiolo, G and Giambò, FM and Giuffrè, G and Muratore, R and Settimo, C and Raffa, A and Rigano, S and Bramanti, A and Muscarà, N and De Cola, MC}, title = {ReportFlow: an application for EEG visualization and reporting using cloud platform.}, journal = {BMC medical informatics and decision making}, volume = {21}, number = {1}, pages = {7}, pmid = {33407445}, issn = {1472-6947}, mesh = {*Cloud Computing ; Computer Security ; Electroencephalography ; *Electronic Health Records ; Humans ; Information Dissemination ; }, abstract = {BACKGROUND: The cloud is a promising resource for data sharing and computing. It can optimize several legacy processes involving different units of a company or more companies. Recently, cloud technology applications are spreading out in the healthcare setting as well, allowing to cut down costs for physical infrastructures and staff movements. In a public environment the main challenge is to guarantee the patients' data protection. We describe a cloud-based system, named ReportFlow, developed with the aim to improve the process of reporting and delivering electroencephalograms.

METHODS: We illustrate the functioning of this application through a use-case scenario occurring in an Italian hospital, and describe the corresponding key encryption and key management used for data security guarantee. We used the X[2] test or the unpaired Student t test to perform pre-post comparisons of some indexes, in order to evaluate significant changes after the application of ReportFlow.

RESULTS: The results obtained through the use of ReportFlow show a reduction of the time for exam reporting (t = 19.94; p < 0.001) and for its delivery (t = 14.95; p < 0.001), as well as an increase in the number of neurophysiologic examinations performed (about 20%), guaranteeing data integrity and security. Moreover, 68% of exam reports were delivered completely digitally.

CONCLUSIONS: The application proved to be an optimal solution to optimize the legacy process adopted in this scenario. The comparative pre-post analysis showed promising preliminary results of performance. Future directions will be the creation and release of certificates automatically.}, } @article {pmid33406662, year = {2021}, author = {Li, J and Qiao, Z and Zhang, K and Cui, C}, title = {A Lattice-Based Homomorphic Proxy Re-Encryption Scheme with Strong Anti-Collusion for Cloud Computing.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {1}, pages = {}, pmid = {33406662}, issn = {1424-8220}, support = {LH2020F044//Heilongjiang Provincial Natural Science Foundation of China/ ; }, abstract = {The homomorphic proxy re-encryption scheme combines the characteristics of a homomorphic encryption scheme and proxy re-encryption scheme. The proxy can not only convert a ciphertext of the delegator into a ciphertext of the delegatee, but also can homomorphically calculate the original ciphertext and re-encryption ciphertext belonging to the same user, so it is especially suitable for cloud computing. Yin et al. put forward the concept of a strong collusion attack on a proxy re-encryption scheme, and carried out a strong collusion attack on the scheme through an example. The existing homomorphic proxy re-encryption schemes use key switching algorithms to generate re-encryption keys, so they cannot resist strong collusion attacks. In this paper, we construct the first lattice-based homomorphic proxy re-encryption scheme with strong anti-collusion (HPRE-SAC). Firstly, algorithm TrapGen is used to generate an encryption key and trapdoor, then trapdoor sampling is used to generate a decryption key and re-encryption key, respectively. Finally, in order to ensure the homomorphism of ciphertext, a key switching algorithm is only used to generate the evaluation key. 
Compared with the existing homomorphic proxy re-encryption schemes, our HPRE-SAC scheme not only can resist strong collusion attacks, but also has smaller parameters.}, } @article {pmid33404529, year = {2021}, author = {Coelho, AA}, title = {Ab initio structure solution of proteins at atomic resolution using charge-flipping techniques and cloud computing.}, journal = {Acta crystallographica. Section D, Structural biology}, volume = {77}, number = {Pt 1}, pages = {98-107}, doi = {10.1107/S2059798320015090}, pmid = {33404529}, issn = {2059-7983}, mesh = {*Cloud Computing ; Internet ; Protein Conformation ; Proteins/*chemistry ; *Software ; }, abstract = {Large protein structures at atomic resolution can be solved in minutes using charge-flipping techniques operating on hundreds of virtual machines (computers) on the Amazon Web Services cloud-computing platform driven by the computer programs TOPAS or TOPAS-Academic at a small financial cost. The speed of operation has allowed charge-flipping techniques to be investigated and modified, leading to two strategies that can solve a large range of difficult protein structures at atomic resolution. Techniques include the use of space-group symmetry restraints on the electron density as well as increasing the intensity of a randomly chosen high-intensity electron-density peak. It is also shown that the use of symmetry restraints increases the chance of finding a solution for low-resolution data. 
Finally, a flipping strategy that negates `uranium atom solutions' has been developed for structures that exhibit such solutions during charge flipping.}, } @article {pmid33401409, year = {2021}, author = {Tian, X and Zhu, J and Xu, T and Li, Y}, title = {Mobility-Included DNN Partition Offloading from Mobile Devices to Edge Clouds.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {1}, pages = {}, pmid = {33401409}, issn = {1424-8220}, abstract = {The latest results in Deep Neural Networks (DNNs) have greatly improved the accuracy and performance of a variety of intelligent applications. However, running such computation-intensive DNN-based applications on resource-constrained mobile devices definitely leads to long latency and huge energy consumption. The traditional way is performing DNNs in the central cloud, but it requires significant amounts of data to be transferred to the cloud over the wireless network and also results in long latency. To solve this problem, offloading partial DNN computation to edge clouds has been proposed, to realize the collaborative execution between mobile devices and edge clouds. In addition, the mobility of mobile devices is easily to cause the computation offloading failure. In this paper, we develop a mobility-included DNN partition offloading algorithm (MDPO) to adapt to user's mobility. The objective of MDPO is minimizing the total latency of completing a DNN job when the mobile user is moving. The MDPO algorithm is suitable for both DNNs with chain topology and graphic topology. 
We evaluate the performance of our proposed MDPO compared to local-only execution and edge-only execution, experiments show that MDPO significantly reduces the total latency and improves the performance of DNN, and MDPO can adjust well to different network conditions.}, } @article {pmid33399819, year = {2021}, author = {Yun, T and Li, H and Chang, PC and Lin, MF and Carroll, A and McLean, CY}, title = {Accurate, scalable cohort variant calls using DeepVariant and GLnexus.}, journal = {Bioinformatics (Oxford, England)}, volume = {36}, number = {24}, pages = {5582-5589}, pmid = {33399819}, issn = {1367-4811}, support = {U01 HG007301/HG/NHGRI NIH HHS/United States ; U01 HG007417/HG/NHGRI NIH HHS/United States ; UM1 HG008901/HG/NHGRI NIH HHS/United States ; 3UM1HG008901-03S1/HG/NHGRI NIH HHS/United States ; //Google LLC/ ; }, abstract = {MOTIVATION: Population-scale sequenced cohorts are foundational resources for genetic analyses, but processing raw reads into analysis-ready cohort-level variants remains challenging.

RESULTS: We introduce an open-source cohort-calling method that uses the highly accurate caller DeepVariant and scalable merging tool GLnexus. Using callset quality metrics based on variant recall and precision in benchmark samples and Mendelian consistency in father-mother-child trios, we optimize the method across a range of cohort sizes, sequencing methods and sequencing depths. The resulting callsets show consistent quality improvements over those generated using existing best practices with reduced cost. We further evaluate our pipeline in the deeply sequenced 1000 Genomes Project (1KGP) samples and show superior callset quality metrics and imputation reference panel performance compared to an independently generated GATK Best Practices pipeline.

We publicly release the 1KGP individual-level variant calls and cohort callset (https://console.cloud.google.com/storage/browser/brain-genomics-public/research/cohort/1KGP) to foster additional development and evaluation of cohort merging methods as well as broad studies of genetic variation. Both DeepVariant (https://github.com/google/deepvariant) and GLnexus (https://github.com/dnanexus-rnd/GLnexus) are open-source, and the optimized GLnexus setup discovered in this study is also integrated into GLnexus public releases v1.2.2 and later.

SUPPLEMENTARY INFORMATION: Supplementary data are available at Bioinformatics online.}, } @article {pmid33399126, year = {2021}, author = {Yang, L and Culbertson, EA and Thomas, NK and Vuong, HT and Kjær, ETS and Jensen, KMØ and Tucker, MG and Billinge, SJL}, title = {A cloud platform for atomic pair distribution function analysis: PDFitc.}, journal = {Acta crystallographica. Section A, Foundations and advances}, volume = {77}, number = {Pt 1}, pages = {2-6}, pmid = {33399126}, issn = {2053-2733}, support = {DMREF-1534910//National Science Foundation, Division of Materials Research/ ; DE-AC05-00OR22725//U.S. Department of Energy, Neutron Science Directorate/ ; 804066//H2020 European Research Council/ ; }, abstract = {A cloud web platform for analysis and interpretation of atomic pair distribution function (PDF) data (PDFitc) is described. The platform is able to host applications for PDF analysis to help researchers study the local and nanoscale structure of nanostructured materials. The applications are designed to be powerful and easy to use and can, and will, be extended over time through community adoption and development. The currently available PDF analysis applications, structureMining, spacegroupMining and similarityMapping, are described. In the first and second the user uploads a single PDF and the application returns a list of best-fit candidate structures, and the most likely space group of the underlying structure, respectively. In the third, the user can upload a set of measured or calculated PDFs and the application returns a matrix of Pearson correlations, allowing assessment of the similarity between different data sets. structureMining is presented here as an example to show the easy-to-use workflow on PDFitc. 
In the future, as well as using the PDFitc applications for data analysis, it is hoped that the community will contribute their own codes and software to the platform.}, } @article {pmid33395689, year = {2021}, author = {Lima, MS}, title = {Information theory inspired optimization algorithm for efficient service orchestration in distributed systems.}, journal = {PloS one}, volume = {16}, number = {1}, pages = {e0242285}, pmid = {33395689}, issn = {1932-6203}, mesh = {*Algorithms ; *Computer Communication Networks ; *Computer Simulation ; *Data Management ; Information Theory ; }, abstract = {Distributed Systems architectures are becoming the standard computational model for processing and transportation of information, especially for Cloud Computing environments. The increase in demand for application processing and data management from enterprise and end-user workloads continues to move from a single-node client-server architecture to a distributed multitier design where data processing and transmission are segregated. Software development must considerer the orchestration required to provision its core components in order to deploy the services efficiently in many independent, loosely coupled-physically and virtually interconnected-data centers spread geographically, across the globe. This network routing challenge can be modeled as a variation of the Travelling Salesman Problem (TSP). This paper proposes a new optimization algorithm for optimum route selection using Algorithmic Information Theory. The Kelly criterion for a Shannon-Bernoulli process is used to generate a reliable quantitative algorithm to find a near optimal solution tour. The algorithm is then verified by comparing the results with benchmark heuristic solutions in 3 test cases. A statistical analysis is designed to measure the significance of the results between the algorithms and the entropy function can be derived from the distribution. 
The tested results show an improvement in the solution quality by producing routes with smaller length and time requirements. The quality of the results proves the flexibility of the proposed algorithm for problems with different complexities without relying on nature-inspired models such as Genetic Algorithms, Ant Colony, Cross Entropy, Neural Networks, 2opt and Simulated Annealing. The proposed algorithm can be used by applications to deploy services across large clusters of nodes by making better decisions in the route design. The findings in this paper unify critical areas in Computer Science, Mathematics and Statistics that many researchers have not explored and provide a new interpretation that advances the understanding of the role of entropy in decision problems encoded in Turing Machines.}, } @article {pmid33394397, year = {2021}, author = {Khan, R and Gilani, H}, title = {Global drought monitoring with big geospatial datasets using Google Earth Engine.}, journal = {Environmental science and pollution research international}, volume = {28}, number = {14}, pages = {17244-17264}, pmid = {33394397}, issn = {1614-7499}, mesh = {Australia ; Brazil ; *Droughts ; *Meteorology ; Thailand ; }, abstract = {Drought or dryness occurs due to the accumulative effect of certain climatological and hydrological variables over a certain period. Droughts are studied through numerically computed simple or compound indices. Vegetation condition index (VCI) is used for observing the change in vegetation that causes agricultural drought. Since the land surface temperature has minimum influence from cloud contamination and humidity in the air, the temperature condition index (TCI) is used for studying the temperature change. Dryness or wetness of soil is a major indicator for agriculture and hydrological drought and for that purpose, the index, soil moisture condition index (SMCI), is computed. 
The deviation of precipitation from normal is a major cause for meteorological droughts and for that purpose, precipitation condition index (PCI) is computed. The years when the indices escalated the dryness situation to severe and extreme are pointed out in this research. Furthermore, an interactive dashboard is generated in the Google Earth Engine (GEE) for users to compute the said indices using country boundary, time period, and ecological mask of their choice: Agriculture Drought Monitoring. Apart from global results, three case studies of droughts (2002 in Australia, 2013 in Brazil, and 2019 in Thailand) computed via the dashboard are discussed in detail in this research.}, } @article {pmid33389466, year = {2021}, author = {Yadav, S and Luthra, S and Garg, D}, title = {Modelling Internet of things (IoT)-driven global sustainability in multi-tier agri-food supply chain under natural epidemic outbreaks.}, journal = {Environmental science and pollution research international}, volume = {28}, number = {13}, pages = {16633-16654}, pmid = {33389466}, issn = {1614-7499}, mesh = {*COVID-19 ; Disease Outbreaks ; *Epidemics ; Food Supply ; Humans ; *Internet of Things ; SARS-CoV-2 ; }, abstract = {Epidemic outbreak (COVID-19, SARS-CoV-2) is an exceptional scenario of agri-food supply chain (AFSC) risk at the globalised level which is characterised by logistics' network breakdown (ripple effects), demand mismatch (uncertainty), and sustainable issues. Thus, the aim of this research is the modelling of the sustainable based multi-tier system for AFSC, which is managed through the different emerging application of Internet of things (IoT) technology. Different IoT technologies, viz., Blockchain, robotics, Big data analysis, and cloud computing, have developed a competitive AFSC at the global level. Competitive AFSC needs cautious incorporation of multi-tiers suppliers, specifically during dealing with globalised sustainability issues. 
Firms have been advancing towards their multi suppliers for driving social, environmental and economic practices. This paper also studies the interrelationship of 14 enablers and their cause and effect magnitude as contributing to IoT-based food secure model. The methodology used in the paper is interpretative structural modelling (ISM) for establishing interrelationship among the enablers and Fuzzy-Decision-Making Trial and Evaluation Laboratory (F-DEMATEL) to provide the magnitude of the cause-effect strength of the hierarchical framework. This paper also provides some theoretical contribution supported by information processing theory (IPT) and dynamic capability theory (DCT). This paper may guide the organisation's managers in their strategic planning based on enabler's classification into cause and effect groups. This paper may also encourage the managers to implement IoT technologies in AFSC.}, } @article {pmid33382884, year = {2020}, author = {Khomtchouk, BB and Nelson, CS and Vand, KA and Palmisano, S and Grossman, RL}, title = {HeartBioPortal2.0: new developments and updates for genetic ancestry and cardiometabolic quantitative traits in diverse human populations.}, journal = {Database : the journal of biological databases and curation}, volume = {2020}, number = {}, pages = {}, pmid = {33382884}, issn = {1758-0463}, support = {T32 AG047126/AG/NIA NIH HHS/United States ; }, mesh = {*Cardiovascular Diseases/genetics ; Female ; Genetic Predisposition to Disease ; *Genome-Wide Association Study ; Genomics ; Humans ; Male ; Phenotype ; }, abstract = {Cardiovascular disease (CVD) is the leading cause of death worldwide for all genders and across most racial and ethnic groups. However, different races and ethnicities exhibit different rates of CVD and its related cardiorenal and metabolic comorbidities, suggesting differences in genetic predisposition and risk of onset, as well as socioeconomic and lifestyle factors (diet, exercise, etc.) 
that act upon an individual's unique underlying genetic background. Here, we present HeartBioPortal2.0, a major update to HeartBioPortal, the world's largest CVD genetics data precision medicine platform for harmonized CVD-relevant genetic variants, which now enables search and analysis of human genetic information related to heart disease across ethnically diverse populations and cardiovascular/renal/metabolic quantitative traits pertinent to CVD pathophysiology. HeartBioPortal2.0 is structured as a cloud-based computing platform and knowledge portal that consolidates a multitude of CVD-relevant genomic data modalities into a single powerful query and browsing interface between data and user via a user-friendly web application publicly available to the scientific research community. Since its initial release, HeartBioPortal2.0 has added new cardiovascular/renal/metabolic disease-relevant gene expression data as well as genetic association data from numerous large-scale genome-wide association study consortiums such as CARDIoGRAMplusC4D, TOPMed, FinnGen, AFGen, MESA, MEGASTROKE, UK Biobank, CHARGE, Biobank Japan and MyCode, among other studies. In addition, HeartBioPortal2.0 now includes support for quantitative traits and ethnically diverse populations, allowing users to investigate the shared genetic architecture of any gene or its variants across the continuous cardiometabolic spectrum from health (e.g. blood pressure traits) to disease (e.g. hypertension), facilitating the understanding of CVD trait genetics that inform health-to-disease transitions and endophenotypes. Custom visualizations in the new and improved user interface, including performance enhancements and new security features such as user authentication, collectively re-imagine HeartBioPortal's user experience and provide a data commons that co-locates data, storage and computing infrastructure in the context of studying the genetic basis behind the leading cause of global mortality. 
Database URL: https://www.heartbioportal.com/.}, } @article {pmid33378901, year = {2020}, author = {Halty, A and Sánchez, R and Vázquez, V and Viana, V and Piñeyro, P and Rossit, DA}, title = {Scheduling in cloud manufacturing systems: Recent systematic literature review.}, journal = {Mathematical biosciences and engineering : MBE}, volume = {17}, number = {6}, pages = {7378-7397}, doi = {10.3934/mbe.2020377}, pmid = {33378901}, issn = {1551-0018}, abstract = {Cloud Manufacturing (CMFg) is a novel production paradigm that benefits from Cloud Computing in order to develop manufacturing systems linked by the cloud. These systems, based on virtual platforms, allow direct linkage between customers and suppliers of manufacturing services, regardless of geographical distance. In this way, CMfg can expand both markets for producers, and suppliers for customers. However, these linkages imply a new challenge for production planning and decision-making process, especially in Scheduling. In this paper, a systematic literature review of articles addressing scheduling in Cloud Manufacturing environments is carried out. The review takes as its starting point a seminal study published in 2019, in which all problem features are described in detail. We pay special attention to the optimization methods and problem-solving strategies that have been suggested in CMfg scheduling. From the review carried out, we can assert that CMfg is a topic of growing interest within the scientific community. We also conclude that the methods based on bio-inspired metaheuristics are by far the most widely used (they represent more than 50% of the articles found). On the other hand, we suggest some lines for future research to further consolidate this field. In particular, we want to highlight the multi-objective approach, since due to the nature of the problem and the production paradigm, the optimization objectives involved are generally in conflict. 
In addition, decentralized approaches such as those based on game theory are promising lines for future research.}, } @article {pmid33374965, year = {2020}, author = {Sahlmann, K and Clemens, V and Nowak, M and Schnor, B}, title = {MUP: Simplifying Secure Over-The-Air Update with MQTT for Constrained IoT Devices.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {1}, pages = {}, pmid = {33374965}, issn = {1424-8220}, abstract = {Message Queuing Telemetry Transport (MQTT) is one of the dominating protocols for edge- and cloud-based Internet of Things (IoT) solutions. When a security vulnerability of an IoT device is known, it has to be fixed as soon as possible. This requires a firmware update procedure. In this paper, we propose a secure update protocol for MQTT-connected devices which ensures the freshness of the firmware, authenticates the new firmware and considers constrained devices. We show that the update protocol is easy to integrate in an MQTT-based IoT network using a semantic approach. The feasibility of our approach is demonstrated by a detailed performance analysis of our prototype implementation on a IoT device with 32 kB RAM. Thereby, we identify design issues in MQTT 5 which can help to improve the support of constrained devices.}, } @article {pmid33374599, year = {2020}, author = {Asif, R and Ghanem, K and Irvine, J}, title = {Proof-of-PUF Enabled Blockchain: Concurrent Data and Device Security for Internet-of-Energy.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {1}, pages = {}, pmid = {33374599}, issn = {1424-8220}, abstract = {A detailed review on the technological aspects of Blockchain and Physical Unclonable Functions (PUFs) is presented in this article. It stipulates an emerging concept of Blockchain that integrates hardware security primitives via PUFs to solve bandwidth, integration, scalability, latency, and energy requirements for the Internet-of-Energy (IoE) systems. 
This hybrid approach, hereinafter termed as PUFChain, provides device and data provenance which records data origins, history of data generation and processing, and clone-proof device identification and authentication, thus possible to track the sources and reasons of any cyber attack. In addition to this, we review the key areas of design, development, and implementation, which will give us the insight on seamless integration with legacy IoE systems, reliability, cyber resilience, and future research challenges.}, } @article {pmid33374340, year = {2020}, author = {Lin, HY and Hung, YM}, title = {An Improved Proxy Re-Encryption Scheme for IoT-Based Data Outsourcing Services in Clouds.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {1}, pages = {}, pmid = {33374340}, issn = {1424-8220}, abstract = {IoT-based data outsourcing services in clouds could be regarded as a new trend in recent years, as they could reduce the hardware and software cost for enterprises and obtain higher flexibility. To securely transfer an encrypted message in the cloud, a so-called proxy re-encryption scheme is a better alternative. In such schemes, a ciphertext designated for a data aggregation is able to be re-encrypted as one designated for another by a semi-trusted proxy without decryption. In this paper, we introduce a secure proxy re-encryption protocol for IoT-based data outsourcing services in clouds. The proposed scheme is provably secure assuming the hardness of the bilinear inverse Diffie-Hellman problem (BIDHP). In particular, our scheme is bidirectional and supports the functionality of multi-hop, which allows an uploaded ciphertext to be transformed into a different one multiple times. The ciphertext length of our method is independent of the number of involved IoT nodes. Specifically, the re-encryption process only takes one exponentiation computation which is around 54 ms when sharing the data with 100 IoT devices. 
For each IoT node, the decryption process only requires two exponentiation computations. When compared with a related protocol presented by Kim and Lee, the proposed one also exhibits lower computational costs.}, } @article {pmid33374270, year = {2020}, author = {Abbas, Q and Alsheddy, A}, title = {Driver Fatigue Detection Systems Using Multi-Sensors, Smartphone, and Cloud-Based Computing Platforms: A Comparative Analysis.}, journal = {Sensors (Basel, Switzerland)}, volume = {21}, number = {1}, pages = {}, pmid = {33374270}, issn = {1424-8220}, support = {0001-008-11-17-3//King Abdulaziz City for Science and Technology/ ; }, mesh = {*Automobile Driving ; Cloud Computing ; *Internet of Things ; Machine Learning ; *Monitoring, Physiologic ; *Smartphone ; }, abstract = {Internet of things (IoT) cloud-based applications deliver advanced solutions for smart cities to decrease traffic accidents caused by driver fatigue while driving on the road. Environmental conditions or driver behavior can ultimately lead to serious roadside accidents. In recent years, the authors have developed many low-cost, computerized, driver fatigue detection systems (DFDs) to help drivers, by using multi-sensors, and mobile and cloud-based computing architecture. To promote safe driving, these are the most current emerging platforms that were introduced in the past. In this paper, we reviewed state-of-the-art approaches for predicting unsafe driving styles using three common IoT-based architectures. The novelty of this article is to show major differences among multi-sensors, smartphone-based, and cloud-based architectures in multimodal feature processing. We discussed all of the problems that machine learning techniques faced in recent years, particularly the deep learning (DL) model, to predict driver hypovigilance, especially in terms of these three IoT-based architectures. 
Moreover, we performed state-of-the-art comparisons by using driving simulators to incorporate multimodal features of the driver. We also mention online data sources in this article to test and train network architecture in the field of DFDs on public available multimodal datasets. These comparisons assist other authors to continue future research in this domain. To evaluate the performance, we mention the major problems in these three architectures to help researchers use the best IoT-based architecture for detecting DFDs in a real-time environment. Moreover, the important factors of Multi-Access Edge Computing (MEC) and 5th generation (5G) networks are analyzed in the context of deep learning architecture to improve the response time of DFD systems. Lastly, it is concluded that there is a research gap when it comes to implementing the DFD systems on MEC and 5G technologies by using multimodal features and DL architecture.}, } @article {pmid33371361, year = {2020}, author = {Alankar, B and Sharma, G and Kaur, H and Valverde, R and Chang, V}, title = {Experimental Setup for Investigating the Efficient Load Balancing Algorithms on Virtual Cloud.}, journal = {Sensors (Basel, Switzerland)}, volume = {20}, number = {24}, pages = {}, pmid = {33371361}, issn = {1424-8220}, support = {5753/ IFD/ 2015-16//National Council for Science and 852 Technology Communications (NCSTC), Department of Science and Technology (DST), 853 Ministry of Science and Technology (Govt. of India), New Delhi, India/ ; VCR 0000110//VC Research/ ; }, abstract = {Cloud computing has emerged as the primary choice for developers in developing applications that require high-performance computing. Virtualization technology has helped in the distribution of resources to multiple users. Increased use of cloud infrastructure has led to the challenge of developing a load balancing mechanism to provide optimized use of resources and better performance. 
Round robin and least connections load balancing algorithms have been developed to allocate user requests across a cluster of servers in the cloud in a time-bound manner. In this paper, we have applied the round robin and least connections approach of load balancing to HAProxy, virtual machine clusters and web servers. The experimental results are visualized and summarized using Apache Jmeter and a further comparative study of round robin and least connections is also depicted. Experimental setup and results show that the round robin algorithm performs better as compared to the least connections algorithm in all measuring parameters of load balancer in this paper.}, } @article {pmid33362806, year = {2020}, author = {Das Choudhury, S and Maturu, S and Samal, A and Stoerger, V and Awada, T}, title = {Leveraging Image Analysis to Compute 3D Plant Phenotypes Based on Voxel-Grid Plant Reconstruction.}, journal = {Frontiers in plant science}, volume = {11}, number = {}, pages = {521431}, pmid = {33362806}, issn = {1664-462X}, abstract = {High throughput image-based plant phenotyping facilitates the extraction of morphological and biophysical traits of a large number of plants non-invasively in a relatively short time. It facilitates the computation of advanced phenotypes by considering the plant as a single object (holistic phenotypes) or its components, i.e., leaves and the stem (component phenotypes). The architectural complexity of plants increases over time due to variations in self-occlusions and phyllotaxy, i.e., arrangements of leaves around the stem. One of the central challenges to computing phenotypes from 2-dimensional (2D) single view images of plants, especially at the advanced vegetative stage in presence of self-occluding leaves, is that the information captured in 2D images is incomplete, and hence, the computed phenotypes are inaccurate. 
We introduce a novel algorithm to compute 3-dimensional (3D) plant phenotypes from multiview images using voxel-grid reconstruction of the plant (3DPhenoMV). The paper also presents a novel method to reliably detect and separate the individual leaves and the stem from the 3D voxel-grid of the plant using voxel overlapping consistency check and point cloud clustering techniques. To evaluate the performance of the proposed algorithm, we introduce the University of Nebraska-Lincoln 3D Plant Phenotyping Dataset (UNL-3DPPD). A generic taxonomy of 3D image-based plant phenotypes are also presented to promote 3D plant phenotyping research. A subset of these phenotypes are computed using computer vision algorithms with discussion of their significance in the context of plant science. The central contributions of the paper are (a) an algorithm for 3D voxel-grid reconstruction of maize plants at the advanced vegetative stages using images from multiple 2D views; (b) a generic taxonomy of 3D image-based plant phenotypes and a public benchmark dataset, i.e., UNL-3DPPD, to promote the development of 3D image-based plant phenotyping research; and (c) novel voxel overlapping consistency check and point cloud clustering techniques to detect and isolate individual leaves and stem of the maize plants to compute the component phenotypes. 
Detailed experimental analyses demonstrate the efficacy of the proposed method, and also show the potential of 3D phenotypes to explain the morphological characteristics of plants regulated by genetic and environmental interactions.}, } @article {pmid33348559, year = {2020}, author = {Chen, B and Chen, H and Yuan, D and Yu, L}, title = {3D Fast Object Detection Based on Discriminant Images and Dynamic Distance Threshold Clustering.}, journal = {Sensors (Basel, Switzerland)}, volume = {20}, number = {24}, pages = {}, pmid = {33348559}, issn = {1424-8220}, support = {2018YFB1201602//National Key Research and Development Plan/ ; 61976224//Natural Science Foundation of China/ ; 2018JJ3689//Natural Science Foundation of Hunan Province of China/ ; }, abstract = {The object detection algorithm based on vehicle-mounted lidar is a key component of the perception system on autonomous vehicles. It can provide high-precision and highly robust obstacle information for the safe driving of autonomous vehicles. However, most algorithms are often based on a large amount of point cloud data, which makes real-time detection difficult. To solve this problem, this paper proposes a 3D fast object detection method based on three main steps: First, the ground segmentation by discriminant image (GSDI) method is used to convert point cloud data into discriminant images for ground points segmentation, which avoids the direct computing of the point cloud data and improves the efficiency of ground points segmentation. Second, the image detector is used to generate the region of interest of the three-dimensional object, which effectively narrows the search range. Finally, the dynamic distance threshold clustering (DDTC) method is designed for different density of the point cloud data, which improves the detection effect of long-distance objects and avoids the over-segmentation phenomenon generated by the traditional algorithm. 
Experiments have showed that this algorithm can meet the real-time requirements of autonomous driving while maintaining high accuracy.}, } @article {pmid33343851, year = {2020}, author = {Bibi, N and Sikandar, M and Ud Din, I and Almogren, A and Ali, S}, title = {IoMT-Based Automated Detection and Classification of Leukemia Using Deep Learning.}, journal = {Journal of healthcare engineering}, volume = {2020}, number = {}, pages = {6648574}, pmid = {33343851}, issn = {2040-2309}, mesh = {Algorithms ; COVID-19/epidemiology ; Cloud Computing ; Databases, Factual ; *Deep Learning ; *Diagnosis, Computer-Assisted ; Diagnostic Imaging ; Humans ; *Internet of Things ; Leukemia/*classification/*diagnosis ; Leukemia, Lymphocytic, Chronic, B-Cell/diagnosis ; Leukemia, Myelogenous, Chronic, BCR-ABL Positive/diagnosis ; Leukemia, Myeloid, Acute/diagnosis ; Machine Learning ; Neural Networks, Computer ; *Pattern Recognition, Automated ; Precursor Cell Lymphoblastic Leukemia-Lymphoma/diagnosis ; Telemedicine ; }, abstract = {For the last few years, computer-aided diagnosis (CAD) has been increasing rapidly. Numerous machine learning algorithms have been developed to identify different diseases, e.g., leukemia. Leukemia is a white blood cells- (WBC-) related illness affecting the bone marrow and/or blood. A quick, safe, and accurate early-stage diagnosis of leukemia plays a key role in curing and saving patients' lives. Based on developments, leukemia consists of two primary forms, i.e., acute and chronic leukemia. Each form can be subcategorized as myeloid and lymphoid. There are, therefore, four leukemia subtypes. Various approaches have been developed to identify leukemia with respect to its subtypes. However, in terms of effectiveness, learning process, and performance, these methods require improvements. This study provides an Internet of Medical Things- (IoMT-) based framework to enhance and provide a quick and safe identification of leukemia. 
In the proposed IoMT system, with the help of cloud computing, clinical gadgets are linked to network resources. The system allows real-time coordination for testing, diagnosis, and treatment of leukemia among patients and healthcare professionals, which may save both time and efforts of patients and clinicians. Moreover, the presented framework is also helpful for resolving the problems of patients with critical condition in pandemics such as COVID-19. The methods used for the identification of leukemia subtypes in the suggested framework are Dense Convolutional Neural Network (DenseNet-121) and Residual Convolutional Neural Network (ResNet-34). Two publicly available datasets for leukemia, i.e., ALL-IDB and ASH image bank, are used in this study. The results demonstrated that the suggested models supersede the other well-known machine learning algorithms used for healthy-versus-leukemia-subtypes identification.}, } @article {pmid33333717, year = {2020}, author = {Khorsheed, MB and Zainel, QM and Hassen, OA and Darwish, SM}, title = {The Application of Fractal Transform and Entropy for Improving Fault Tolerance and Load Balancing in Grid Computing Environments.}, journal = {Entropy (Basel, Switzerland)}, volume = {22}, number = {12}, pages = {}, pmid = {33333717}, issn = {1099-4300}, abstract = {This paper applies the entropy-based fractal indexing scheme that enables the grid environment for fast indexing and querying. It addresses the issue of fault tolerance and load balancing-based fractal management to make computational grids more effective and reliable. A fractal dimension of a cloud of points gives an estimate of the intrinsic dimensionality of the data in that space. The main drawback of this technique is the long computing time. 
The main contribution of the suggested work is to investigate the effect of fractal transform by adding R-tree index structure-based entropy to existing grid computing models to obtain a balanced infrastructure with minimal fault. In this regard, the presented work is going to extend the commonly scheduling algorithms that are built based on the physical grid structure to a reduced logical network. The objective of this logical network is to reduce the searching in the grid paths according to arrival time rate and path's bandwidth with respect to load balance and fault tolerance, respectively. Furthermore, an optimization searching technique is utilized to enhance the grid performance by investigating the optimum number of nodes extracted from the logical grid. The experimental results indicated that the proposed model has better execution time, throughput, makespan, latency, load balancing, and success rate.}, } @article {pmid33332376, year = {2020}, author = {Khan, A and Nawaz, U and Ulhaq, A and Robinson, RW}, title = {Real-time plant health assessment via implementing cloud-based scalable transfer learning on AWS DeepLens.}, journal = {PloS one}, volume = {15}, number = {12}, pages = {e0243243}, pmid = {33332376}, issn = {1932-6203}, mesh = {Algorithms ; *Cloud Computing ; Image Processing, Computer-Assisted/*methods ; *Machine Learning ; *Plant Diseases/classification ; *Plant Leaves/anatomy & histology ; }, abstract = {The control of plant leaf diseases is crucial as it affects the quality and production of plant species with an effect on the economy of any country. Automated identification and classification of plant leaf diseases is, therefore, essential for the reduction of economic losses and the conservation of specific species. Various Machine Learning (ML) models have previously been proposed to detect and identify plant leaf disease; however, they lack usability due to hardware sophistication, limited scalability and realistic use inefficiency. 
By implementing automatic detection and classification of leaf diseases in fruit trees (apple, grape, peach and strawberry) and vegetable plants (potato and tomato) through scalable transfer learning on Amazon Web Services (AWS) SageMaker and importing it into AWS DeepLens for real-time functional usability, our proposed DeepLens Classification and Detection Model (DCDM) addresses such limitations. Scalability and ubiquitous access to our approach is provided by cloud integration. Our experiments on an extensive image data set of healthy and unhealthy fruit trees and vegetable plant leaves showed 98.78% accuracy with a real-time diagnosis of diseases of plant leaves. To train DCDM deep learning model, we used forty thousand images and then evaluated it on ten thousand images. It takes an average of 0.349s to test an image for disease diagnosis and classification using AWS DeepLens, providing the consumer with disease information in less than a second.}, } @article {pmid33329060, year = {2020}, author = {Molina-Molina, A and Ruiz-Malagón, EJ and Carrillo-Pérez, F and Roche-Seruendo, LE and Damas, M and Banos, O and García-Pinillos, F}, title = {Validation of mDurance, A Wearable Surface Electromyography System for Muscle Activity Assessment.}, journal = {Frontiers in physiology}, volume = {11}, number = {}, pages = {606287}, pmid = {33329060}, issn = {1664-042X}, abstract = {The mDurance® system is an innovative digital tool that combines wearable surface electromyography (sEMG), mobile computing and cloud analysis to streamline and automatize the assessment of muscle activity. The tool is particularly devised to support clinicians and sport professionals in their daily routines, as an assessment tool in the prevention, monitoring rehabilitation and training field. This study aimed at determining the validity of the mDurance system for measuring muscle activity by comparing sEMG output with a reference sEMG system, the Delsys® system. 
Fifteen participants were tested during isokinetic knee extensions at three different speeds (60, 180, and 300 deg/s), for two muscles (rectus femoris [RF] and vastus lateralis [VL]) and two different electrodes locations (proximal and distal placement). The maximum voluntary isometric contraction was carried out for the normalization of the signal, followed by dynamic isokinetic knee extensions for each speed. The sEMG output for both systems was obtained from the raw sEMG signal following mDurance's processing and filtering. Mean, median, first quartile, third quartile and 90th percentile was calculated from the sEMG amplitude signals for each system. The results show an almost perfect ICC relationship for the VL (ICC > 0.81) and substantial to almost perfect for the RF (ICC > 0.762) for all variables and speeds. The Bland-Altman plots revealed heteroscedasticity of error for mean, quartile 3 and 90th percentile (60 and 300 deg/s) for RF and at mean and 90th percentile for VL (300 deg/s). In conclusion, the results indicate that the mDurance® sEMG system is a valid tool to measure muscle activity during dynamic contractions over a range of speeds. 
This innovative system provides more time for clinicians (e.g., interpretation patients' pathologies) and sport trainers (e.g., advising athletes), thanks to automatic processing and filtering of the raw sEMG signal and generation of muscle activity reports in real-time.}, } @article {pmid33327512, year = {2020}, author = {Filev Maia, R and Ballester Lurbe, C and Agrahari Baniya, A and Hornbuckle, J}, title = {IRRISENS: An IoT Platform Based on Microservices Applied in Commercial-Scale Crops Working in a Multi-Cloud Environment.}, journal = {Sensors (Basel, Switzerland)}, volume = {20}, number = {24}, pages = {}, pmid = {33327512}, issn = {1424-8220}, support = {1920FRP//Australian Government Department of Agriculture/ ; }, abstract = {Research has shown the multitude of applications that Internet of Things (IoT), cloud computing, and forecast technologies present in every sector. In agriculture, one application is the monitoring of factors that influence crop development to assist in making crop management decisions. Research on the application of such technologies in agriculture has been mainly conducted at small experimental sites or under controlled conditions. This research has provided relevant insights and guidelines for the use of different types of sensors, application of a multitude of algorithms to forecast relevant parameters as well as architectural approaches of IoT platforms. However, research on the implementation of IoT platforms at the commercial scale is needed to identify platform requirements to properly function under such conditions. This article evaluates an IoT platform (IRRISENS) based on fully replicable microservices used to sense soil, crop, and atmosphere parameters, interact with third-party cloud services for scheduling irrigation and, potentially, control irrigation automatically. 
The proposed IoT platform was evaluated during one growing season at four commercial-scale farms on two broadacre irrigated crops with very different water management requirements (rice and cotton). Five main requirements for IoT platforms to be used in agriculture at commercial scale were identified from implementing IRRISENS as an irrigation support tool for rice and cotton production: scalability, flexibility, heterogeneity, robustness to failure, and security. The platform addressed all these requirements. The results showed that the microservice-based approach used is robust against both intermittent and critical failures in the field that could occur in any of the monitored sites. Further, processing or storage overload caused by datalogger malfunctioning or other reasons at one farm did not affect the platform's performance. The platform was able to deal with different types of data heterogeneity. Since there are no shared microservices among farms, the IoT platform proposed here also provides data isolation, maintaining data confidentiality for each user, which is relevant in a commercial farm scenario.}, } @article {pmid33327453, year = {2020}, author = {Suryanto, N and Kang, H and Kim, Y and Yun, Y and Larasati, HT and Kim, H}, title = {A Distributed Black-Box Adversarial Attack Based on Multi-Group Particle Swarm Optimization.}, journal = {Sensors (Basel, Switzerland)}, volume = {20}, number = {24}, pages = {}, pmid = {33327453}, issn = {1424-8220}, support = {2019-0-01343//Ministry of Science and ICT, South Korea/ ; IITP-2020-0-01797//Ministry of Science and ICT, South Korea/ ; }, mesh = {*Algorithms ; *Artificial Intelligence ; Humans ; }, abstract = {Adversarial attack techniques in deep learning have been studied extensively due to its stealthiness to human eyes and potentially dangerous consequences when applied to real-life applications. 
However, current attack methods in black-box settings mainly employ a large number of queries for crafting their adversarial examples, hence making them very likely to be detected and responded by the target system (e.g., artificial intelligence (AI) service provider) due to its high traffic volume. A recent proposal able to address the large query problem utilizes a gradient-free approach based on Particle Swarm Optimization (PSO) algorithm. Unfortunately, this original approach tends to have a low attack success rate, possibly due to the model's difficulty of escaping local optima. This obstacle can be overcome by employing a multi-group approach for PSO algorithm, by which the PSO particles can be redistributed, preventing them from being trapped in local optima. In this paper, we present a black-box adversarial attack which can significantly increase the success rate of PSO-based attack while maintaining a low number of query by launching the attack in a distributed manner. Attacks are executed from multiple nodes, disseminating queries among the nodes, hence reducing the possibility of being recognized by the target system while also increasing scalability. Furthermore, we utilize Multi-Group PSO with Random Redistribution (MGRR-PSO) for perturbation generation, performing better than the original approach against local optima, thus achieving a higher success rate. Additionally, we propose to efficiently remove excessive perturbation (i.e, perturbation pruning) by utilizing again the MGRR-PSO rather than a standard iterative method as used in the original approach. We perform five different experiments: comparing our attack's performance with existing algorithms, testing in high-dimensional space in ImageNet dataset, examining our hyperparameters (i.e., particle size, number of clients, search boundary), and testing on real digital attack to Google Cloud Vision. 
Our attack proves to obtain a 100% success rate on MNIST and CIFAR-10 datasets and able to successfully fool Google Cloud Vision as a proof of the real digital attack by maintaining a lower query and wide applicability.}, } @article {pmid33311728, year = {2020}, author = {Karim, HMR}, title = {Cloud computing-based remote pre-anaesthetic check-up: An adapted approach during corona pandemic.}, journal = {Indian journal of anaesthesia}, volume = {64}, number = {Suppl 4}, pages = {S248-S249}, pmid = {33311728}, issn = {0019-5049}, } @article {pmid33297921, year = {2021}, author = {Singh, NK and Kumar, N and Singh, AK}, title = {Physiology to Disease Transmission of Respiratory Tract Infection: A Narrative Review.}, journal = {Infectious disorders drug targets}, volume = {21}, number = {6}, pages = {e170721188930}, doi = {10.2174/1871526520666201209145908}, pmid = {33297921}, issn = {2212-3989}, mesh = {Air Microbiology ; *COVID-19 ; Humans ; *Respiratory Tract Infections ; SARS-CoV-2 ; Sneezing ; }, abstract = {INTRODUCTION: In the current scenario of the COVID 19 pandemic, the protective reflexes, namely sneeze and cough, have received great importance. However, it is not in terms of protection but in terms of the spread of infection. The present review tries to bring out the correlation between the physiology of sneeze and cough, taking into consideration the various receptors that initiate the two reflexes, then correlating it with the formation of expelled droplets and the significance of various aspects of droplets that lead to the spread of infection.

MATERIAL AND METHODS: For the compilation of the present review, we searched the terms "Physiology of cough", "Physiology of sneeze", "droplets", "aerosols" and "Aerosols in COVID 19". The above-mentioned terms were extensively searched on PubMed, Google Scholar, and the Google search engine. After reviewing the various available material, the most significant research has been considered for this review.

CONCLUSION: Through this review, we conclude that there are various factors responsible for the initiation of sneeze and cough, but in the case of infection, it is mainly the inflammatory reaction that directly stimulates the receptors to produce the reflex outburst air. As the flow of air during expiration is turbulent, it causes damage to the Epithelial Lining Fluid present in the respiratory conduit. In addition, it gets admixed with the saliva in the oropharynx and oral cavity and mucus in the nose to form droplets of various sizes. Large droplets settle close and are responsible for droplet and fomite transmission, but the smaller droplets remain suspended in the air and travel farther distances to cause airborne transmission. The spread of droplet cloud in sneezing may range to 6m or more as compared to cough; hence the concept of 1m to 2m of social distancing does not hold reliable if the patient is sneezing.}, } @article {pmid33297386, year = {2020}, author = {Sheng, J and Liu, C and Chen, L and Wang, B and Zhang, J}, title = {Research on Community Detection in Complex Networks Based on Internode Attraction.}, journal = {Entropy (Basel, Switzerland)}, volume = {22}, number = {12}, pages = {}, pmid = {33297386}, issn = {1099-4300}, support = {No.2018YFB1003602//National Key Research and Development Program of China/ ; }, abstract = {With the rapid development of computer technology, the research on complex networks has attracted more and more attention. At present, the research directions of cloud computing, big data, internet of vehicles, and distributed systems with very high attention are all based on complex networks. Community structure detection is a very important and meaningful research hotspot in complex networks. It is a difficult task to quickly and accurately divide the community structure and run it on large-scale networks. In this paper, we put forward a new community detection approach based on internode attraction, named IACD. 
This algorithm starts from the perspective of the important nodes of the complex network and refers to the gravitational relationship between two objects in physics to represent the forces between nodes in the network dataset, and then perform community detection. Through experiments on a large number of real-world datasets and synthetic networks, it is shown that the IACD algorithm can quickly and accurately divide the community structure, and it is superior to some classic algorithms and recently proposed algorithms.}, } @article {pmid33292419, year = {2020}, author = {Abbasi, WA and Yaseen, A and Hassan, FU and Andleeb, S and Minhas, FUAA}, title = {ISLAND: in-silico proteins binding affinity prediction using sequence information.}, journal = {BioData mining}, volume = {13}, number = {1}, pages = {20}, pmid = {33292419}, issn = {1756-0381}, support = {Open Access Publishing Support//University of Warwick/ ; 213-58990-2PS2-046//Higher Education Commission, Pakistan/ ; NRPU 6085//Higher Education Commision, Pakistan/ ; }, abstract = {BACKGROUND: Determining binding affinity in protein-protein interactions is important in the discovery and design of novel therapeutics and mutagenesis studies. Determination of binding affinity of proteins in the formation of protein complexes requires sophisticated, expensive and time-consuming experimentation which can be replaced with computational methods. Most computational prediction techniques require protein structures that limit their applicability to protein complexes with known structures. In this work, we explore sequence-based protein binding affinity prediction using machine learning.

METHOD: We have used protein sequence information instead of protein structures along with machine learning techniques to accurately predict the protein binding affinity.

RESULTS: We present our findings that the true generalization performance of even the state-of-the-art sequence-only predictor is far from satisfactory and that the development of machine learning methods for binding affinity prediction with improved generalization performance is still an open problem. We have also proposed a sequence-based novel protein binding affinity predictor called ISLAND which gives better accuracy than existing methods over the same validation set as well as on an external independent test dataset. A cloud-based webserver implementation of ISLAND and its Python code are available at https://sites.google.com/view/wajidarshad/software .

CONCLUSION: This paper highlights the fact that the true generalization performance of even the state-of-the-art sequence-only predictor of binding affinity is far from satisfactory and that the development of effective and practical methods in this domain is still an open problem.}, } @article {pmid33291634, year = {2020}, author = {Balaniuk, R and Isupova, O and Reece, S}, title = {Mining and Tailings Dam Detection in Satellite Imagery Using Deep Learning.}, journal = {Sensors (Basel, Switzerland)}, volume = {20}, number = {23}, pages = {}, pmid = {33291634}, issn = {1424-8220}, support = {000//Fundação de Apoio à Pesquisa do Distrito Federal - Brazil/ ; }, abstract = {This work explores the combination of free cloud computing, free open-source software, and deep learning methods to analyze a real, large-scale problem: the automatic country-wide identification and classification of surface mines and mining tailings dams in Brazil. Locations of officially registered mines and dams were obtained from the Brazilian government open data resource. Multispectral Sentinel-2 satellite imagery, obtained and processed at the Google Earth Engine platform, was used to train and test deep neural networks using the TensorFlow 2 application programming interface (API) and Google Colaboratory (Colab) platform. Fully convolutional neural networks were used in an innovative way to search for unregistered ore mines and tailing dams in large areas of the Brazilian territory. The efficacy of the approach is demonstrated by the discovery of 263 mines that do not have an official mining concession. This exploratory work highlights the potential of a set of new technologies, freely available, for the construction of low cost data science tools that have high social impact. 
At the same time, it discusses and seeks to suggest practical solutions for the complex and serious problem of illegal mining and the proliferation of tailings dams, which pose high risks to the population and the environment, especially in developing countries.}, } @article {pmid33291483, year = {2020}, author = {Zhang, S and Wen, Q and Li, W and Zhang, H and Jin, Z}, title = {A Multi-User Public Key Encryption with Multi-Keyword Search out of Bilinear Pairings.}, journal = {Sensors (Basel, Switzerland)}, volume = {20}, number = {23}, pages = {}, pmid = {33291483}, issn = {1424-8220}, support = {61502044//National Natural Science Foundation of China/ ; }, abstract = {Internet of Things (IoT) and cloud computing are adopted widely in daily life and industrial production. Sensors of IoT equipment gather personal, sensitive and important data, which is stored in a cloud server. The cloud helps users to save cost and collaborate. However, the privacy of data is also at risk. Public-key encryption with keyword search (PEKS) is convenient for users to use the data without leaking privacy. In this article, we give a scheme of PEKS for a multi-user to realize the multi-keyword search at once and extend it to show a rank based on keywords match. The receiver can finish the search by himself or herself. With private cloud and server cloud, most users' computing can be outsourced. Moreover, the PEKS can be transferred to a multi-user model in which the private cloud is used to manage receivers and outsource. The store cloud and the private cloud both obtain nothing with the keyword information. Then our IoT devices can easily run these protocols. 
As we do not use any pairing operations, the scheme is under more general assumptions that means the devices do not need to take on the heavy task of calculating pairing.}, } @article {pmid33287155, year = {2020}, author = {Chen, Y and Yang, T and Li, C and Zhang, Y}, title = {A Binarized Segmented ResNet Based on Edge Computing for Re-Identification.}, journal = {Sensors (Basel, Switzerland)}, volume = {20}, number = {23}, pages = {}, pmid = {33287155}, issn = {1424-8220}, support = {61802001//National Natural Science Foundation of China/ ; }, abstract = {With the advent of the Internet of Everything, more and more devices are connected to the Internet every year. In major cities, in order to maintain normal social order, the demand for deployed cameras is also increasing. In terms of public safety, person Re-Identification (ReID) can play a big role. However, the current methods of ReID are to transfer the collected pedestrian images to the cloud for processing, which will bring huge communication costs. In order to solve this problem, we combine the recently emerging edge computing and use the edge to combine the end devices and the cloud to implement our proposed binarized segmented ResNet. Our method is mainly to divide a complete ResNet into three parts, corresponding to the end devices, the edge, and the cloud. After joint training, the corresponding segmented sub-network is deployed to the corresponding side, and inference is performed to realize ReID. In our experiments, we compared some traditional ReID methods in terms of accuracy and communication overhead. It can be found that our method can greatly reduce the communication cost on the basis of basically not reducing the recognition accuracy of ReID. 
In general, the communication cost can be reduced by four to eight times.}, } @article {pmid33286195, year = {2020}, author = {Zhou, Y and Li, N and Tian, Y and An, D and Wang, L}, title = {Public Key Encryption with Keyword Search in Cloud: A Survey.}, journal = {Entropy (Basel, Switzerland)}, volume = {22}, number = {4}, pages = {}, pmid = {33286195}, issn = {1099-4300}, support = {2018CXGC0701//Shandong Provincial Key Research and Development Program of China/ ; No. 61972050//National Natural Science Foundation of China (NSFC)/ ; }, abstract = {With the popularization of cloud computing, many businesses and individuals prefer to outsource their data to cloud in encrypted form to protect data confidentiality. However, how to search over encrypted data becomes a concern for users. To address this issue, searchable encryption is a novel cryptographic primitive that enables user to search queries over encrypted data stored on an untrusted server while guaranteeing the privacy of the data. Public key encryption with keyword search (PEKS) has received a lot of attention as an important branch. In this paper, we focus on the development of PEKS in cloud by providing a comprehensive research survey. From a technological viewpoint, the existing PEKS schemes can be classified into several variants: PEKS based on public key infrastructure, PEKS based on identity-based encryption, PEKS based on attribute-based encryption, PEKS based on predicate encryption, PEKS based on certificateless encryption, and PEKS supporting proxy re-encryption. 
Moreover, we propose some potential applications and valuable future research directions in PEKS.}, } @article {pmid33270670, year = {2020}, author = {Salama AbdELminaam, D and Almansori, AM and Taha, M and Badr, E}, title = {A deep facial recognition system using computational intelligent algorithms.}, journal = {PloS one}, volume = {15}, number = {12}, pages = {e0242269}, pmid = {33270670}, issn = {1932-6203}, mesh = {Algorithms ; *Artificial Intelligence ; Deep Learning ; *Facial Recognition ; Humans ; *Machine Learning ; Neural Networks, Computer ; *Support Vector Machine ; }, abstract = {The development of biometric applications, such as facial recognition (FR), has recently become important in smart cities. Many scientists and engineers around the world have focused on establishing increasingly robust and accurate algorithms and methods for these types of systems and their applications in everyday life. FR is developing technology with multiple real-time applications. The goal of this paper is to develop a complete FR system using transfer learning in fog computing and cloud computing. The developed system uses deep convolutional neural networks (DCNN) because of the dominant representation; there are some conditions including occlusions, expressions, illuminations, and pose, which can affect the deep FR performance. DCNN is used to extract relevant facial features. These features allow us to compare faces between them in an efficient way. The system can be trained to recognize a set of people and to learn via an online method, by integrating the new people it processes and improving its predictions on the ones it already has. The proposed recognition method was tested with different three standard machine learning algorithms (Decision Tree (DT), K Nearest Neighbor(KNN), Support Vector Machine (SVM)). 
The proposed system has been evaluated using three datasets of face images (SDUMLA-HMT, 113, and CASIA) via performance metrics of accuracy, precision, sensitivity, specificity, and time. The experimental results show that the proposed method achieves superiority over other algorithms according to all parameters. The suggested algorithm results in higher accuracy (99.06%), higher precision (99.12%), higher recall (99.07%), and higher specificity (99.10%) than the comparison algorithms.}, } @article {pmid33268451, year = {2020}, author = {Aigouy, B and Cortes, C and Liu, S and Prud'Homme, B}, title = {EPySeg: a coding-free solution for automated segmentation of epithelia using deep learning.}, journal = {Development (Cambridge, England)}, volume = {147}, number = {24}, pages = {}, pmid = {33268451}, issn = {1477-9129}, mesh = {Computational Biology ; Deep Learning ; Epithelium/*growth & development ; Humans ; Image Processing, Computer-Assisted ; Morphogenesis/*genetics ; *Software ; }, abstract = {Epithelia are dynamic tissues that self-remodel during their development. During morphogenesis, the tissue-scale organization of epithelia is obtained through a sum of individual contributions of the cells constituting the tissue. Therefore, understanding any morphogenetic event first requires a thorough segmentation of its constituent cells. This task, however, usually involves extensive manual correction, even with semi-automated tools. Here, we present EPySeg, an open-source, coding-free software that uses deep learning to segment membrane-stained epithelial tissues automatically and very efficiently. EPySeg, which comes with a straightforward graphical user interface, can be used as a Python package on a local computer, or on the cloud via Google Colab for users not equipped with deep-learning compatible hardware. 
By substantially reducing human input in image segmentation, EPySeg accelerates and improves the characterization of epithelial tissues for all developmental biologists.}, } @article {pmid33266523, year = {2020}, author = {Cai, Y and Tang, C and Xu, Q}, title = {Two-Party Privacy-Preserving Set Intersection with FHE.}, journal = {Entropy (Basel, Switzerland)}, volume = {22}, number = {12}, pages = {}, pmid = {33266523}, issn = {1099-4300}, support = {61772147//Foundation of National Natural Science of China under Grant/ ; 2015A030308016//Guangdong Province Natural Science Foundation of major basic research and Cultivation project under Grant/ ; 2015KCXTD014//Project of Ordinary University Innovation Team Construction of Guangdong Province under Grant/ ; 2014KZDXM044//Basic Research Major Projects of Department of education of Guangdong Province under Grant/ ; 1201610005//Collaborative Innovation Major Projects of Bureau of Education of Guangzhou City under Grant/ ; 2019B020215004//Key-Area Research and Development Plan of Guangdong province under Grant/ ; }, abstract = {A two-party private set intersection allows two parties, the client and the server, to compute an intersection over their private sets, without revealing any information beyond the intersecting elements. We present a novel private set intersection protocol based on Shuhong Gao's fully homomorphic encryption scheme and prove the security of the protocol in the semi-honest model. We also present a variant of the protocol which is a completely novel construction for computing the intersection based on Bloom filter and fully homomorphic encryption, and the protocol's complexity is independent of the set size of the client. The security of the protocols relies on the learning with errors and ring learning with error problems. 
Furthermore, in the cloud with malicious adversaries, the computation of the private set intersection can be outsourced to the cloud service provider without revealing any private information.}, } @article {pmid33266243, year = {2020}, author = {Froiz-Míguez, I and Lopez-Iturri, P and Fraga-Lamas, P and Celaya-Echarri, M and Blanco-Novoa, Ó and Azpilicueta, L and Falcone, F and Fernández-Caramés, TM}, title = {Design, Implementation, and Empirical Validation of an IoT Smart Irrigation System for Fog Computing Applications Based on LoRa and LoRaWAN Sensor Nodes.}, journal = {Sensors (Basel, Switzerland)}, volume = {20}, number = {23}, pages = {}, pmid = {33266243}, issn = {1424-8220}, support = {ED431C 2020/15, ED431G490 2019/01//Xunta de Galicia/ ; RTI2018-095499-B-C31, TEC2016-75067-C4-1-R, RED2018-102668-T and PID2019-104958RB-C42//Agencia Estatal de Investigación of Spain and ERDF funds of the EU (FEDER Galicia 2014-2020 & AEI/FEDER Programs, UE)/ ; }, abstract = {Climate change is driving new solutions to manage water more efficiently. Such solutions involve the development of smart irrigation systems where Internet of Things (IoT) nodes are deployed throughout large areas. In addition, in the mentioned areas, wireless communications can be difficult due to the presence of obstacles and metallic objects that block electromagnetic wave propagation totally or partially. This article details the development of a smart irrigation system able to cover large urban areas thanks to the use of Low-Power Wide-Area Network (LPWAN) sensor nodes based on LoRa and LoRaWAN. IoT nodes collect soil temperature/moisture and air temperature data, and control water supply autonomously, either by making use of fog computing gateways or by relying on remote commands sent from a cloud. 
Since the selection of IoT node and gateway locations is essential to have good connectivity and to reduce energy consumption, this article uses an in-house 3D-ray launching radio-planning tool to determine the best locations in real scenarios. Specifically, this paper provides details on the modeling of a university campus, which includes elements like buildings, roads, green areas, or vehicles. In such a scenario, simulations and empirical measurements were performed for two different testbeds: a LoRaWAN testbed that operates at 868 MHz and a testbed based on LoRa with 433 MHz transceivers. All the measurements agree with the simulation results, showing the impact of shadowing effects and material features (e.g., permittivity, conductivity) in the electromagnetic propagation of near-ground and underground LoRaWAN communications. Higher RF power levels are observed for 433 MHz due to the higher transmitted power level and the lower radio propagation losses, and even in the worst gateway location, the received power level is higher than the sensitivity threshold (-148 dBm). Regarding water consumption, the provided estimations indicate that the proposed smart irrigation system is able to reduce roughly 23% of the amount of used water just by considering weather forecasts. 
The obtained results provide useful guidelines for future smart irrigation developers and show the radio planning tool accuracy, which allows for optimizing the sensor network topology and the overall performance of the network in terms of coverage, cost, and energy consumption.}, } @article {pmid33260321, year = {2020}, author = {Xu, S and Guo, C}, title = {Computation Offloading in a Cognitive Vehicular Networks with Vehicular Cloud Computing and Remote Cloud Computing.}, journal = {Sensors (Basel, Switzerland)}, volume = {20}, number = {23}, pages = {}, pmid = {33260321}, issn = {1424-8220}, support = {4202049//Beijing Natural Science Foundation/ ; 2018YFB1800805//National Key R\&D Program of China/ ; }, abstract = {To satisfy the explosive growth of computation-intensive vehicular applications, we investigated the computation offloading problem in a cognitive vehicular networks (CVN). Specifically, in our scheme, the vehicular cloud computing (VCC)- and remote cloud computing (RCC)-enabled computation offloading were jointly considered. So far, extensive research has been conducted on RCC-based computation offloading, while the studies on VCC-based computation offloading are relatively rare. In fact, due to the dynamic and uncertainty of on-board resource, the VCC-based computation offloading is more challenging then the RCC one, especially under the vehicular scenario with expensive inter-vehicle communication or poor communication environment. To solve this problem, we propose to leverage the VCC's computation resource for computation offloading with a perception-exploitation way, which mainly comprise resource discovery and computation offloading two stages. In resource discovery stage, upon the action-observation history, a Long Short-Term Memory (LSTM) model is proposed to predict the on-board resource utilizing status at next time slot. 
Thereafter, based on the obtained computation resource distribution, a decentralized multi-agent Deep Reinforcement Learning (DRL) algorithm is proposed to solve the collaborative computation offloading with VCC and RCC. Last but not least, the proposed algorithms' effectiveness is verified with a host of numerical simulation results from different perspectives.}, } @article {pmid33256006, year = {2020}, author = {Bandyopadhyay, A and Kumar Singh, V and Mukhopadhyay, S and Rai, U and Xhafa, F and Krause, P}, title = {Matching IoT Devices to the Fog Service Providers: A Mechanism Design Perspective.}, journal = {Sensors (Basel, Switzerland)}, volume = {20}, number = {23}, pages = {}, pmid = {33256006}, issn = {1424-8220}, support = {PhD-MLA/4(29)/2014-15//Visvesvaraya National Institute of Technology/ ; PRX19/00155//Spanish Ministry of Science, 610 Innovation and Universities/ ; PID2019-111100RB-C21/AEI/ 10.13039/501100011033//Ministerio de Ciencia e Innovación, Spain/ ; }, abstract = {In the Internet of Things (IoT) + Fog + Cloud architecture, with the unprecedented growth of IoT devices, one of the challenging issues that needs to be tackled is to allocate Fog service providers (FSPs) to IoT devices, especially in a game-theoretic environment. Here, the issue of allocation of FSPs to the IoT devices is sifted with game-theoretic idea so that utility maximizing agents may be benign. In this scenario, we have multiple IoT devices and multiple FSPs, and the IoT devices give preference ordering over the subset of FSPs. Given such a scenario, the goal is to allocate at most one FSP to each of the IoT devices. We propose mechanisms based on the theory of mechanism design without money to allocate FSPs to the IoT devices. The proposed mechanisms have been designed in a flexible manner to address the long and short duration access of the FSPs to the IoT devices. 
For analytical results, we have proved the economic robustness, and probabilistic analyses have been carried out for allocation of IoT devices to the FSPs. In simulation, mechanism efficiency is laid out under different scenarios with an implementation in Python.}, } @article {pmid33255294, year = {2020}, author = {Díaz-de-Arcaya, J and Miñón, R and Torre-Bastida, AI and Del Ser, J and Almeida, A}, title = {PADL: A Modeling and Deployment Language for Advanced Analytical Services.}, journal = {Sensors (Basel, Switzerland)}, volume = {20}, number = {23}, pages = {}, pmid = {33255294}, issn = {1424-8220}, support = {KK-2020/00049//SPRI-Basque Government ELKARTEK 3KIA/ ; IT1294-19//Consolidated Research Group MATHMODE/ ; RTI2018-101045-A-C22//Ministerio de Ciencia y Tecnología/ ; }, abstract = {In the smart city context, Big Data analytics plays an important role in processing the data collected through IoT devices. The analysis of the information gathered by sensors favors the generation of specific services and systems that not only improve the quality of life of the citizens, but also optimize the city resources. However, the difficulties of implementing this entire process in real scenarios are manifold, including the huge amount and heterogeneity of the devices, their geographical distribution, and the complexity of the necessary IT infrastructures. For this reason, the main contribution of this paper is the PADL description language, which has been specifically tailored to assist in the definition and operationalization phases of the machine learning life cycle. It provides annotations that serve as an abstraction layer from the underlying infrastructure and technologies, hence facilitating the work of data scientists and engineers. Due to its proficiency in the operationalization of distributed pipelines over edge, fog, and cloud layers, it is particularly useful in the complex and heterogeneous environments of smart cities. 
For this purpose, PADL contains functionalities for the specification of monitoring, notifications, and actuation capabilities. In addition, we provide tools that facilitate its adoption in production environments. Finally, we showcase the usefulness of the language by showing the definition of PADL-compliant analytical pipelines over two use cases in a smart city context (flood control and waste management), demonstrating that its adoption is simple and beneficial for the definition of information and process flows in such environments.}, } @article {pmid35382513, year = {2020}, author = {Adedolapo, O and Huichen, Y and Avishek, B and William, H and Dan, A and Mohammed, T}, title = {Feature Selection for Learning to Predict Outcomes of Compute Cluster Jobs with Application to Decision Support.}, journal = {Proceedings. International Conference on Computational Science and Computational Intelligence}, volume = {2020}, number = {}, pages = {1231-1236}, pmid = {35382513}, issn = {2769-5654}, support = {P20 GM113109/GM/NIGMS NIH HHS/United States ; }, abstract = {We present a machine learning framework and a new test bed for data mining from the Slurm Workload Manager for high-performance computing (HPC) clusters. The focus was to find a method for selecting features to support decisions: helping users decide whether to resubmit failed jobs with boosted CPU and memory allocations or migrate them to a computing cloud. This task was cast as both supervised classification and regression learning, specifically, sequential problem solving suitable for reinforcement learning. Selecting relevant features can improve training accuracy, reduce training time, and produce a more comprehensible model, with an intelligent system that can explain predictions and inferences. 
We present a supervised learning model trained on a Simple Linux Utility for Resource Management (Slurm) data set of HPC jobs using three different techniques for selecting features: linear regression, lasso, and ridge regression. Our data set represented both HPC jobs that failed and those that succeeded, so our model was reliable, less likely to overfit, and generalizable. Our model achieved an R[2] of 95% with 99% accuracy. We identified five predictors for both CPU and memory properties.}, } @article {pmid33237919, year = {2020}, author = {Gonzalez Villasanti, H and Justice, LM and Chaparro-Moreno, LJ and Lin, TJ and Purtell, K}, title = {Automatized analysis of children's exposure to child-directed speech in preschool settings: Validation and application.}, journal = {PloS one}, volume = {15}, number = {11}, pages = {e0242511}, pmid = {33237919}, issn = {1932-6203}, mesh = {Adult ; Automated Facial Recognition/*methods ; Child, Preschool/*education ; Cloud Computing ; Facial Expression ; Female ; Humans ; Interpersonal Relations ; Language Development ; Machine Learning ; Peer Group ; Phonetics ; *Speech ; Speech Perception ; *Speech Recognition Software ; *Teaching ; Video Recording ; }, abstract = {The present study explored whether a tool for automatic detection and recognition of interactions and child-directed speech (CDS) in preschool classrooms could be developed, validated, and applied to non-coded video recordings representing children's classroom experiences. Using first-person video recordings collected by 13 preschool children during a morning in their classrooms, we extracted high-level audiovisual features from recordings using automatic speech recognition and computer vision services from a cloud computing provider. Using manual coding for interactions and transcriptions of CDS as reference, we trained and tested supervised classifiers and linear mappings to measure five variables of interest. 
We show that the supervised classifiers trained with speech activity, proximity, and high-level facial features achieve adequate accuracy in detecting interactions. Furthermore, in combination with an automatic speech recognition service, the supervised classifier achieved error rates for CDS measures that are in line with other open-source automatic decoding tools in early childhood settings. Finally, we demonstrate our tool's applicability by using it to automatically code and transcribe children's interactions and CDS exposure vertically within a classroom day (morning to afternoon) and horizontally over time (fall to winter). Developing and scaling tools for automatized capture of children's interactions with others in the preschool classroom, as well as exposure to CDS, may revolutionize scientific efforts to identify precise mechanisms that foster young children's language development.}, } @article {pmid33232315, year = {2020}, author = {Haas, T}, title = {Developing political-ecological theory: The need for many-task computing.}, journal = {PloS one}, volume = {15}, number = {11}, pages = {e0226861}, pmid = {33232315}, issn = {1932-6203}, mesh = {*Acinonyx ; Algorithms ; Animals ; Cloud Computing ; Conservation of Natural Resources/legislation & jurisprudence/*methods ; Ecosystem ; Endangered Species ; Models, Theoretical ; Politics ; }, abstract = {Models of political-ecological systems can inform policies for managing ecosystems that contain endangered species. To increase the credibility of these models, massive computation is needed to statistically estimate the model's parameters, compute confidence intervals for these parameters, determine the model's prediction error rate, and assess its sensitivity to parameter misspecification. To meet this statistical and computational challenge, this article delivers statistical algorithms and a method for constructing ecosystem management plans that are coded as distributed computing applications. 
These applications can run on cluster computers, the cloud, or a collection of in-house workstations. This downloadable code is used to address the challenge of conserving the East African cheetah (Acinonyx jubatus). This demonstration means that the new standard of credibility that any political-ecological model needs to meet is the one given herein.}, } @article {pmid33217896, year = {2020}, author = {Hassan, SR and Ahmad, I and Ahmad, S and Alfaify, A and Shafiq, M}, title = {Remote Pain Monitoring Using Fog Computing for e-Healthcare: An Efficient Architecture.}, journal = {Sensors (Basel, Switzerland)}, volume = {20}, number = {22}, pages = {}, pmid = {33217896}, issn = {1424-8220}, support = {RSP-2020/256//King Saud University, Riyadh, Saudi Arabia/ ; }, mesh = {Cloud Computing ; Delivery of Health Care ; Electrocardiography ; Electromyography ; Humans ; *Internet of Things ; Pain/*diagnosis ; *Remote Sensing Technology ; *Telemedicine ; Wireless Technology ; }, abstract = {The integration of medical signal processing capabilities and advanced sensors into Internet of Things (IoT) devices plays a key role in providing comfort and convenience to human lives. As the number of patients is increasing gradually, providing healthcare facilities to each patient, particularly to the patients located in remote regions, not only has become challenging but also results in several issues, such as: (i) increase in workload on paramedics, (ii) wastage of time, and (iii) accommodation of patients. Therefore, the design of smart healthcare systems has become an important area of research to overcome these above-mentioned issues. Several healthcare applications have been designed using wireless sensor networks (WSNs), cloud computing, and fog computing. Most of the e-healthcare applications are designed using the cloud computing paradigm. 
Cloud-based architecture introduces high latency while processing huge amounts of data, thus restricting the large-scale implementation of latency-sensitive e-healthcare applications. Fog computing architecture offers processing and storage resources near to the edge of the network, thus, designing e-healthcare applications using the fog computing paradigm is of interest to meet the low latency requirement of such applications. Patients that are minors or are in intensive care units (ICUs) are unable to self-report their pain conditions. The remote healthcare monitoring applications deploy IoT devices with bio-sensors capable of sensing surface electromyogram (sEMG) and electrocardiogram (ECG) signals to monitor the pain condition of such patients. In this article, fog computing architecture is proposed for deploying a remote pain monitoring system. The key motivation for adopting the fog paradigm in our proposed approach is to reduce latency and network consumption. To validate the effectiveness of the proposed approach in minimizing delay and network utilization, simulations were carried out in iFogSim and the results were compared with the cloud-based systems. The results of the simulations carried out in this research indicate that a reduction in both latency and network consumption can be achieved by adopting the proposed approach for implementing a remote pain monitoring system.}, } @article {pmid33216660, year = {2021}, author = {Krishna, R and Elisseev, V}, title = {User-centric genomics infrastructure: trends and technologies.}, journal = {Genome}, volume = {64}, number = {4}, pages = {467-475}, doi = {10.1139/gen-2020-0096}, pmid = {33216660}, issn = {1480-3321}, mesh = {Base Sequence ; Computational Biology/*methods ; Genomics/*methods ; Humans ; Software ; }, abstract = {Genomics is both a data- and compute-intensive discipline. 
The success of genomics depends on an adequate informatics infrastructure that can address growing data demands and enable a diverse range of resource-intensive computational activities. Designing a suitable infrastructure is a challenging task, and its success largely depends on its adoption by users. In this article, we take a user-centric view of the genomics, where users are bioinformaticians, computational biologists, and data scientists. We try to take their point of view on how traditional computational activities for genomics are expanding due to data growth, as well as the introduction of big data and cloud technologies. The changing landscape of computational activities and new user requirements will influence the design of future genomics infrastructures.}, } @article {pmid33211552, year = {2021}, author = {Van Horn, JD}, title = {Bridging the Brain and Data Sciences.}, journal = {Big data}, volume = {9}, number = {3}, pages = {153-187}, pmid = {33211552}, issn = {2167-647X}, support = {R24 MH114796/MH/NIMH NIH HHS/United States ; U24 ES026465/ES/NIEHS NIH HHS/United States ; R44 NS081792/NS/NINDS NIH HHS/United States ; }, mesh = {Brain ; *Cloud Computing ; *Data Science ; Publishing ; }, abstract = {Brain scientists are now capable of collecting more data in a single experiment than researchers a generation ago might have collected over an entire career. Indeed, the brain itself seems to thirst for more and more data. Such digital information not only comprises individual studies but is also increasingly shared and made openly available for secondary, confirmatory, and/or combined analyses. Numerous web resources now exist containing data across spatiotemporal scales. Data processing workflow technologies running via cloud-enabled computing infrastructures allow for large-scale processing. Such a move toward greater openness is fundamentally changing how brain science results are communicated and linked to available raw data and processed results. 
Ethical, professional, and motivational issues challenge the whole-scale commitment to data-driven neuroscience. Nevertheless, fueled by government investments into primary brain data collection coupled with increased sharing and community pressure challenging the dominant publishing model, large-scale brain and data science is here to stay.}, } @article {pmid33211312, year = {2020}, author = {Giardini, ME and Livingstone, IAT}, title = {Extending the Reach and Task-Shifting Ophthalmology Diagnostics Through Remote Visualisation.}, journal = {Advances in experimental medicine and biology}, volume = {1260}, number = {}, pages = {161-174}, pmid = {33211312}, issn = {0065-2598}, mesh = {Artificial Intelligence ; Child ; *Eye Diseases/diagnostic imaging ; Humans ; Infant, Newborn ; Ophthalmology/*trends ; *Remote Consultation ; Telemedicine/*trends ; }, abstract = {Driven by the global increase in the size and median age of the world population, sight loss is becoming a major public health challenge. Furthermore, the increased survival of premature neonates in low- and middle-income countries is causing an increase in developmental paediatric ophthalmic disease. Finally, there is an ongoing change in health-seeking behaviour worldwide, with consequent demand for increased access to healthcare, including ophthalmology. There is therefore the need to maximise the reach of resource-limited ophthalmology expertise in the context of increasing demand. Yet, ophthalmic diagnostics critically relies on visualisation, through optical imaging, of the front and of the back of the eye, and teleophthalmology, the remote visualisation of diagnostic images, shows promise to offer a viable solution.In this chapter, we first explore the strategies at the core of teleophthalmology and, in particular, real-time vs store-and-forward remote visualisation techniques, including considerations on suitability for different tasks and environments. 
We then introduce the key technologies suitable for teleophthalmology: anterior segment imaging, posterior segment imaging (retinal imaging) and, briefly, radiographic/tomographic techniques. We highlight enabling factors, such as high-resolution handheld imaging, high data rate mobile transmission, cloud storage and computing, 3D printing and other rapid fabrication technologies and patient and healthcare system acceptance of remote consultations. We then briefly discuss four canonical implementation settings, namely, national service provision integration, field and community screening, optometric decision support and virtual clinics, giving representative examples. We conclude with considerations on the outlook of the field, in particular, on artificial intelligence and on robotic actuation of the patient end point as a complement to televisualisation.}, } @article {pmid33211025, year = {2020}, author = {Tsai, VF and Zhuang, B and Pong, YH and Hsieh, JT and Chang, HC}, title = {Web- and Artificial Intelligence-Based Image Recognition For Sperm Motility Analysis: Verification Study.}, journal = {JMIR medical informatics}, volume = {8}, number = {11}, pages = {e20031}, pmid = {33211025}, issn = {2291-9694}, abstract = {BACKGROUND: Human sperm quality fluctuates over time. Therefore, it is crucial for couples preparing for natural pregnancy to monitor sperm motility.

OBJECTIVE: This study verified the performance of an artificial intelligence-based image recognition and cloud computing sperm motility testing system (Bemaner, Createcare) composed of microscope and microfluidic modules and designed to adapt to different types of smartphones.

METHODS: Sperm videos were captured and uploaded to the cloud with an app. Analysis of sperm motility was performed by an artificial intelligence-based image recognition algorithm then results were displayed. According to the number of motile sperm in the vision field, 47 (deidentified) videos of sperm were scored using 6 grades (0-5) by a male-fertility expert with 10 years of experience. Pearson product-moment correlation was calculated between the grades and the results (concentration of total sperm, concentration of motile sperm, and motility percentage) computed by the system.

RESULTS: Good correlation was demonstrated between the grades and results computed by the system for concentration of total sperm (r=0.65, P<.001), concentration of motile sperm (r=0.84, P<.001), and motility percentage (r=0.90, P<.001).

CONCLUSIONS: This smartphone-based sperm motility test (Bemaner) accurately measures motility-related parameters and could potentially be applied toward the following fields: male infertility detection, sperm quality test during preparation for pregnancy, and infertility treatment monitoring. With frequent at-home testing, more data can be collected to help make clinical decisions and to conduct epidemiological research.}, } @article {pmid33208108, year = {2020}, author = {Choi, JH and Kim, T and Jung, J and Joo, JWJ}, title = {Fully automated web-based tool for identifying regulatory hotspots.}, journal = {BMC genomics}, volume = {21}, number = {Suppl 10}, pages = {616}, pmid = {33208108}, issn = {1471-2164}, mesh = {Chromosome Mapping ; Internet ; Models, Statistical ; *Quantitative Trait Loci ; *Saccharomyces cerevisiae/genetics ; }, abstract = {BACKGROUND: Regulatory hotspots are genetic variations that may regulate the expression levels of many genes. It has been of great interest to find those hotspots utilizing expression quantitative trait locus (eQTL) analysis. However, it has been reported that many of the findings are spurious hotspots induced by various unknown confounding factors. Recently, methods utilizing complicated statistical models have been developed that successfully identify genuine hotspots. Next-generation Intersample Correlation Emended (NICE) is one of the methods that show high sensitivity and low false-discovery rate in finding regulatory hotspots. Even though the methods successfully find genuine hotspots, they have not been widely used due to their non-user-friendly interfaces and complex running processes. Furthermore, most of the methods are impractical due to their prohibitively high computational complexity.

RESULTS: To overcome the limitations of existing methods, we developed a fully automated web-based tool, referred to as NICER (NICE Renew), which is based on NICE program. First, we dramatically reduced running and installing burden of NICE. Second, we significantly reduced running time by incorporating multi-processing. Third, besides our web-based NICER, users can use NICER on Google Compute Engine and can readily install and run the NICER web service on their local computers. Finally, we provide different input formats and visualizations tools to show results. Utilizing a yeast dataset, we show that NICER can be successfully used in an eQTL analysis to identify many genuine regulatory hotspots, for which more than half of the hotspots were previously reported elsewhere.

CONCLUSIONS: Even though many hotspot analysis tools have been proposed, they have not been widely used for many practical reasons. NICER is a fully-automated web-based solution for eQTL mapping and regulatory hotspots analysis. NICER provides a user-friendly interface and has made hotspot analysis more viable by reducing the running time significantly. We believe that NICER will become the method of choice for increasing power of eQTL hotspot analysis.}, } @article {pmid33207820, year = {2020}, author = {Sadique, KM and Rahmani, R and Johannesson, P}, title = {IMSC-EIoTD: Identity Management and Secure Communication for Edge IoT Devices.}, journal = {Sensors (Basel, Switzerland)}, volume = {20}, number = {22}, pages = {}, pmid = {33207820}, issn = {1424-8220}, abstract = {The Internet of things (IoT) will accommodate several billions of devices to the Internet to enhance human society as well as to improve the quality of living. A huge number of sensors, actuators, gateways, servers, and related end-user applications will be connected to the Internet. All these entities require identities to communicate with each other. The communicating devices may have mobility and currently, the only main identity solution is IP based identity management which is not suitable for the authentication and authorization of the heterogeneous IoT devices. Sometimes devices and applications need to communicate in real-time to make decisions within very short times. Most of the recently proposed solutions for identity management are cloud-based. Those cloud-based identity management solutions are not feasible for heterogeneous IoT devices. In this paper, we have proposed an edge-fog based decentralized identity management and authentication solution for IoT devices (IoTD) and edge IoT gateways (EIoTG). We have also presented a secure communication protocol for communication between edge IoT devices and edge IoT gateways. 
The proposed security protocols are verified using Scyther formal verification tool, which is a popular tool for automated verification of security protocols. The proposed model is specified using the PROMELA language. SPIN model checker is used to confirm the specification of the proposed model. The results show different message flows without any error.}, } @article {pmid33207813, year = {2020}, author = {Liu, H and Li, S and Sun, W}, title = {Resource Allocation for Edge Computing without Using Cloud Center in Smart Home Environment: A Pricing Approach.}, journal = {Sensors (Basel, Switzerland)}, volume = {20}, number = {22}, pages = {}, pmid = {33207813}, issn = {1424-8220}, support = {71671159//National Natural Science Foundation of China/ ; 71971188//National Natural Science Foundation of China/ ; G2018203302//Natural Science Foundation of Hebei Province/ ; G2020203005//Natural Science Foundation of Hebei Province/ ; }, abstract = {Recently, more and more smart homes have become one of important parts of home infrastructure. However, most of the smart home applications are not interconnected and remain isolated. They use the cloud center as the control platform, which increases the risk of link congestion and data security. Thus, in the future, smart homes based on edge computing without using cloud center become an important research area. In this paper, we assume that all applications in a smart home environment are composed of edge nodes and users. In order to maximize the utility of users, we assume that all users and edge nodes are placed in a market and formulate a pricing resource allocation model with utility maximization. We apply the Lagrangian method to analyze the model, so an edge node (provider in the market) allocates its resources to a user (customer in the market) based on the prices of resources and the utility related to the preference of users. 
To obtain the optimal resource allocation, we propose a pricing-based resource allocation algorithm by using a low-pass filtering scheme and confirm that the proposed algorithm can achieve an optimum within reasonable convergence times through some numerical examples.}, } @article {pmid33205037, year = {2020}, author = {Singh, P and Kaur, R}, title = {An integrated fog and Artificial Intelligence smart health framework to predict and prevent COVID-19.}, journal = {Global transitions}, volume = {2}, number = {}, pages = {283-292}, pmid = {33205037}, issn = {2589-7918}, abstract = {Nowadays, COVID-19 is spreading at a rapid rate in almost all the continents of the world. It has already affected many people who are further spreading it day by day. Hence, it is the most essential to alert nearby people to be aware of it due to its communicable behavior. Till May 2020, no vaccine is available for the treatment of this COVID-19, but the existing technologies can be used to minimize its effect. Cloud/fog computing could be used to monitor and control this rapidly spreading infection in a cost-effective and time-saving manner. To strengthen COVID-19 patient prediction, Artificial Intelligence(AI) can be integrated with cloud/fog computing for practical solutions. In this paper, fog assisted the internet of things based quality of service framework is presented to prevent and protect from COVID-19. It provides real-time processing of users' health data to predict the COVID-19 infection by observing their symptoms and immediately generates an emergency alert, medical reports, and significant precautions to the user, their guardian as well as doctors/experts. It collects sensitive information from the hospitals/quarantine shelters through the patient IoT devices for taking necessary actions/decisions. 
Further, it generates an alert message to the government health agencies for controlling the outbreak of chronic illness and for taking quick and timely actions.}, } @article {pmid33204404, year = {2020}, author = {Alanazi, SA and Kamruzzaman, MM and Alruwaili, M and Alshammari, N and Alqahtani, SA and Karime, A}, title = {Measuring and Preventing COVID-19 Using the SIR Model and Machine Learning in Smart Health Care.}, journal = {Journal of healthcare engineering}, volume = {2020}, number = {}, pages = {8857346}, pmid = {33204404}, issn = {2040-2309}, mesh = {Algorithms ; Basic Reproduction Number/statistics & numerical data ; Biomedical Engineering ; COVID-19/epidemiology/*prevention & control ; Computer Simulation ; Delivery of Health Care ; Disease Susceptibility/epidemiology ; Female ; Forecasting ; Humans ; *Machine Learning ; Male ; *Models, Biological ; Pandemics/*prevention & control/statistics & numerical data ; Physical Distancing ; Quarantine ; SARS-CoV-2 ; Saudi Arabia/epidemiology ; Stochastic Processes ; }, abstract = {COVID-19 presents an urgent global challenge because of its contagious nature, frequently changing characteristics, and the lack of a vaccine or effective medicines. A model for measuring and preventing the continued spread of COVID-19 is urgently required to provide smart health care services. This requires using advanced intelligent computing such as artificial intelligence, machine learning, deep learning, cognitive computing, cloud computing, fog computing, and edge computing. This paper proposes a model for predicting COVID-19 using the SIR and machine learning for smart health care and the well-being of the citizens of KSA. Knowing the number of susceptible, infected, and recovered cases each day is critical for mathematical modeling to be able to identify the behavioral effects of the pandemic. It forecasts the situation for the upcoming 700 days. 
The proposed system predicts whether COVID-19 will spread in the population or die out in the long run. Mathematical analysis and simulation results are presented here as a means to forecast the progress of the outbreak and its possible end for three types of scenarios: "no actions," "lockdown," and "new medicines." The effect of interventions like lockdown and new medicines is compared with the "no actions" scenario. The lockdown case delays the peak point by decreasing the infection and affects the area equality rule of the infected curves. On the other side, new medicines have a significant impact on the infected curve by decreasing the number of infected people over time. Available forecast data on COVID-19 using simulations predict that the highest level of cases might occur between 15 and 30 November 2020. Simulation data suggest that the virus might be fully under control only after June 2021. The reproductive rate shows that measures such as government lockdowns and isolation of individuals are not enough to stop the pandemic. 
This study recommends that authorities should, as soon as possible, apply a strict long-term containment strategy to reduce the epidemic size successfully.}, } @article {pmid33200116, year = {2020}, author = {Gorgulla, C and Padmanabha Das, KM and Leigh, KE and Cespugli, M and Fischer, PD and Wang, ZF and Tesseyre, G and Pandita, S and Shnapir, A and Calderaio, A and Gechev, M and Rose, A and Lewis, N and Hutcheson, C and Yaffe, E and Luxenburg, R and Herce, HD and Durmaz, V and Halazonetis, TD and Fackeldey, K and Patten, JJ and Chuprina, A and Dziuba, I and Plekhova, A and Moroz, Y and Radchenko, D and Tarkhanova, O and Yavnyuk, I and Gruber, C and Yust, R and Payne, D and Näär, AM and Namchuk, MN and Davey, RA and Wagner, G and Kinney, J and Arthanari, H}, title = {A Multi-Pronged Approach Targeting SARS-CoV-2 Proteins Using Ultra-Large Virtual Screening.}, journal = {ChemRxiv : the preprint server for chemistry}, volume = {}, number = {}, pages = {}, pmid = {33200116}, issn = {2573-2293}, support = {R01 GM136859/GM/NIGMS NIH HHS/United States ; }, abstract = {Severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), previously known as 2019 novel coronavirus (2019-nCoV), has spread rapidly across the globe, creating an unparalleled global health burden and spurring a deepening economic crisis. As of July 7th, 2020, almost seven months into the outbreak, there are no approved vaccines and few treatments available. Developing drugs that target multiple points in the viral life cycle could serve as a strategy to tackle the current as well as future coronavirus pandemics. Here we leverage the power of our recently developed in silico screening platform, VirtualFlow, to identify inhibitors that target SARS-CoV-2. VirtualFlow is able to efficiently harness the power of computing clusters and cloud-based computing platforms to carry out ultra-large scale virtual screens. 
In this unprecedented structure-based multi-target virtual screening campaign, we have used VirtualFlow to screen an average of approximately 1 billion molecules against each of 40 different target sites on 17 different potential viral and host targets in the cloud. In addition to targeting the active sites of viral enzymes, we also target critical auxiliary sites such as functionally important protein-protein interaction interfaces. This multi-target approach not only increases the likelihood of finding a potent inhibitor, but could also help identify a collection of anti-coronavirus drugs that would retain efficacy in the face of viral mutation. Drugs belonging to different regimen classes could be combined to develop possible combination therapies, and top hits that bind at highly conserved sites would be potential candidates for further development as coronavirus drugs. Here, we present the top 200 in silico hits for each target site. While in-house experimental validation of some of these compounds is currently underway, we want to make this array of potential inhibitor candidates available to researchers worldwide in consideration of the pressing need for fast-tracked drug development.}, } @article {pmid33194451, year = {2020}, author = {Hesselmann, G}, title = {No conclusive evidence that difficult general knowledge questions cause a "Google Stroop effect". A replication study.}, journal = {PeerJ}, volume = {8}, number = {}, pages = {e10325}, pmid = {33194451}, issn = {2167-8359}, abstract = {Access to the digital "all-knowing cloud" has become an integral part of our daily lives. It has been suggested that the increasing offloading of information and information processing services to the cloud will alter human cognition and metacognition in the short and long term. A much-cited study published in Science in 2011 provided first behavioral evidence for such changes in human cognition. 
Participants had to answer difficult trivia questions, and subsequently showed longer response times in a variant of the Stroop task with internet-related words ("Google Stroop effect"). The authors of this study concluded that the concept of the Internet is automatically activated in situations where information is missing (e.g., because we might feel the urge to "google" the information). However, the "Google Stroop effect" could not be replicated in two recent replication attempts as part of a large replicability project. After the failed replication was published in 2018, the first author of the original study pointed out some problems with the design of the failed replication. In our study, we therefore aimed to replicate the "Google Stroop effect" with a research design closer to the original experiment. Our results revealed no conclusive evidence in favor of the notion that the concept of the Internet or internet access (via computers or smartphones) is automatically activated when participants are faced with hard trivia questions. We provide recommendations for follow-up research.}, } @article {pmid33193602, year = {2020}, author = {Guerra-Assunção, JA and Conde, L and Moghul, I and Webster, AP and Ecker, S and Chervova, O and Chatzipantsiou, C and Prieto, PP and Beck, S and Herrero, J}, title = {GenomeChronicler: The Personal Genome Project UK Genomic Report Generator Pipeline.}, journal = {Frontiers in genetics}, volume = {11}, number = {}, pages = {518644}, pmid = {33193602}, issn = {1664-8021}, abstract = {In recent years, there has been a significant increase in whole genome sequencing data of individual genomes produced by research projects as well as direct to consumer service providers. While many of these sources provide their users with an interpretation of the data, there is a lack of free, open tools for generating reports exploring the data in an easy to understand manner. 
GenomeChronicler was developed as part of the Personal Genome Project UK (PGP-UK) to address this need. PGP-UK provides genomic, transcriptomic, epigenomic and self-reported phenotypic data under an open-access model with full ethical approval. As a result, the reports generated by GenomeChronicler are intended for research purposes only and include information relating to potentially beneficial and potentially harmful variants, but without clinical curation. GenomeChronicler can be used with data from whole genome or whole exome sequencing, producing a genome report containing information on variant statistics, ancestry and known associated phenotypic traits. Example reports are available from the PGP-UK data page (personalgenomes.org.uk/data). The objective of this method is to leverage existing resources to find known phenotypes associated with the genotypes detected in each sample. The provided trait data is based primarily upon information available in SNPedia, but also collates data from ClinVar, GETevidence, and gnomAD to provide additional details on potential health implications, presence of genotype in other PGP participants and population frequency of each genotype. The analysis can be run in a self-contained environment without requiring internet access, making it a good choice for cases where privacy is essential or desired: any third party project can embed GenomeChronicler within their off-line safe-haven environments. GenomeChronicler can be run for one sample at a time, or in parallel making use of the Nextflow workflow manager. The source code is available from GitHub (https://github.com/PGP-UK/GenomeChronicler), container recipes are available for Docker and Singularity, as well as a pre-built container from SingularityHub (https://singularity-hub.org/collections/3664) enabling easy deployment in a variety of settings. 
Users without access to computational resources to run GenomeChronicler can access the software from the Lifebit CloudOS platform (https://lifebit.ai/cloudos) enabling the production of reports and variant calls from raw sequencing data in a scalable fashion.}, } @article {pmid33192434, year = {2020}, author = {Kirkland, P and Di Caterina, G and Soraghan, J and Matich, G}, title = {Perception Understanding Action: Adding Understanding to the Perception Action Cycle With Spiking Segmentation.}, journal = {Frontiers in neurorobotics}, volume = {14}, number = {}, pages = {568319}, pmid = {33192434}, issn = {1662-5218}, abstract = {Traditionally the Perception Action cycle is the first stage of building an autonomous robotic system and a practical way to implement a low latency reactive system within a low Size, Weight and Power (SWaP) package. However, within complex scenarios, this method can lack contextual understanding about the scene, such as object recognition-based tracking or system attention. Object detection, identification and tracking along with semantic segmentation and attention are all modern computer vision tasks in which Convolutional Neural Networks (CNN) have shown significant success, although such networks often have a large computational overhead and power requirements, which are not ideal in smaller robotics tasks. Furthermore, cloud computing and massively parallel processing like in Graphic Processing Units (GPUs) are outside the specification of many tasks due to their respective latency and SWaP constraints. In response to this, Spiking Convolutional Neural Networks (SCNNs) look to provide the feature extraction benefits of CNNs, while maintaining low latency and power overhead thanks to their asynchronous spiking event-based processing. A novel Neuromorphic Perception Understanding Action (PUA) system is presented, that aims to combine the feature extraction benefits of CNNs with low latency processing of SCNNs. 
The PUA utilizes a Neuromorphic Vision Sensor for Perception that facilitates asynchronous processing within a Spiking fully Convolutional Neural Network (SpikeCNN) to provide semantic segmentation and Understanding of the scene. The output is fed to a spiking control system providing Actions. With this approach, the aim is to bring features of deep learning into the lower levels of autonomous robotics, while maintaining a biologically plausible STDP rule throughout the learned encoding part of the network. The network will be shown to provide a more robust and predictable management of spiking activity with an improved thresholding response. The reported experiments show that this system can deliver robust results of over 96 and 81% for accuracy and Intersection over Union, ensuring such a system can be successfully used within object recognition, classification and tracking problem. This demonstrates that the attention of the system can be tracked accurately, while the asynchronous processing means the controller can give precise track updates with minimal latency.}, } @article {pmid33187267, year = {2020}, author = {Hamdan, S and Ayyash, M and Almajali, S}, title = {Edge-Computing Architectures for Internet of Things Applications: A Survey.}, journal = {Sensors (Basel, Switzerland)}, volume = {20}, number = {22}, pages = {}, pmid = {33187267}, issn = {1424-8220}, abstract = {The rapid growth of the Internet of Things (IoT) applications and their interference with our daily life tasks have led to a large number of IoT devices and enormous sizes of IoT-generated data. The resources of IoT devices are limited; therefore, the processing and storing IoT data in these devices are inefficient. Traditional cloud-computing resources are used to partially handle some of the IoT resource-limitation issues; however, using the resources in cloud centers leads to other issues, such as latency in time-critical IoT applications. 
Therefore, edge-cloud-computing technology has recently evolved. This technology allows for data processing and storage at the edge of the network. This paper studies, in-depth, edge-computing architectures for IoT (ECAs-IoT), and then classifies them according to different factors such as data placement, orchestration services, security, and big data. Besides, the paper studies each architecture in depth and compares them according to various features. Additionally, ECAs-IoT is mapped according to two existing IoT layered models, which helps in identifying the capabilities, features, and gaps of every architecture. Moreover, the paper presents the most important limitations of existing ECAs-IoT and recommends solutions to them. Furthermore, this survey details the IoT applications in the edge-computing domain. Lastly, the paper recommends four different scenarios for using ECAs-IoT by IoT applications.}, } @article {pmid33185051, year = {2020}, author = {LaRochelle, EPM and Pogue, BW}, title = {Theoretical lateral and axial sensitivity limits and choices of molecular reporters for Cherenkov-excited luminescence in tissue during x-ray beam scanning.}, journal = {Journal of biomedical optics}, volume = {25}, number = {11}, pages = {}, pmid = {33185051}, issn = {1560-2281}, support = {P30 CA023108/CA/NCI NIH HHS/United States ; R01 EB024498/EB/NIBIB NIH HHS/United States ; }, mesh = {*Luminescence ; Monte Carlo Method ; Phantoms, Imaging ; *Photons ; X-Rays ; }, abstract = {PURPOSE: Unlike fluorescence imaging utilizing an external excitation source, Cherenkov emissions and Cherenkov-excited luminescence occur within a medium when irradiated with high-energy x-rays. Methods to improve the understanding of the lateral spread and axial depth distribution of these emissions are needed as an initial step to improve the overall system resolution.

METHODS: Monte Carlo simulations were developed to investigate the lateral spread of thin sheets of high-energy sources and compared to experimental measurements of similar sources in water. Additional simulations of a multilayer skin model were used to investigate the limits of detection using both 6- and 18-MV x-ray sources with fluorescence excitation for inclusion depths up to 1 cm.

RESULTS: Simulations comparing the lateral spread of high-energy sources show approximately 100  ×   higher optical yield from electrons than photons, although electrons showed a larger penumbra in both the simulations and experimental measurements. Cherenkov excitation has a roughly inverse wavelength squared dependence in intensity but is largely redshifted in excitation through any distance of tissue. The calculated emission spectra in tissue were convolved with a database of luminescent compounds to produce a computational ranking of potential Cherenkov-excited luminescence molecular contrast agents.

CONCLUSIONS: Models of thin x-ray and electron sources were compared with experimental measurements, showing similar trends in energy and source type. Surface detection of Cherenkov-excited luminescence appears to be limited by the mean free path of the luminescence emission, where for the given simulation only 2% of the inclusion emissions reached the surface from a depth of 7 mm in a multilayer tissue model.}, } @article {pmid33178415, year = {2020}, author = {Zasada, SJ and Wright, DW and Coveney, PV}, title = {Large-scale binding affinity calculations on commodity compute clouds.}, journal = {Interface focus}, volume = {10}, number = {6}, pages = {20190133}, pmid = {33178415}, issn = {2042-8898}, support = {MR/L016311/1/MRC_/Medical Research Council/United Kingdom ; }, abstract = {In recent years, it has become possible to calculate binding affinities of compounds bound to proteins via rapid, accurate, precise and reproducible free energy calculations. This is imperative in drug discovery as well as personalized medicine. This approach is based on molecular dynamics (MD) simulations and draws on sequence and structural information of the protein and compound concerned. Free energies are determined by ensemble averages of many MD replicas, each of which requires hundreds of cores and/or GPU accelerators, which are now available on commodity cloud computing platforms; there are also requirements for initial model building and subsequent data analysis stages. To automate the process, we have developed a workflow known as the binding affinity calculator. 
In this paper, we focus on the software infrastructure and interfaces that we have developed to automate the overall workflow and execute it on commodity cloud platforms, in order to reliably predict their binding affinities on time scales relevant to the domains of application, and illustrate its application to two free energy methods.}, } @article {pmid33177037, year = {2020}, author = {Jeon, S and Seo, J and Kim, S and Lee, J and Kim, JH and Sohn, JW and Moon, J and Joo, HJ}, title = {Proposal and Assessment of a De-Identification Strategy to Enhance Anonymity of the Observational Medical Outcomes Partnership Common Data Model (OMOP-CDM) in a Public Cloud-Computing Environment: Anonymization of Medical Data Using Privacy Models.}, journal = {Journal of medical Internet research}, volume = {22}, number = {11}, pages = {e19597}, pmid = {33177037}, issn = {1438-8871}, mesh = {Cloud Computing/*standards ; Confidentiality/*standards ; Data Anonymization/*standards ; Databases, Factual/*standards ; Humans ; Medical Informatics/*methods ; }, abstract = {BACKGROUND: De-identifying personal information is critical when using personal health data for secondary research. The Observational Medical Outcomes Partnership Common Data Model (CDM), defined by the nonprofit organization Observational Health Data Sciences and Informatics, has been gaining attention for its use in the analysis of patient-level clinical data obtained from various medical institutions. When analyzing such data in a public environment such as a cloud-computing system, an appropriate de-identification strategy is required to protect patient privacy.

OBJECTIVE: This study proposes and evaluates a de-identification strategy that is comprised of several rules along with privacy models such as k-anonymity, l-diversity, and t-closeness. The proposed strategy was evaluated using the actual CDM database.

METHODS: The CDM database used in this study was constructed by the Anam Hospital of Korea University. Analysis and evaluation were performed using the ARX anonymizing framework in combination with the k-anonymity, l-diversity, and t-closeness privacy models.

RESULTS: The CDM database, which was constructed according to the rules established by Observational Health Data Sciences and Informatics, exhibited a low risk of re-identification: The highest re-identifiable record rate (11.3%) in the dataset was exhibited by the DRUG_EXPOSURE table, with a re-identification success rate of 0.03%. However, because all tables include at least one "highest risk" value of 100%, suitable anonymizing techniques are required; moreover, the CDM database preserves the "source values" (raw data), a combination of which could increase the risk of re-identification. Therefore, this study proposes an enhanced strategy to de-identify the source values to significantly reduce not only the highest risk in the k-anonymity, l-diversity, and t-closeness privacy models but also the overall possibility of re-identification.

CONCLUSIONS: Our proposed de-identification strategy effectively enhanced the privacy of the CDM database, thereby encouraging clinical research involving multiple centers.}, } @article {pmid33172017, year = {2020}, author = {Cecilia, JM and Cano, JC and Morales-García, J and Llanes, A and Imbernón, B}, title = {Evaluation of Clustering Algorithms on GPU-Based Edge Computing Platforms.}, journal = {Sensors (Basel, Switzerland)}, volume = {20}, number = {21}, pages = {}, pmid = {33172017}, issn = {1424-8220}, support = {RYC2018-025580-I//Ministerio de Ciencia e Innovación/ ; RTI2018-096384-B-I00//Ministerio de Ciencia e Innovación/ ; RTC2019-007159-5//Ministerio de Ciencia e Innovación/ ; 20813/PI/18//Fundación Séneca/ ; }, abstract = {Internet of Things (IoT) is becoming a new socioeconomic revolution in which data and immediacy are the main ingredients. IoT generates large datasets on a daily basis but it is currently considered as "dark data", i.e., data generated but never analyzed. The efficient analysis of this data is mandatory to create intelligent applications for the next generation of IoT applications that benefits society. Artificial Intelligence (AI) techniques are very well suited to identifying hidden patterns and correlations in this data deluge. In particular, clustering algorithms are of the utmost importance for performing exploratory data analysis to identify a set (a.k.a., cluster) of similar objects. Clustering algorithms are computationally heavy workloads and require to be executed on high-performance computing clusters, especially to deal with large datasets. This execution on HPC infrastructures is an energy hungry procedure with additional issues, such as high-latency communications or privacy. Edge computing is a paradigm to enable light-weight computations at the edge of the network that has been proposed recently to solve these issues. 
In this paper, we provide an in-depth analysis of emergent edge computing architectures that include low-power Graphics Processing Units (GPUs) to speed-up these workloads. Our analysis includes performance and power consumption figures of the latest Nvidia's AGX Xavier to compare the energy-performance ratio of these low-cost platforms with a high-performance cloud-based counterpart version. Three different clustering algorithms (i.e., k-means, Fuzzy Minimals (FM), and Fuzzy C-Means (FCM)) are designed to be optimally executed on edge and cloud platforms, showing a speed-up factor of up to 11× for the GPU code compared to sequential counterpart versions in the edge platforms and energy savings of up to 150% between the edge computing and HPC platforms.}, } @article {pmid33171714, year = {2020}, author = {Ghazal, M and Basmaji, T and Yaghi, M and Alkhedher, M and Mahmoud, M and El-Baz, AS}, title = {Cloud-Based Monitoring of Thermal Anomalies in Industrial Environments Using AI and the Internet of Robotic Things.}, journal = {Sensors (Basel, Switzerland)}, volume = {20}, number = {21}, pages = {}, pmid = {33171714}, issn = {1424-8220}, abstract = {Recent advancements in cloud computing, artificial intelligence, and the internet of things (IoT) create new opportunities for autonomous industrial environments monitoring. Nevertheless, detecting anomalies in harsh industrial settings remains challenging. This paper proposes an edge-fog-cloud architecture with mobile IoT edge nodes carried on autonomous robots for thermal anomalies detection in aluminum factories. We use companion drones as fog nodes to deliver first response services and a cloud back-end for thermal anomalies analysis. We also propose a self-driving deep learning architecture and a thermal anomalies detection and visualization algorithm. 
Our results show our robot surveyors are low-cost, deliver reduced response time, and more accurately detect anomalies compared to human surveyors or fixed IoT nodes monitoring the same industrial area. Our self-driving architecture has a root mean square error of 0.19 comparable to VGG-19 with a significantly reduced complexity and three times the frame rate at 60 frames per second. Our thermal to visual registration algorithm maximizes mutual information in the image-gradient domain while adapting to different resolutions and camera frame rates.}, } @article {pmid33171646, year = {2020}, author = {Kołakowska, A and Szwoch, W and Szwoch, M}, title = {A Review of Emotion Recognition Methods Based on Data Acquired via Smartphone Sensors.}, journal = {Sensors (Basel, Switzerland)}, volume = {20}, number = {21}, pages = {}, pmid = {33171646}, issn = {1424-8220}, mesh = {*Algorithms ; Bayes Theorem ; *Emotions ; Humans ; *Machine Learning ; *Smartphone ; }, abstract = {In recent years, emotion recognition algorithms have achieved high efficiency, allowing the development of various affective and affect-aware applications. This advancement has taken place mainly in the environment of personal computers offering the appropriate hardware and sufficient power to process complex data from video, audio, and other channels. However, the increase in computing and communication capabilities of smartphones, the variety of their built-in sensors, as well as the availability of cloud computing services have made them an environment in which the task of recognising emotions can be performed at least as effectively. This is possible and particularly important due to the fact that smartphones and other mobile devices have become the main computer devices used by most people. This article provides a systematic overview of publications from the last 10 years related to emotion recognition methods using smartphone sensors. 
The characteristics of the most important sensors in this respect are presented, and the methods applied to extract informative features on the basis of data read from these input channels. Then, various machine learning approaches implemented to recognise emotional states are described.}, } @article {pmid33163154, year = {2019}, author = {Wibberg, D and Batut, B and Belmann, P and Blom, J and Glöckner, FO and Grüning, B and Hoffmann, N and Kleinbölting, N and Rahn, R and Rey, M and Scholz, U and Sharan, M and Tauch, A and Trojahn, U and Usadel, B and Kohlbacher, O}, title = {The de.NBI / ELIXIR-DE training platform - Bioinformatics training in Germany and across Europe within ELIXIR.}, journal = {F1000Research}, volume = {8}, number = {}, pages = {}, pmid = {33163154}, issn = {2046-1402}, mesh = {Computational Biology/*education ; Europe ; Germany ; Humans ; }, abstract = {The German Network for Bioinformatics Infrastructure (de.NBI) is a national and academic infrastructure funded by the German Federal Ministry of Education and Research (BMBF). The de.NBI provides (i) service, (ii) training, and (iii) cloud computing to users in life sciences research and biomedicine in Germany and Europe and (iv) fosters the cooperation of the German bioinformatics community with international network structures. The de.NBI members also run the German node (ELIXIR-DE) within the European ELIXIR infrastructure. The de.NBI / ELIXIR-DE training platform, also known as special interest group 3 (SIG 3) 'Training & Education', coordinates the bioinformatics training of de.NBI and the German ELIXIR node. The network provides a high-quality, coherent, timely, and impactful training program across its eight service centers. Life scientists learn how to handle and analyze biological big data more effectively by applying tools, standards and compute services provided by de.NBI. 
Since 2015, more than 300 training courses were carried out with about 6,000 participants and these courses received recommendation rates of almost 90% (status as of July 2020). In addition to face-to-face training courses, online training was introduced on the de.NBI website in 2016 and guidelines for the preparation of e-learning material were established in 2018. In 2016, ELIXIR-DE joined the ELIXIR training platform. Here, the de.NBI / ELIXIR-DE training platform collaborates with ELIXIR in training activities, advertising training courses via TeSS and discussions on the exchange of data for training events essential for quality assessment on both the technical and administrative levels. The de.NBI training program trained thousands of scientists from Germany and beyond in many different areas of bioinformatics.}, } @article {pmid33163255, year = {2020}, author = {Bremer, E and Saltz, J and Almeida, JS}, title = {ImageBox 2 - Efficient and Rapid Access of Image Tiles from Whole-Slide Images Using Serverless HTTP Range Requests.}, journal = {Journal of pathology informatics}, volume = {11}, number = {}, pages = {29}, pmid = {33163255}, issn = {2229-5089}, abstract = {BACKGROUND: Whole-slide images (WSI) are produced by a high-resolution scanning of pathology glass slides. There are a large number of whole-slide imaging scanners, and the resulting images are frequently larger than 100,000 × 100,000 pixels which typically image 100,000 to one million cells, ranging from several hundred megabytes to many gigabytes in size.

AIMS AND OBJECTIVES: Provide HTTP access over the web to Whole Slide Image tiles that do not have localized tiling servers but only basic HTTP access. Move all image decode and tiling functions to the calling agent (ImageBox).

METHODS: Current software systems require tiling image servers to be installed on systems providing local disk access to these images. ImageBox2 breaks this requirement by accessing tiles from a remote HTTP source via byte-level HTTP range requests. This method does not require changing the client software, as the operation is relegated to the ImageBox2 server, which is local (or remote) to the client and can access tiles from remote images that have no server of their own, such as Amazon S3 hosted images. That is, it provides a data service on a server that does not need to be managed — the definition of the serverless execution model increasingly favored by cloud computing infrastructure.

CONCLUSIONS: The specific methodology described and assessed in this report preserves normal client connection semantics by enabling cloud-friendly tiling, promoting a web of http connected whole-slide images from a wide-ranging number of sources, and providing tiling where local tiling servers would have been otherwise unavailable.}, } @article {pmid33162127, year = {2021}, author = {Bergier, I and Papa, M and Silva, R and Santos, PM}, title = {Cloud/edge computing for compliance in the Brazilian livestock supply chain.}, journal = {The Science of the total environment}, volume = {761}, number = {}, pages = {143276}, doi = {10.1016/j.scitotenv.2020.143276}, pmid = {33162127}, issn = {1879-1026}, abstract = {Brazil is an important player in the global agribusiness markets, in which grain and beef make up the majority of exports. Barriers to access more valuable sustainable markets emerge from the lack of adequate compliance in supply chains. Here is depicted a mobile application based on cloud/edge computing for the livestock supply chain to circumvent that limitation. The application, called BovChain, is a peer-to-peer (P2P) network connecting landowners and slaughterhouses. The objective of the application is twofold. Firstly, it maximizes sustainable business by reducing transaction costs and by strengthening ties between state-authorized stakeholders. Secondly, it creates metadata useful for digital certification by exploiting CMOS and GPS sensor technologies embedded in low-cost smartphones. 
Successful declarative transactions in the digital space are recorded as metadata, and the corresponding big data might be valuable for the certification of livestock origin and traceability for sustainability compliance in 'glocal' beef markets.}, } @article {pmid33151974, year = {2020}, author = {Hanif, M and Lee, C and Helal, S}, title = {Predictive topology refinements in distributed stream processing system.}, journal = {PloS one}, volume = {15}, number = {11}, pages = {e0240424}, pmid = {33151974}, issn = {1932-6203}, mesh = {Algorithms ; *Big Data ; Cloud Computing/*standards ; Computer Communication Networks/*standards ; *Quality Control ; Workload ; }, abstract = {Cloud computing has evolved the big data technologies to a consolidated paradigm with SPaaS (Streaming processing-as-a-service). With a number of enterprises offering cloud-based solutions to end-users and other small enterprises, there has been a boom in the volume of data, creating interest of both industry and academia in big data analytics, streaming applications, and social networking applications. With the companies shifting to cloud-based solutions as a service paradigm, the competition grows in the market. Good quality of service (QoS) is a must for the enterprises, as they strive to survive in a competitive environment. However, achieving reasonable QoS goals to meet SLA agreement cost-effectively is challenging due to variation in workload over time. This problem can be solved if the system has the ability to predict the workload for the near future. In this paper, we present a novel topology-refining scheme based on a workload prediction mechanism. Predictions are made through a model based on a combination of SVR, autoregressive, and moving average model with a feedback mechanism. 
Our streaming system is designed to increase the overall performance by making the topology refining robust to the incoming workload on the fly, while still being able to achieve QoS goals of SLA constraints. Apache Flink distributed processing engine is used as a testbed in the paper. The result shows that the prediction scheme works well for both workloads, i.e., synthetic as well as real traces of data.}, } @article {pmid33150404, year = {2021}, author = {Wang, G and Wignall, J and Kinard, D and Singh, V and Foster, C and Adams, S and Pratt, W and Desai, AD}, title = {An implementation model for managing cloud-based longitudinal care plans for children with medical complexity.}, journal = {Journal of the American Medical Informatics Association : JAMIA}, volume = {28}, number = {1}, pages = {23-32}, pmid = {33150404}, issn = {1527-974X}, support = {K08 HS024299/HS/AHRQ HHS/United States ; }, mesh = {Adult ; Caregivers ; Child ; Chronic Disease/therapy ; *Cloud Computing ; Health Information Exchange ; Health Insurance Portability and Accountability Act ; Health Personnel ; Humans ; *Patient Care Planning/organization & administration ; *Patient Care Team ; Pediatrics ; United States ; }, abstract = {OBJECTIVE: We aimed to iteratively refine an implementation model for managing cloud-based longitudinal care plans (LCPs) for children with medical complexity (CMC).

MATERIALS AND METHODS: We conducted iterative 1-on-1 design sessions with CMC caregivers (ie, parents/legal guardians) and providers between August 2017 and March 2019. During audio-recorded sessions, we asked participants to walk through role-specific scenarios of how they would create, review, and edit an LCP using a cloud-based prototype, which we concurrently developed. Between sessions, we reviewed audio recordings to identify strategies that would mitigate barriers that participants reported relating to 4 processes for managing LCPs: (1) taking ownership, (2) sharing, (3) reviewing, and (4) editing. Analysis informed iterative implementation model revisions.

RESULTS: We conducted 30 design sessions, with 10 caregivers and 20 providers. Participants emphasized that cloud-based LCPs required a team of owners: the caregiver(s), a caregiver-designated clinician, and a care coordinator. Permission settings would need to include universal accessibility for emergency providers, team-level permission options, and some editing restrictions for caregivers. Notifications to review and edit the LCP should be sent to team members before and after clinic visits and after hospital encounters. Mitigating double documentation barriers would require alignment of data fields between the LCP and electronic health record to maximize interoperability.

DISCUSSION: These findings provide a model for how we may leverage emerging Health Insurance Portability and Accountability Act-compliant cloud computing technologies to support families and providers in comanaging health information for CMC.

CONCLUSIONS: Utilizing these management strategies when implementing cloud-based LCPs has the potential to improve team-based care across settings.}, } @article {pmid33150354, year = {2021}, author = {Long, A and Glogowski, A and Meppiel, M and De Vito, L and Engle, E and Harris, M and Ha, G and Schneider, D and Gabrielian, A and Hurt, DE and Rosenthal, A}, title = {The technology behind TB DEPOT: a novel public analytics platform integrating tuberculosis clinical, genomic, and radiological data for visual and statistical exploration.}, journal = {Journal of the American Medical Informatics Association : JAMIA}, volume = {28}, number = {1}, pages = {71-79}, pmid = {33150354}, issn = {1527-974X}, mesh = {Computational Biology ; Databases as Topic ; Genomics ; Humans ; *Internet ; *Medical Informatics Applications ; National Institute of Allergy and Infectious Diseases (U.S.) ; Radiology ; Software ; *Tuberculosis/diagnosis/drug therapy/genetics/prevention & control ; United States ; }, abstract = {OBJECTIVE: Clinical research informatics tools are necessary to support comprehensive studies of infectious diseases. The National Institute of Allergy and Infectious Diseases (NIAID) developed the publicly accessible Tuberculosis Data Exploration Portal (TB DEPOT) to address the complex etiology of tuberculosis (TB).

MATERIALS AND METHODS: TB DEPOT displays deidentified patient case data and facilitates analyses across a wide range of clinical, socioeconomic, genomic, and radiological factors. The solution is built using Amazon Web Services cloud-based infrastructure, .NET Core, Angular, Highcharts, R, PLINK, and other custom-developed services. Structured patient data, pathogen genomic variants, and medical images are integrated into the solution to allow seamless filtering across data domains.

RESULTS: Researchers can use TB DEPOT to query TB patient cases, create and save patient cohorts, and execute comparative statistical analyses on demand. The tool supports user-driven data exploration and fulfills the National Institutes of Health's Findable, Accessible, Interoperable, and Reusable (FAIR) principles.

DISCUSSION: TB DEPOT is the first tool of its kind in the field of TB research to integrate multidimensional data from TB patient cases. Its scalable and flexible architectural design has accommodated growth in the data, organizations, types of data, feature requests, and usage. Use of client-side technologies over server-side technologies and prioritizing maintenance have been important lessons learned. Future directions are dynamically prioritized and key functionality is shared through an application programming interface.

CONCLUSION: This paper describes the platform development methodology, resulting functionality, benefits, and technical considerations of a clinical research informatics application to support increased understanding of TB.}, } @article {pmid33150095, year = {2020}, author = {Frontoni, E and Romeo, L and Bernardini, M and Moccia, S and Migliorelli, L and Paolanti, M and Ferri, A and Misericordia, P and Mancini, A and Zingaretti, P}, title = {A Decision Support System for Diabetes Chronic Care Models Based on General Practitioner Engagement and EHR Data Sharing.}, journal = {IEEE journal of translational engineering in health and medicine}, volume = {8}, number = {}, pages = {3000112}, pmid = {33150095}, issn = {2168-2372}, abstract = {Objective Decision support systems (DSS) have been developed and promoted for their potential to improve quality of health care. However, there is a lack of common clinical strategy and a poor management of clinical resources and erroneous implementation of preventive medicine. Methods To overcome this problem, this work proposed an integrated system that relies on the creation and sharing of a database extracted from GPs' Electronic Health Records (EHRs) within the Netmedica Italian (NMI) cloud infrastructure. Although the proposed system is a pilot application specifically tailored for improving the chronic Type 2 Diabetes (T2D) care it could be easily targeted to effectively manage different chronic-diseases. The proposed DSS is based on EHR structure used by GPs in their daily activities following the most updated guidelines in data protection and sharing. The DSS is equipped with a Machine Learning (ML) method for analyzing the shared EHRs and thus tackling the high variability of EHRs. A novel set of T2D care-quality indicators are used specifically to determine the economic incentives and the T2D features are presented as predictors of the proposed ML approach. Results The EHRs from 41237 T2D patients were analyzed. 
No additional data collection, with respect to the standard clinical practice, was required. The DSS exhibited competitive performance (up to an overall accuracy of 98%±2% and macro-recall of 96%±1%) for classifying chronic care quality across the different follow-up phases. The chronic care quality model brought to a significant increase (up to 12%) of the T2D patients without complications. For GPs who agreed to use the proposed system, there was an economic incentive. A further bonus was assigned when performance targets are achieved. Conclusions The quality care evaluation in a clinical use-case scenario demonstrated how the empowerment of the GPs through the use of the platform (integrating the proposed DSS), along with the economic incentives, may speed up the improvement of care.}, } @article {pmid33143038, year = {2020}, author = {Chukhno, O and Chukhno, N and Araniti, G and Campolo, C and Iera, A and Molinaro, A}, title = {Optimal Placement of Social Digital Twins in Edge IoT Networks.}, journal = {Sensors (Basel, Switzerland)}, volume = {20}, number = {21}, pages = {}, pmid = {33143038}, issn = {1424-8220}, support = {813278//H2020 Marie Skłodowska-Curie Actions/ ; }, abstract = {In next-generation Internet of Things (IoT) deployments, every object such as a wearable device, a smartphone, a vehicle, and even a sensor or an actuator will be provided with a digital counterpart (twin) with the aim of augmenting the physical object's capabilities and acting on its behalf when interacting with third parties. Moreover, such objects can be able to interact and autonomously establish social relationships according to the Social Internet of Things (SIoT) paradigm. 
In such a context, the goal of this work is to provide an optimal solution for the social-aware placement of IoT digital twins (DTs) at the network edge, with the twofold aim of reducing the latency (i) between physical devices and corresponding DTs for efficient data exchange, and (ii) among DTs of friend devices to speed-up the service discovery and chaining procedures across the SIoT network. To this aim, we formulate the problem as a mixed-integer linear programming model taking into account limited computing resources in the edge cloud and social relationships among IoT devices.}, } @article {pmid33140820, year = {2021}, author = {Wang, YL and Wang, F and Shi, XX and Jia, CY and Wu, FX and Hao, GF and Yang, GF}, title = {Cloud 3D-QSAR: a web tool for the development of quantitative structure-activity relationship models in drug discovery.}, journal = {Briefings in bioinformatics}, volume = {22}, number = {4}, pages = {}, doi = {10.1093/bib/bbaa276}, pmid = {33140820}, issn = {1477-4054}, mesh = {*Drug Design ; *Drug Discovery ; *Internet ; Quantitative Structure-Activity Relationship ; *Software ; }, abstract = {Effective drug discovery contributes to the treatment of numerous diseases but is limited by high costs and long cycles. The Quantitative Structure-Activity Relationship (QSAR) method was introduced to evaluate the activity of a large number of compounds virtually, reducing the time and labor costs required for chemical synthesis and experimental determination. Hence, this method increases the efficiency of drug discovery. To meet the needs of researchers to utilize this technology, numerous QSAR-related web servers, such as Web-4D-QSAR and DPubChem, have been developed in recent years. However, none of the servers mentioned above can perform a complete QSAR modeling and supply activity prediction functions. 
We introduce Cloud 3D-QSAR by integrating the functions of molecular structure generation, alignment, molecular interaction field (MIF) computing and results analysis to provide a one-stop solution. We rigidly validated this server, and the activity prediction correlation was R2 = 0.934 in 834 test molecules. The sensitivity, specificity and accuracy were 86.9%, 94.5% and 91.5%, respectively, with AUC = 0.981, AUCPR = 0.971. The Cloud 3D-QSAR server may facilitate the development of good QSAR models in drug discovery. Our server is free and now available at http://chemyang.ccnu.edu.cn/ccb/server/cloud3dQSAR/ and http://agroda.gzu.edu.cn:9999/ccb/server/cloud3dQSAR/.}, } @article {pmid33138072, year = {2020}, author = {Zhao, L}, title = {Privacy-Preserving Distributed Analytics in Fog-Enabled IoT Systems.}, journal = {Sensors (Basel, Switzerland)}, volume = {20}, number = {21}, pages = {}, pmid = {33138072}, issn = {1424-8220}, abstract = {The Internet of Things (IoT) has evolved significantly with advances in gathering data that can be extracted to provide knowledge and facilitate decision-making processes. Currently, IoT data analytics encountered challenges such as growing data volumes collected by IoT devices and fast response requirements for time-sensitive applications in which traditional Cloud-based solution is unable to meet due to bandwidth and high latency limitations. In this paper, we develop a distributed analytics framework for fog-enabled IoT systems aiming to avoid raw data movement and reduce latency. The distributed framework leverages the computational capacities of all the participants such as edge devices and fog nodes and allows them to obtain the global optimal solution locally. To further enhance the privacy of data holders in the system, a privacy-preserving protocol is proposed using cryptographic schemes. 
Security analysis was conducted and it verified that exact private information about any edge device's raw data would not be inferred by an honest-but-curious neighbor in the proposed secure protocol. In addition, the accuracy of solution is unaffected in the secure protocol comparing to the proposed distributed algorithm without encryption. We further conducted experiments on three case studies: seismic imaging, diabetes progression prediction, and Enron email classification. On seismic imaging problem, the proposed algorithm can be up to one order of magnitude faster than the benchmarks in reaching the optimal solution. The evaluation results validate the effectiveness of the proposed methodology and demonstrate its potential to be a promising solution for data analytics in fog-enabled IoT systems.}, } @article {pmid33137686, year = {2021}, author = {Li, J and Tooth, S and Zhang, K and Zhao, Y}, title = {Visualisation of flooding along an unvegetated, ephemeral river using Google Earth Engine: Implications for assessment of channel-floodplain dynamics in a time of rapid environmental change.}, journal = {Journal of environmental management}, volume = {278}, number = {Pt 2}, pages = {111559}, doi = {10.1016/j.jenvman.2020.111559}, pmid = {33137686}, issn = {1095-8630}, mesh = {Agriculture ; Colorado ; *Floods ; Humans ; Hydrology ; *Rivers ; }, abstract = {Given rapid environmental change, the development of new, data-driven, interdisciplinary approaches is essential for improving assessment and management of river systems, especially with respect to flooding. In the world's extensive drylands, difficulties in obtaining field observations of major hydrological events mean that remote sensing techniques are commonly used to map river floods and assess flood impacts. Such techniques, however, are dependent on available cloud-free imagery during or immediately after peak discharge, and single images may omit important flood-related hydrogeomorphological events. 
Here, we combine multiple Landsat images from Google Earth Engine (GEE) with precipitation datasets and high-resolution (<0.65 m) satellite imagery to visualise flooding and assess the associated channel-floodplain dynamics along a 25 km reach of the unvegetated, ephemeral Río Colorado, Bolivia. After cloud and shadow removal, Landsat surface reflectance data were used to calculate the Modified Normalized Difference Water Index (MNDWI) and map flood extents and patterns. From 2004 through 2016, annual flooding area along the narrow (<30 m), shallow (<1.7 m), fine-grained (dominantly silt/clay) channels was positively correlated (R[2] = 0.83) with 2-day maximum precipitation totals. Rapid meander bend migration, bank erosion, and frequent overbank flooding was associated with formation of crevasse channels, splays, and headward-eroding channels, and with avulsion (shifting of flow from one channel to another). These processes demonstrate ongoing, widespread channel-floodplain dynamics despite low stream powers and cohesive sediments. 
Application of our study approaches to other dryland rivers will help generate comparative data on the controls, rates, patterns and timescales of channel-floodplain dynamics under scenarios of climate change and direct human impacts, with potential implications for improved river management.}, } @article {pmid33126457, year = {2020}, author = {Fang, J and Hu, J and Wei, J and Liu, T and Wang, B}, title = {An Efficient Resource Allocation Strategy for Edge-Computing Based Environmental Monitoring System.}, journal = {Sensors (Basel, Switzerland)}, volume = {20}, number = {21}, pages = {}, pmid = {33126457}, issn = {1424-8220}, support = {61202076//National Natural Science Foundation of China/ ; 4192007//Beijing Municipal Natural Science Foundation/ ; }, abstract = {The cloud computing and microsensor technology has greatly changed environmental monitoring, but it is difficult for cloud-computing based monitoring system to meet the computation demand of smaller monitoring granularity and increasing monitoring applications. As a novel computing paradigm, edge computing deals with this problem by deploying resource on edge network. However, the particularity of environmental monitoring applications is ignored by most previous studies. In this paper, we proposed a resource allocation algorithm and a task scheduling strategy to reduce the average completion latency of environmental monitoring application, when considering the characteristic of environmental monitoring system and dependency among task. Simulations are conducted, and the results show that compared with the traditional algorithms. 
With considering the emergency task, the proposed methods decrease the average completion latency by 21.6% in the best scenario.}, } @article {pmid33124526, year = {2021}, author = {Singh, K and Singh, S and Malhotra, J}, title = {Spectral features based convolutional neural network for accurate and prompt identification of schizophrenic patients.}, journal = {Proceedings of the Institution of Mechanical Engineers. Part H, Journal of engineering in medicine}, volume = {235}, number = {2}, pages = {167-184}, doi = {10.1177/0954411920966937}, pmid = {33124526}, issn = {2041-3033}, mesh = {Diagnosis, Computer-Assisted ; *Electroencephalography ; Humans ; Machine Learning ; Neural Networks, Computer ; *Quality of Life ; }, abstract = {Schizophrenia is a fatal mental disorder, which affects millions of people globally by the disturbance in their thinking, feeling and behaviour. In the age of the internet of things assisted with cloud computing and machine learning techniques, the computer-aided diagnosis of schizophrenia is essentially required to provide its patients with an opportunity to own a better quality of life. In this context, the present paper proposes a spectral features based convolutional neural network (CNN) model for accurate identification of schizophrenic patients using spectral analysis of multichannel EEG signals in real-time. This model processes acquired EEG signals with filtering, segmentation and conversion into frequency domain. Then, given frequency domain segments are divided into six distinct spectral bands like delta, theta-1, theta-2, alpha, beta and gamma. The spectral features including mean spectral amplitude, spectral power and Hjorth descriptors (Activity, Mobility and Complexity) are extracted from each band. These features are independently fed to the proposed spectral features-based CNN and long short-term memory network (LSTM) models for classification. 
This work also makes use of raw time-domain and frequency-domain EEG segments for classification using temporal CNN and spectral CNN models of same architectures respectively. The overall analysis of simulation results of all models exhibits that the proposed spectral features based CNN model is an efficient technique for accurate and prompt identification of schizophrenic patients among healthy individuals with average classification accuracies of 94.08% and 98.56% for two different datasets with optimally small classification time.}, } @article {pmid33120553, year = {2020}, author = {Romansky, RP and Noninska, IS}, title = {Challenges of the digital age for privacy and personal data protection.}, journal = {Mathematical biosciences and engineering : MBE}, volume = {17}, number = {5}, pages = {5288-5303}, doi = {10.3934/mbe.2020286}, pmid = {33120553}, issn = {1551-0018}, abstract = {Digital age can be described as a collection of different technological solutions as virtual environments, digital services, intelligent applications, machine learning, knowledge-based systems, etc., determining the specific characteristics of contemporary world globalization, e-communications, information sharing, virtualization, etc. However, there is an opportunity the technologies of the digital age to violate some basic principles of the information security and privacy by unregulated access to information and personal data, stored in different nodes of the global network. The goal of the article is to determine some special features of information and personal data protection and to summarise the main challenges of the digital age for the user's security and privacy. A brief presentation of the fundamental legislation in the fields of privacy and personal data protection is made in the introduction, followed by a review of related work on the topic. 
Components of information security for counteracting threats and attacks and basic principles in the organization of personal data protection are discussed. A summary of the basic challenges of the digital age is made by systematizing the negatives for user's privacy of the contemporary technologies as social computing, cloud services, Internet of Things, Big Data and Big Data Analytics and separate requirements to secure privacy of the participants based on General Data Protection Regulation principles are formulated.}, } @article {pmid33120510, year = {2020}, author = {Zhu, Y and Jiang, ZP and Mo, XH and Zhang, B and Al-Dhelaan, A and Al-Dhelaan, F}, title = {A study on the design methodology of TAC[3] for edge computing.}, journal = {Mathematical biosciences and engineering : MBE}, volume = {17}, number = {5}, pages = {4406-4421}, doi = {10.3934/mbe.2020243}, pmid = {33120510}, issn = {1551-0018}, abstract = {The following scenarios, such as complex application requirements, ZB (Zettabyte) order of magnitude of network data, and tens of billions of connected devices, pose serious challenges to the capabilities and security of the three pillars of ICT: Computing, network, and storage. Edge computing came into being. Following the design methodology of "description-synthesis-simulation-optimization", TAC[3] (Tile-Architecture Cluster Computing Core) was proposed as the lightweight accelerated ECN (Edge Computing Node). ECN with a Tile-Architecture be designed and simulated through the method of executable description specification and polymorphous parallelism DSE (Design Space Exploration). By reasonable configuration of the edge computing environment and constant optimization of typical application scenarios, such as convolutional neural network and processing of image and graphic, we can meet the challenges of network bandwidth, end-cloud delay and privacy security brought by massive data of the IoE. 
The philosophy of "Edge-Cloud complements each other, and Edge-AI energizes each other" will become a new generation of IoE behavior principle.}, } @article {pmid33119530, year = {2021}, author = {Wu, Z and Sun, J and Zhang, Y and Zhu, Y and Li, J and Plaza, A and Benediktsson, JA and Wei, Z}, title = {Scheduling-Guided Automatic Processing of Massive Hyperspectral Image Classification on Cloud Computing Architectures.}, journal = {IEEE transactions on cybernetics}, volume = {51}, number = {7}, pages = {3588-3601}, doi = {10.1109/TCYB.2020.3026673}, pmid = {33119530}, issn = {2168-2275}, abstract = {The large data volume and high algorithm complexity of hyperspectral image (HSI) problems have posed big challenges for efficient classification of massive HSI data repositories. Recently, cloud computing architectures have become more relevant to address the big computational challenges introduced in the HSI field. This article proposes an acceleration method for HSI classification that relies on scheduling metaheuristics to automatically and optimally distribute the workload of HSI applications across multiple computing resources on a cloud platform. By analyzing the procedure of a representative classification method, we first develop its distributed and parallel implementation based on the MapReduce mechanism on Apache Spark. The subtasks of the processing flow that can be processed in a distributed way are identified as divisible tasks. The optimal execution of this application on Spark is further formulated as a divisible scheduling framework that takes into account both task execution precedences and task divisibility when allocating the divisible and indivisible subtasks onto computing nodes. The formulated scheduling framework is an optimization procedure that searches for optimized task assignments and partition counts for divisible tasks. Two metaheuristic algorithms are developed to solve this divisible scheduling problem. 
The scheduling results provide an optimized solution to the automatic processing of HSI big data on clouds, improving the computational efficiency of HSI classification by exploring the parallelism during the parallel processing flow. Experimental results demonstrate that our scheduling-guided approach achieves remarkable speedups by facilitating the automatic processing of HSI classification on Spark, and is scalable to the increasing HSI data volume.}, } @article {pmid33114594, year = {2020}, author = {Krishnamurthi, R and Kumar, A and Gopinathan, D and Nayyar, A and Qureshi, B}, title = {An Overview of IoT Sensor Data Processing, Fusion, and Analysis Techniques.}, journal = {Sensors (Basel, Switzerland)}, volume = {20}, number = {21}, pages = {}, pmid = {33114594}, issn = {1424-8220}, abstract = {In the recent era of the Internet of Things, the dominant role of sensors and the Internet provides a solution to a wide variety of real-life problems. Such applications include smart city, smart healthcare systems, smart building, smart transport and smart environment. However, the real-time IoT sensor data include several challenges, such as a deluge of unclean sensor data and a high resource-consumption cost. As such, this paper addresses how to process IoT sensor data, fusion with other data sources, and analyses to produce knowledgeable insight into hidden data patterns for rapid decision-making. This paper addresses the data processing techniques such as data denoising, data outlier detection, missing data imputation and data aggregation. Further, it elaborates on the necessity of data fusion and various data fusion methods such as direct fusion, associated feature extraction, and identity declaration data fusion. This paper also aims to address data analysis integration with emerging technologies, such as cloud computing, fog computing and edge computing, towards various challenges in IoT sensor network and sensor data analysis. 
In summary, this paper is the first of its kind to present a complete overview of IoT sensor data processing, fusion and analysis techniques.}, } @article {pmid33113982, year = {2020}, author = {Biswash, SK and Jayakody, DNK}, title = {A Fog Computing-Based Device-Driven Mobility Management Scheme for 5G Networks.}, journal = {Sensors (Basel, Switzerland)}, volume = {20}, number = {21}, pages = {}, pmid = {33113982}, issn = {1424-8220}, support = {SPARC/2018-2019/P145/SL//SPARC, Ministry of Human Resource Development, India/ ; No.19-37-90037 and No.19-37-90105.//Russian Foundation for Basic Research/ ; NA//Framework of Competitiveness Enhancement Program of the National Research Tomsk Polytechnic University/ ; }, abstract = {The fog computing-based device-driven network is a promising solution for high data rates in modern cellular networks. It is a unique framework to reduce the generated-data, data management overheads, network scalability challenges, and help us to provide a pervasive computation environment for real-time network applications, where the mobile data is easily available and accessible to nearby fog servers. It explores a new dimension of the next generation network called fog networks. Fog networks is a complementary part of the cloud network environment. The proposed network architecture is a part of the newly emerged paradigm that extends the network computing infrastructure within the device-driven 5G communication system. This work explores a new design of the fog computing framework to support device-driven communication to achieve better Quality of Service (QoS) and Quality of Experience (QoE). In particular, we focus on, how potential is the fog computing orchestration framework? How it can be customized to the next generation of cellular communication systems? Next, we propose a mobility management procedure for fog networks, considering the static and dynamic mobile nodes. 
We compare our results with the legacy of cellular networks and observed that the proposed work has the least energy consumption, delay, latency, signaling cost as compared to LTE/LTE-A networks.}, } @article {pmid33113931, year = {2020}, author = {Wu, CY and Huang, KH}, title = {A Framework for Off-Line Operation of Smart and Traditional Devices of IoT Services.}, journal = {Sensors (Basel, Switzerland)}, volume = {20}, number = {21}, pages = {}, pmid = {33113931}, issn = {1424-8220}, support = {MOST 108-3011-F-036-001//Ministry of Science and Technology, Taiwan/ ; B108-I01-016//Tatung University/ ; }, abstract = {Recently, with the continuous evolution of information technology, various products such as Building Information, Internet of Things (IoT), Big Data, Cloud Computing and Machine Learning have been developed and have created a lifestyle change. A smart Internet of Things (IoT) system is formed by combining the communication capabilities of the internet with control, monitoring and identification services to integrate people, things and objects. However, in some IoT environments that have a weak signal, such as remote areas, warehouses or basements, the network may become unstable, meaning that the IoT system is unable to provide efficient services. This paper therefore presents a framework that ensures the reliability of IoT system services so that even if the IoT system cannot connect to the network, the system can provide the services offline. To avoid increasing the installation cost or replacing existing traditional devices with modern smart devices, this framework can also be used to control traditional devices. The system operation is convenient because users can operate all their smart and traditional devices under the IoT system through voice commands and/or a handheld microcontroller, thus reducing the manual operation of the user. 
The framework proposed in this paper can be applied to various smart scenarios, including smart warehouses, smart restaurants, smart homes, smart farms and smart factories, to improve people's quality of life and convenience, and create a humane and comfortable smart living environment.}, } @article {pmid33106798, year = {2020}, author = {Lei, H and O'Connell, R and Ehwerhemuepha, L and Taraman, S and Feaster, W and Chang, A}, title = {Agile clinical research: A data science approach to scrumban in clinical medicine.}, journal = {Intelligence-based medicine}, volume = {3}, number = {}, pages = {100009}, pmid = {33106798}, issn = {2666-5212}, abstract = {The COVID-19 pandemic has required greater minute-to-minute urgency of patient treatment in Intensive Care Units (ICUs), rendering the use of Randomized Controlled Trials (RCTs) too slow to be effective for treatment discovery. There is a need for agility in clinical research, and the use of data science to develop predictive models for patient treatment is a potential solution. However, rapidly developing predictive models in healthcare is challenging given the complexity of healthcare problems and the lack of regular interaction between data scientists and physicians. Data scientists can spend significant time working in isolation to build predictive models that may not be useful in clinical environments. We propose the use of an agile data science framework based on the Scrumban framework used in software development. Scrumban is an iterative framework, where in each iteration larger problems are broken down into simple do-able tasks for data scientists and physicians. The two sides collaborate closely in formulating clinical questions and developing and deploying predictive models into clinical settings. Physicians can provide feedback or new hypotheses given the performance of the model, and refinement of the model or clinical questions can take place in the next iteration. 
The rapid development of predictive models can now be achieved with increasing numbers of publicly available healthcare datasets and easily accessible cloud-based data science tools. What is truly needed are data scientist and physician partnerships ensuring close collaboration between the two sides in using these tools to develop clinically useful predictive models to meet the demands of the COVID-19 healthcare landscape.}, } @article {pmid33104194, year = {2021}, author = {Goonasekera, N and Mahmoud, A and Chilton, J and Afgan, E}, title = {GalaxyCloudRunner: enhancing scalable computing for Galaxy.}, journal = {Bioinformatics (Oxford, England)}, volume = {37}, number = {12}, pages = {1763-1765}, pmid = {33104194}, issn = {1367-4811}, support = {U24 HG006620/HG/NHGRI NIH HHS/United States ; U41 HG006620/HG/NHGRI NIH HHS/United States ; 5U41HG006620-07/NH/NIH HHS/United States ; }, mesh = {Azure Stains ; *Computational Biology ; Documentation ; Humans ; *Software ; }, abstract = {SUMMARY: The existence of more than 100 public Galaxy servers with service quotas is indicative of the need for an increased availability of compute resources for Galaxy to use. The GalaxyCloudRunner enables a Galaxy server to easily expand its available compute capacity by sending user jobs to cloud resources. User jobs are routed to the acquired resources based on a set of configurable rules and the resources can be dynamically acquired from any of four popular cloud providers (AWS, Azure, GCP or OpenStack) in an automated fashion.

GalaxyCloudRunner is implemented in Python and leverages Docker containers. The source code is MIT licensed and available at https://github.com/cloudve/galaxycloudrunner. The documentation is available at http://gcr.cloudve.org/.}, } @article {pmid33100917, year = {2020}, author = {Amaro, RE and Mulholland, AJ}, title = {Biomolecular Simulations in the Time of COVID19, and After.}, journal = {Computing in science & engineering}, volume = {22}, number = {6}, pages = {30-36}, pmid = {33100917}, issn = {1521-9615}, support = {/WT_/Wellcome Trust/United Kingdom ; BB/L01386X/1/BB_/Biotechnology and Biological Sciences Research Council/United Kingdom ; P41 GM103426/GM/NIGMS NIH HHS/United States ; R01 GM132826/GM/NIGMS NIH HHS/United States ; }, abstract = {COVID19 has changed life for people worldwide. Despite lockdowns globally, computational research has pressed on, working remotely and collaborating virtually on research questions in COVID19 and the virus it is caused by, SARS-CoV-2. Molecular simulations can help to characterize the function of viral and host proteins and have the potential to contribute to the search for vaccines and treatments. Changes in the modus operandi of research groups include broader adoption of the use of preprint servers, earlier and more open sharing of methods, models, and data, the use of social media to rapidly disseminate information, online seminars, and cloud-based virtual collaboration. Research funders and computing providers worldwide recognized the need to provide rapid and significant access to computational architectures. 
In this review, we discuss how the interplay of all of these factors is influencing the impact - both potential and realized - of biomolecular simulations in the fight against SARS-CoV-2.}, } @article {pmid33100581, year = {2020}, author = {Alam, M and Samad, MD and Vidyaratne, L and Glandon, A and Iftekharuddin, KM}, title = {Survey on Deep Neural Networks in Speech and Vision Systems.}, journal = {Neurocomputing}, volume = {417}, number = {}, pages = {302-321}, pmid = {33100581}, issn = {0925-2312}, support = {R01 EB020683/EB/NIBIB NIH HHS/United States ; }, abstract = {This survey presents a review of state-of-the-art deep neural network architectures, algorithms, and systems in vision and speech applications. Recent advances in deep artificial neural network algorithms and architectures have spurred rapid innovation and development of intelligent vision and speech systems. With availability of vast amounts of sensor data and cloud computing for processing and training of deep neural networks, and with increased sophistication in mobile and embedded technology, the next-generation intelligent systems are poised to revolutionize personal and commercial computing. This survey begins by providing background and evolution of some of the most successful deep learning models for intelligent vision and speech systems to date. An overview of large-scale industrial research and development efforts is provided to emphasize future trends and prospects of intelligent vision and speech systems. Robust and efficient intelligent systems demand low-latency and high fidelity in resource-constrained hardware platforms such as mobile devices, robots, and automobiles. Therefore, this survey also provides a summary of key challenges and recent successes in running deep neural networks on hardware-restricted platforms, i.e. within limited memory, battery life, and processing capabilities. 
Finally, emerging applications of vision and speech across disciplines such as affective computing, intelligent transportation, and precision medicine are discussed. To our knowledge, this paper provides one of the most comprehensive surveys on the latest developments in intelligent vision and speech applications from the perspectives of both software and hardware systems. Many of these emerging technologies using deep neural networks show tremendous promise to revolutionize research and development for future vision and speech systems.}, } @article {pmid33088611, year = {2020}, author = {Kovatch, P and Gai, L and Cho, HM and Fluder, E and Jiang, D}, title = {Optimizing High-Performance Computing Systems for Biomedical Workloads.}, journal = {IEEE International Symposium on Parallel & Distributed Processing, Workshops and Phd Forum : [proceedings]. IEEE International Symposium on Parallel & Distributed Processing, Workshops and Phd Forum}, volume = {2020}, number = {}, pages = {183-192}, pmid = {33088611}, issn = {2164-7062}, support = {S10 OD018522/OD/NIH HHS/United States ; S10 OD026880/OD/NIH HHS/United States ; }, abstract = {The productivity of computational biologists is limited by the speed of their workflows and subsequent overall job throughput. Because most biomedical researchers are focused on better understanding scientific phenomena rather than developing and optimizing code, a computing and data system implemented in an adventitious and/or non-optimized manner can impede the progress of scientific discovery. In our experience, most computational, life-science applications do not generally leverage the full capabilities of high-performance computing, so tuning a system for these applications is especially critical. To optimize a system effectively, systems staff must understand the effects of the applications on the system. 
Effective stewardship of the system includes an analysis of the impact of the applications on the compute cores, file system, resource manager and queuing policies. The resulting improved system design, and enactment of a sustainability plan, help to enable a long-term resource for productive computational and data science. We present a case study of a typical biomedical computational workload at a leading academic medical center supporting over $100 million per year in computational biology research. Over the past eight years, our high-performance computing system has enabled over 900 biomedical publications in four major areas: genetics and population analysis, gene expression, machine learning, and structural and chemical biology. We have upgraded the system several times in response to trends, actual usage, and user feedback. Major components crucial to this evolution include scheduling structure and policies, memory size, compute type and speed, parallel file system capabilities, and deployment of cloud technologies. We evolved a 70 teraflop machine to a 1.4 petaflop machine in seven years and grew our user base nearly 10-fold. For long-term stability and sustainability, we established a chargeback fee structure. Our overarching guiding principle for each progression has been to increase scientific throughput and enable enhanced scientific fidelity with minimal impact to existing user workflows or code. This highly-constrained system optimization has presented unique challenges, leading us to adopt new approaches to provide constructive pathways forward. 
We share our practical strategies resulting from our ongoing growth and assessments.}, } @article {pmid33079655, year = {2021}, author = {Guo, J and Tian, S and Liu, K and Guo, J}, title = {IoT-Enabled Fluorescence Sensor for Quantitative KET Detection and Anti-Drug Situational Awareness.}, journal = {IEEE transactions on nanobioscience}, volume = {20}, number = {1}, pages = {2-8}, doi = {10.1109/TNB.2020.3032121}, pmid = {33079655}, issn = {1558-2639}, mesh = {Adolescent ; *Awareness ; Humans ; Immunoassay ; Reproducibility of Results ; }, abstract = {Recently, drug abuse has become a worldwide concern. Among varieties of drugs, KET is found to be favorite in drug addicts, especially teenagers, for recreational purposes. KET is a kind of analgesic and anesthetic drug which can induce hallucinogenic and dissociative effects after high-dose abuse. Hence, it is critical to develop a rapid and sensitive detection method for strict drug control. In this study, we proposed a cloud-enabled smartphone based fluorescence sensor for quantitative detection of KET from human hair sample. The lateral flow immunoassay (LFIA) was used as the detecting strategy where UCNPs were introduced as fluorescent labels. The sensor was capable of identifying the up-converted fluorescence and calculating the signal intensities on TL and CL to obtain a T/C value, which was corresponding to the KET concentration. The sensor transmitted the test data to the cloud-enabled smartphone through Type-C interface, and the data were further uploaded to the edge of the network for cloud-edge computing and storage. The entire detection took only 5 minutes with high stability and reliability. The detection limit of KET was 1 ng/mL and a quantitative detection range from 1 to 150 ng/mL. Furthermore, based on the huge development of Internet of Things (IoT), an App was developed on the smartphone for anti-drug situational awareness. 
Based on this system, it was convenient for Police Department to perform on-site KET detection. Moreover, it was critical for prediction of the development trend of future events, benefiting much to constructing a harmonious society.}, } @article {pmid33066295, year = {2020}, author = {Janbi, N and Katib, I and Albeshri, A and Mehmood, R}, title = {Distributed Artificial Intelligence-as-a-Service (DAIaaS) for Smarter IoE and 6G Environments.}, journal = {Sensors (Basel, Switzerland)}, volume = {20}, number = {20}, pages = {}, pmid = {33066295}, issn = {1424-8220}, support = {RG-10-611-38//King Abdulaziz University/ ; }, abstract = {Artificial intelligence (AI) has taken us by storm, helping us to make decisions in everything we do, even in finding our "true love" and the "significant other". While 5G promises us high-speed mobile internet, 6G pledges to support ubiquitous AI services through next-generation softwarization, heterogeneity, and configurability of networks. The work on 6G is in its infancy and requires the community to conceptualize and develop its design, implementation, deployment, and use cases. Towards this end, this paper proposes a framework for Distributed AI as a Service (DAIaaS) provisioning for Internet of Everything (IoE) and 6G environments. The AI service is "distributed" because the actual training and inference computations are divided into smaller, concurrent, computations suited to the level and capacity of resources available with cloud, fog, and edge layers. Multiple DAIaaS provisioning configurations for distributed training and inference are proposed to investigate the design choices and performance bottlenecks of DAIaaS. Specifically, we have developed three case studies (e.g., smart airport) with eight scenarios (e.g., federated learning) comprising nine applications and AI delivery models (smart surveillance, etc.) and 50 distinct sensor and software modules (e.g., object tracker). 
The evaluation of the case studies and the DAIaaS framework is reported in terms of end-to-end delay, network usage, energy consumption, and financial savings with recommendations to achieve higher performance. DAIaaS will facilitate standardization of distributed AI provisioning, allow developers to focus on the domain-specific details without worrying about distributed training and inference, and help systemize the mass-production of technologies for smarter environments.}, } @article {pmid33064102, year = {2020}, author = {Kirchberg, J and Fritzmann, J and Weitz, J and Bork, U}, title = {eHealth Literacy of German Physicians in the Pre-COVID-19 Era: Questionnaire Study.}, journal = {JMIR mHealth and uHealth}, volume = {8}, number = {10}, pages = {e20099}, pmid = {33064102}, issn = {2291-5222}, mesh = {Adult ; *Attitude of Health Personnel ; COVID-19 ; Cohort Studies ; Coronavirus Infections/epidemiology ; Female ; Germany/epidemiology ; *Health Literacy ; Humans ; Male ; Middle Aged ; Pandemics ; Physicians/*psychology/statistics & numerical data ; Pneumonia, Viral/epidemiology ; Surveys and Questionnaires ; *Telemedicine ; }, abstract = {BACKGROUND: Digitalization is a disruptive technology that changes the way we deliver diagnostic procedures and treatments in medicine. Different stakeholders have varying interests in and expectations of the digitalization of modern medicine. Many recent digital advances in the medical field, such as the implementation of electronic health records, telemedical services, and mobile health apps, are increasingly used by medical professionals and patients. During the current pandemic outbreak of a novel coronavirus-caused respiratory disease (COVID-19), many modern information and communication technologies (ICT) have been used to overcome the physical barriers and limitations caused by government-issued curfews and workforce shortages. Therefore, the COVID-19 pandemic has led to a surge in the usage of modern ICT in medicine. 
At the same time, the eHealth literacy of physicians working with these technologies has probably not improved since our study.

OBJECTIVE: This paper describes a representative cohort of German physicians before the COVID-19 pandemic and their eHealth literacy and attitude towards modern ICT.

METHODS: A structured, self-developed questionnaire about user behavior and attitudes towards eHealth applications was administered to a representative cohort of 93 German physicians.

RESULTS: Of the 93 German physicians who participated in the study, 97% (90/93) use a mobile phone. Medical apps are used by 42% (39/93). Half of the surveyed physicians (47/93, 50%) use their private mobile phones for official purposes on a daily basis. Telemedicine is part of the daily routine for more than one-third (31/93, 33%) of all participants. More than 80% (76/93, 82%) of the trial participants state that their knowledge regarding the legal aspects and data safety of medical apps and cloud computing is insufficient.

CONCLUSIONS: Modern ICT is frequently used and mostly welcomed by German physicians. However, there is a tremendous lack of eHealth literacy and knowledge about the safe and secure implementation of these technologies in routine clinical practice.}, } @article {pmid33064097, year = {2020}, author = {Kim, JM and Lee, WR and Kim, JH and Seo, JM and Im, C}, title = {Light-Induced Fluorescence-Based Device and Hybrid Mobile App for Oral Hygiene Management at Home: Development and Usability Study.}, journal = {JMIR mHealth and uHealth}, volume = {8}, number = {10}, pages = {e17881}, pmid = {33064097}, issn = {2291-5222}, mesh = {Algorithms ; Fluorescence ; Humans ; *Mobile Applications ; Oral Hygiene ; }, abstract = {BACKGROUND: Dental diseases can be prevented through the management of dental plaques. Dental plaque can be identified using the light-induced fluorescence (LIF) technique that emits light at 405 nm. The LIF technique is more convenient than the commercial technique using a disclosing agent, but the result may vary for each individual as it still requires visual identification.

OBJECTIVE: The objective of this study is to introduce and validate a deep learning-based oral hygiene monitoring system that makes it easy to identify dental plaques at home.

METHODS: We developed a LIF-based system consisting of a device that can visually identify dental plaques and a mobile app that displays the location and area of dental plaques on oral images. The mobile app is programmed to automatically determine the location and distribution of dental plaques using a deep learning-based algorithm and present the results to the user as time series data. The mobile app is also built with convergence of naive and web applications so that the algorithm is executed on a cloud server to efficiently distribute computing resources.

RESULTS: The location and distribution of users' dental plaques could be identified via the hand-held LIF device or mobile app. The color correction filter in the device was developed using a color mixing technique. The mobile app was built as a hybrid app combining the functionalities of a native application and a web application. Through the scrollable WebView on the mobile app, changes in the time series of dental plaque could be confirmed. The algorithm for dental plaque detection was implemented to run on Amazon Web Services for object detection by single shot multibox detector and instance segmentation by Mask region-based convolutional neural network.

CONCLUSIONS: This paper shows that the system can be used as a home oral care product for timely identification and management of dental plaques. In the future, it is expected that these products will significantly reduce the social costs associated with dental diseases.}, } @article {pmid33050165, year = {2020}, author = {Butun, I and Sari, A and Österberg, P}, title = {Hardware Security of Fog End-Devices for the Internet of Things.}, journal = {Sensors (Basel, Switzerland)}, volume = {20}, number = {20}, pages = {}, pmid = {33050165}, issn = {1424-8220}, support = {IB2019- 8185//Swedish Foundation for International Cooperation in Research and Higher Education/ ; 773717//Horizon 2020 Framework Programme/ ; }, abstract = {The proliferation of the Internet of Things (IoT) caused new application needs to emerge as rapid response ability is missing in the current IoT end-devices. Therefore, Fog Computing has been proposed to be an edge component for the IoT networks as a remedy to this problem. In recent times, cyber-attacks are on the rise, especially towards infrastructure-less networks, such as IoT. Many botnet attack variants (Mirai, Torii, etc.) have shown that the tiny microdevices at the lower spectrum of the network are becoming a valued participant of a botnet, for further executing more sophisticated attacks against infrastructural networks. As such, the fog devices also need to be secured against cyber-attacks, not only software-wise, but also from hardware alterations and manipulations. 
Hence, this article first highlights the importance and benefits of fog computing for IoT networks, then investigates the means of providing hardware security to these devices with an enriched literature review, including but not limited to Hardware Security Module, Physically Unclonable Function, System on a Chip, and Tamper Resistant Memory.}, } @article {pmid33048709, year = {2021}, author = {Oppermann, M and Kincaid, R and Munzner, T}, title = {VizCommender: Computing Text-Based Similarity in Visualization Repositories for Content-Based Recommendations.}, journal = {IEEE transactions on visualization and computer graphics}, volume = {27}, number = {2}, pages = {495-505}, doi = {10.1109/TVCG.2020.3030387}, pmid = {33048709}, issn = {1941-0506}, abstract = {Cloud-based visualization services have made visual analytics accessible to a much wider audience than ever before. Systems such as Tableau have started to amass increasingly large repositories of analytical knowledge in the form of interactive visualization workbooks. When shared, these collections can form a visual analytic knowledge base. However, as the size of a collection increases, so does the difficulty in finding relevant information. Content-based recommendation (CBR) systems could help analysts in finding and managing workbooks relevant to their interests. Toward this goal, we focus on text-based content that is representative of the subject matter of visualizations rather than the visual encodings and style. We discuss the challenges associated with creating a CBR based on visualization specifications and explore more concretely how to implement the relevance measures required using Tableau workbook specifications as the source of content data. We also demonstrate what information can be extracted from these visualization specifications and how various natural language processing techniques can be used to compute similarity between workbooks as one way to measure relevance. 
We report on a crowd-sourced user study to determine if our similarity measure mimics human judgement. Finally, we choose latent Dirichlet allocation (LDA) as a specific model and instantiate it in a proof-of-concept recommender tool to demonstrate the basic function of our similarity measure.}, } @article {pmid33044796, year = {2020}, author = {Wang, L and Yan, B and Boasson, V}, title = {A national fight against COVID-19: lessons and experiences from China.}, journal = {Australian and New Zealand journal of public health}, volume = {44}, number = {6}, pages = {502-507}, pmid = {33044796}, issn = {1753-6405}, mesh = {COVID-19 ; China/epidemiology ; Coronavirus Infections/epidemiology/*prevention & control ; Disease Outbreaks/*prevention & control ; Emergency Service, Hospital/*organization & administration ; Government ; Humans ; Leadership ; Moral Obligations ; Pandemics/*prevention & control ; Pneumonia, Viral/epidemiology/*prevention & control ; Public Health/*methods ; Qualitative Research ; }, abstract = {OBJECTIVE: This paper aims to review the public health measures and actions taken during the fight against COVID-19 in China, to generate a model for prevention and control public health emergency by summarising the lessons and experiences gained.

METHODS: This paper adopts a widely accepted qualitative research and coding method to form an analysis on word materials.

RESULTS: Although Chinese CDC didn't work effectively in the early stages on risk identification and warning, China was able to respond quickly and successfully to this medical emergency after the initial shock of the awareness of a novel epidemic with a swift implementation of national-scale health emergency management.

CONCLUSIONS: The success in fighting against COVID-19 in China can be attributed to: 1) adaptable governance to changing situations; 2) culture of moral compliance with rules; 3) trusted collaboration between government and people; 4) an advanced technical framework ABCD+5G (A-Artificial intelligence; B-Block chain; C-Cloud computing; D-Big data). Implications for public health: This paper constructs a conceptual model for pandemic management based on the lessons and experiences of fighting COVID-19 in China. It provides insights for pandemic control and public emergency management in similar context.}, } @article {pmid33030032, year = {2022}, author = {Yu, Z and Jung, D and Park, S and Hu, Y and Huang, K and Rasco, BA and Wang, S and Ronholm, J and Lu, X and Chen, J}, title = {Smart traceability for food safety.}, journal = {Critical reviews in food science and nutrition}, volume = {62}, number = {4}, pages = {905-916}, doi = {10.1080/10408398.2020.1830262}, pmid = {33030032}, issn = {1549-7852}, mesh = {Food ; *Food Safety ; *Food Supply ; Humans ; }, abstract = {Current food production faces a tremendous challenge due to the growing human population. The global population is estimated to reach 9 billion by 2050 with 70% more food being required. Safe food is an important dimension of food security, and food traceability across the supply chain is a key component of this. However, current food traceability systems are challenged by frequent occurrences of food safety incidents and food recalls that have damaged consumer confidence, caused huge economic loss, and put pressure on food safety agencies. This review focuses on smart food traceability that has the potential to significantly improve food safety in global food supply chains. 
The basic concepts and critical perspectives for various detection strategies for food safety are summarized, including portable detection devices, smart indicators and sensors integrated on food packages, and data-assisted whole-genome sequencing. In addition, new digital technologies, such as Internet-of-things (IoTs) and cloud computing, are discussed with the aim of providing readers with an overview of the exciting opportunities in smart food traceability systems.}, } @article {pmid33019245, year = {2020}, author = {Ilokah, M and Eklund, JM}, title = {A Secure Privacy Preserving Cloud-based Framework for Sharing Electronic Health Data.}, journal = {Annual International Conference of the IEEE Engineering in Medicine and Biology Society. IEEE Engineering in Medicine and Biology Society. Annual International Conference}, volume = {2020}, number = {}, pages = {5592-5597}, doi = {10.1109/EMBC44109.2020.9175792}, pmid = {33019245}, issn = {2694-0604}, mesh = {*Cloud Computing ; Computer Security ; Electronic Health Records ; Information Storage and Retrieval ; *Privacy ; }, abstract = {There exists a need for sharing user health data, especially with institutes for research purposes, in a secure fashion. This is especially true in the case of a system that includes a third party storage service, such as cloud computing, which limits the control of the data owner. The use of encryption for secure data storage continues to evolve to meet the need for flexible and fine-grained access control. This evolution has led to the development of Attribute Based Encryption (ABE). The use of ABE to ensure the security and privacy of health data has been explored. This paper presents an ABE based framework which allows for the secure outsourcing of the more computationally intensive processes for data decryption to the cloud servers. 
This reduces the time needed for decryption to occur at the user end and reduces the amount of computational power needed by users to access data.}, } @article {pmid33018935, year = {2020}, author = {Cheon, A and Jung, SY and Prather, C and Sarmiento, M and Wong, K and Woodbridge, DM}, title = {A Machine Learning Approach to Detecting Low Medication State with Wearable Technologies.}, journal = {Annual International Conference of the IEEE Engineering in Medicine and Biology Society. IEEE Engineering in Medicine and Biology Society. Annual International Conference}, volume = {2020}, number = {}, pages = {4252-4255}, doi = {10.1109/EMBC44109.2020.9176310}, pmid = {33018935}, issn = {2694-0604}, mesh = {Humans ; *Machine Learning ; Medication Adherence ; *Wearable Electronic Devices ; }, abstract = {Medication adherence is a critical component and implicit assumption of the patient life cycle that is often violated, incurring financial and medical costs to both patients and the medical system at large. As obstacles to medication adherence are complex and varied, approaches to overcome them must themselves be multifaceted. This paper demonstrates one such approach using sensor data recorded by an Apple Watch to detect low counts of pill medication in standard prescription bottles. We use distributed computing on a cloud-based platform to efficiently process large volumes of high-frequency data and train a Gradient Boosted Tree machine learning model. 
Our final model yielded average cross-validated accuracy and F1 scores of 80.27% and 80.22%, respectively. We conclude this paper with two use cases in which wearable devices such as the Apple Watch can contribute to efforts to improve patient medication adherence.}, } @article {pmid33018783, year = {2020}, author = {LeMoyne, R and Mastroianni, T and Whiting, D and Tomycz, N}, title = {Parametric evaluation of deep brain stimulation parameter configurations for Parkinson's disease using a conformal wearable and wireless inertial sensor system and machine learning.}, journal = {Annual International Conference of the IEEE Engineering in Medicine and Biology Society. IEEE Engineering in Medicine and Biology Society. Annual International Conference}, volume = {2020}, number = {}, pages = {3606-3611}, doi = {10.1109/EMBC44109.2020.9175408}, pmid = {33018783}, issn = {2694-0604}, mesh = {*Deep Brain Stimulation ; Humans ; Machine Learning ; *Parkinson Disease/therapy ; Tremor/therapy ; *Wearable Electronic Devices ; }, abstract = {Deep brain stimulation enables highly specified patient-unique therapeutic intervention ameliorating the symptoms of Parkinson's disease. Inherent to the efficacy of deep brain stimulation is the acquisition of an optimal parameter configuration. Using conventional methods, the optimization process for tuning the deep brain stimulation system parameters can intrinsically induce strain on clinical resources. An advanced means of quantifying Parkinson's hand tremor and distinguishing between parameter settings would be highly beneficial. The conformal wearable and wireless inertial sensor system, such as the BioStamp nPoint, has a volumetric profile on the order of a bandage that readily enables convenient quantification of Parkinson's disease hand tremor. Furthermore, the BioStamp nPoint has been certified by the FDA as a 510(k) medical device for acquisition of medical grade data. 
Parametric variation of the amplitude parameter for deep brain stimulation can be quantified through the BioStamp nPoint conformal wearable and wireless inertial sensor system mounted to the dorsum of the hand. The acquired inertial sensor signal data can be wirelessly transmitted to a secure Cloud computing environment for post-processing. The quantified inertial sensor data for the parametric study of the effects of varying amplitude can be distinguished through machine learning classification. Software automation through Python can consolidate the inertial sensor data into a suitable feature set format. Using the multilayer perceptron neural network considerable machine learning classification accuracy is attained to distinguish multiple parametric settings of amplitude for deep brain stimulation, such as 4.0 mA, 2.5 mA, 1.0 mA, and 'Off' status representing a baseline. These findings constitute an advance toward the pathway of attaining real-time closed loop automated parameter configuration tuning for treatment of Parkinson's disease using deep brain stimulation.}, } @article {pmid33007867, year = {2020}, author = {Wu, HL and Chang, CC and Zheng, YZ and Chen, LS and Chen, CC}, title = {A Secure IoT-Based Authentication System in Cloud Computing Environment.}, journal = {Sensors (Basel, Switzerland)}, volume = {20}, number = {19}, pages = {}, pmid = {33007867}, issn = {1424-8220}, abstract = {The Internet of Things (IoT) is currently the most popular field in communication and information techniques. However, designing a secure and reliable authentication scheme for IoT-based architectures is still a challenge. In 2019, Zhou et al. showed that schemes proposed by Amin et al. and Maitra et al. are vulnerable to off-line guessing attacks, user tracking attacks, etc. 
On this basis, a lightweight authentication scheme based on IoT is proposed, and an authentication scheme based on IoT is proposed, which can resist various types of attacks and realize key security features such as user audit, mutual authentication, and session security. However, we found weaknesses in the scheme upon evaluation. Hence, we proposed an enhanced scheme based on their mechanism, thus achieving the security requirements and resisting well-known attacks.}, } @article {pmid33001812, year = {2022}, author = {Li, P and Zhao, YB and Kang, Y}, title = {Integrated Channel-Aware Scheduling and Packet-Based Predictive Control for Wireless Cloud Control Systems.}, journal = {IEEE transactions on cybernetics}, volume = {52}, number = {5}, pages = {2735-2749}, doi = {10.1109/TCYB.2020.3019179}, pmid = {33001812}, issn = {2168-2275}, abstract = {The scheduling and control of wireless cloud control systems involving multiple independent control systems and a centralized cloud computing platform are investigated. For such systems, the scheduling of the data transmission as well as some particular design of the controller can be equally important. From this observation, we propose a dual channel-aware scheduling strategy under the packet-based model predictive control framework, which integrates a decentralized channel-aware access strategy for each sensor, a centralized access strategy for the controllers, and a packet-based predictive controller to stabilize each control system. First, the decentralized scheduling strategy for each sensor is set in a noncooperative game framework and is then designed with asymptotical convergence. Then, the central scheduler for the controllers takes advantage of a prioritized threshold strategy, which outperforms a random one neglecting the information of the channel gains. 
Finally, we prove the stability for each system by constructing a new Lyapunov function, and further reveal the dependence of the control system stability on the prediction horizon and successful access probabilities of each sensor and controller. These theoretical results are successfully verified by numerical simulation.}, } @article {pmid32989184, year = {2020}, author = {Eisa, M and Sandhu, A and Prakash, R and Ganocy, SJ and Fass, R}, title = {The Risk of Acute Myocardial Infarction in Patients With Gastroesophageal Reflux Disease.}, journal = {Journal of neurogastroenterology and motility}, volume = {26}, number = {4}, pages = {471-476}, pmid = {32989184}, issn = {2093-0879}, abstract = {BACKGROUND/AIMS: A number of inflammatory mediators have been documented to be elevated in gastroesophageal reflux disease (GERD). Similar inflammatory mediators are involved in coronary artery disease. Thus, the aim of the study is to determine if GERD is a risk factor for developing acute myocardial infarction (AMI).

METHODS: We used Explorys, a private cloud-based data store to which a number of health care systems feed information. We identified a cohort of GERD patients who have undergone an esophagogastroduodenoscopy compared to those without GERD. Incidence of AMI was studied after statistically controlling for known AMI risk factors.

RESULTS: Total of 200 400 patients were included in the GERD group and 386 800 patients in non-GERD group. The primary event of AMI occurred in 17 200 patients in the GERD group (8.6%) vs 24 300 in non-GERD group (6.3%). Using logistic regression analysis and controlling for 6 major risk factors which included male gender (OR, 1.09; 95% CI, 1.07-1.11; P < 0.001), hypertension (OR, 6.53; 95% CI, 6.21-6.88; P < 0.001), hyperlipidemia (OR, 3.08; 95% CI, 2.96-3.20; P < 0.001), diabetes mellitus (OR, 1.72; 95% CI, 1.69- 1.76; P < 0.001), obesity (OR, 1.02; 95% CI, 1.00-1.04; P = 0.044), and smoking (OR, 1.38; 95% CI, 1.35-1.41; P < 0.001). The odds of developing AMI in the GERD population was 1.11 (95% CI, 1.08-1.13; P < 0.001). GERD had higher odds of developing AMI than male gender or obesity in our study.

CONCLUSIONS: This study demonstrated that GERD is a risk factor for AMI, higher than male gender and obesity. However, the increased risk may be clinically insignificant.}, } @article {pmid32977409, year = {2020}, author = {Grigorescu, S and Cocias, T and Trasnea, B and Margheri, A and Lombardi, F and Aniello, L}, title = {Cloud2Edge Elastic AI Framework for Prototyping and Deployment of AI Inference Engines in Autonomous Vehicles.}, journal = {Sensors (Basel, Switzerland)}, volume = {20}, number = {19}, pages = {}, pmid = {32977409}, issn = {1424-8220}, abstract = {Self-driving cars and autonomous vehicles are revolutionizing the automotive sector, shaping the future of mobility altogether. Although the integration of novel technologies such as Artificial Intelligence (AI) and Cloud/Edge computing provides golden opportunities to improve autonomous driving applications, there is the need to modernize accordingly the whole prototyping and deployment cycle of AI components. This paper proposes a novel framework for developing so-called AI Inference Engines for autonomous driving applications based on deep learning modules, where training tasks are deployed elastically over both Cloud and Edge resources, with the purpose of reducing the required network bandwidth, as well as mitigating privacy issues. Based on our proposed data driven V-Model, we introduce a simple yet elegant solution for the AI components development cycle, where prototyping takes place in the cloud according to the Software-in-the-Loop (SiL) paradigm, while deployment and evaluation on the target ECUs (Electronic Control Units) is performed as Hardware-in-the-Loop (HiL) testing. 
The effectiveness of the proposed framework is demonstrated using two real-world use-cases of AI inference engines for autonomous vehicles, that is environment perception and most probable path prediction.}, } @article {pmid32974110, year = {2020}, author = {Cheng, CW and Brown, CR and Venugopalan, J and Wang, MD}, title = {Towards an Effective Patient Health Engagement System Using Cloud-Based Text Messaging Technology.}, journal = {IEEE journal of translational engineering in health and medicine}, volume = {8}, number = {}, pages = {2700107}, pmid = {32974110}, issn = {2168-2372}, abstract = {Patient and health provider interaction via text messaging (TM) has become an accepted form of communication, often favored by adolescents and young adults. While integration of TM in disease management has aided health interventions and behavior modifications, broader adoption is hindered by expense, fixed reporting schedules, and monotonic communication. A low-cost, flexible TM reporting system (REMOTES) was developed using inexpensive cloud-based services with features of two-way communication, personalized reporting scheduling, and scalable and secured data storage. REMOTES is a template-based reporting tool adaptable to a wide-range of complexity in response formats. In a pilot study, 27 adolescents with sickle cell disease participated to assess feasibility of REMOTES in both inpatient and outpatient settings. Subject compliance with at least one daily self-report pain query was 94.9% (112/118) during inpatient and 91.1% (327/359) during outpatient, with an overall accuracy of 99.2% (970/978). With use of a more complex 8-item questionnaire, 30% (7/21) inpatient and 66.6% (36/54) outpatient responses were reported with 98.1% (51/52) reporting accuracy. All participants expressed high pre-trial expectation (88%) and post-trial satisfaction (89%). 
The study suggests that cloud-based text messaging is feasible and an easy-to-use solution for low-cost and personalized patient engagement.}, } @article {pmid32970755, year = {2020}, author = {Wang, X and Qiu, P}, title = {A freight integer linear programming model under fog computing and its application in the optimization of vehicle networking deployment.}, journal = {PloS one}, volume = {15}, number = {9}, pages = {e0239628}, pmid = {32970755}, issn = {1932-6203}, mesh = {City Planning/standards ; *Computer Simulation ; *Internet of Things ; Motor Vehicles/*statistics & numerical data ; Remote Sensing Technology ; }, abstract = {The increase in data amount makes the traditional Internet of Vehicles (IoV) fail to meet users' needs. Hence, the IoV is explored in series. To study the construction of freight integer linear programming (ILP) model based on fog computing (FG), and to analyze the application of the model in the optimization of the networking deployment (ND) of the IoV. FG and ILP are combined to build a freight computing ILP model. The model is used to analyze the application of ND optimization in the IoV system through simulations. The results show that while analyzing the ND results in different scenarios, the model is more suitable for small-scale scenarios and can optimize the objective function; however, its utilization rate is low in large-scale scenarios. While comparing and analyzing the network cost and running time, compared with traditional cloud computing solutions, the ND solution based on FG requires less cost, shorter running time, and has apparent effectiveness and efficiency. Therefore, it is found that the FG-based model has low cost, short running time, and apparent efficiency, which provides an experimental basis for the application of the later deployment of freight vehicles (FVs) in the Internet of Things (IoT) system for ND optimization. 
The results will provide important theoretical support for the overall deployment of IoV.}, } @article {pmid32969658, year = {2020}, author = {Sun, G and Jin, Y and Li, S and Yang, Z and Shi, B and Chang, C and Abramov, YA}, title = {Virtual Coformer Screening by Crystal Structure Predictions: Crucial Role of Crystallinity in Pharmaceutical Cocrystallization.}, journal = {The journal of physical chemistry letters}, volume = {11}, number = {20}, pages = {8832-8838}, doi = {10.1021/acs.jpclett.0c02371}, pmid = {32969658}, issn = {1948-7185}, mesh = {Acetaminophen/*chemistry ; Computer Simulation ; Crystallization ; Density Functional Theory ; Drug Evaluation, Preclinical ; Indomethacin/*chemistry ; Models, Molecular ; Pharmaceutical Preparations/*chemistry ; Thermodynamics ; }, abstract = {One of the most popular strategies of the optimization of drug properties in the pharmaceutical industry appears to be a solid form changing into a cocrystalline form. A number of virtual screening approaches have been previously developed to allow a selection of the most promising cocrystal formers (coformers) for an experimental follow-up. A significant drawback of those methods is related to the lack of accounting for the crystallinity contribution to cocrystal formation. To address this issue, we propose in this study two virtual coformer screening approaches based on a modern cloud-computing crystal structure prediction (CSP) technology at a dispersion-corrected density functional theory (DFT-D) level. The CSP-based methods were for the first time validated on challenging cases of indomethacin and paracetamol cocrystallization, for which the previously developed approaches provided poor predictions. The calculations demonstrated a dramatic improvement of the virtual coformer screening performance relative to the other methods. 
It is demonstrated that the crystallinity contribution to the formation of paracetamol and indomethacin cocrystals is a dominant one and, therefore, should not be ignored in the virtual screening calculations. Our results encourage a broad utilization of the proposed CSP-based technology in the pharmaceutical industry as the only virtual coformer screening method that directly accounts for the crystallinity contribution.}, } @article {pmid32968122, year = {2020}, author = {Peter, BG and Messina, JP and Lin, Z and Snapp, SS}, title = {Crop climate suitability mapping on the cloud: a geovisualization application for sustainable agriculture.}, journal = {Scientific reports}, volume = {10}, number = {1}, pages = {15487}, pmid = {32968122}, issn = {2045-2322}, abstract = {Climate change, food security, and environmental sustainability are pressing issues faced by today's global population. As production demands increase and climate threatens crop productivity, agricultural research develops innovative technologies to meet these challenges. Strategies include biodiverse cropping arrangements, new crop introductions, and genetic modification of crop varieties that are resilient to climatic and environmental stressors. Geography in particular is equipped to address a critical question in this pursuit-when and where can crop system innovations be introduced? This manuscript presents a case study of the geographic scaling potential utilizing common bean, delivers an open access Google Earth Engine geovisualization application for mapping the fundamental climate niche of any crop, and discusses food security and legume biodiversity in Sub-Saharan Africa. The application is temporally agile, allowing variable growing season selections and the production of 'living maps' that are continually producible as new data become available. 
This is an essential communication tool for the future, as practitioners can evaluate the potential geographic range for newly-developed, experimental, and underrepresented crop varieties for facilitating sustainable and innovative agroecological solutions.}, } @article {pmid32967094, year = {2020}, author = {Tahir, A and Chen, F and Khan, HU and Ming, Z and Ahmad, A and Nazir, S and Shafiq, M}, title = {A Systematic Review on Cloud Storage Mechanisms Concerning e-Healthcare Systems.}, journal = {Sensors (Basel, Switzerland)}, volume = {20}, number = {18}, pages = {}, pmid = {32967094}, issn = {1424-8220}, mesh = {*Cloud Computing ; *Information Storage and Retrieval ; Reproducibility of Results ; *Telemedicine ; }, abstract = {As the expenses of medical care administrations rise and medical services experts are becoming rare, it is up to medical services organizations and institutes to consider the implementation of medical Health Information Technology (HIT) innovation frameworks. HIT permits health associations to smooth out their considerable cycles and offer types of assistance in a more productive and financially savvy way. With the rise of Cloud Storage Computing (CSC), an enormous number of associations and undertakings have moved their healthcare data sources to distributed storage. As the information can be mentioned whenever universally, the accessibility of information becomes an urgent need. Nonetheless, outages in cloud storage essentially influence the accessibility level. Like the other basic variables of cloud storage (e.g., reliability quality, performance, security, and protection), availability also directly impacts the data in cloud storage for e-Healthcare systems. In this paper, we systematically review cloud storage mechanisms concerning the healthcare environment. Additionally, in this paper, the state-of-the-art cloud storage mechanisms are critically reviewed for e-Healthcare systems based on their characteristics. 
In short, this paper summarizes existing literature based on cloud storage and its impact on healthcare, and it likewise helps researchers, medical specialists, and organizations with a solid foundation for future studies in the healthcare environment.}, } @article {pmid35582325, year = {2022}, author = {Yassine, A and Hossain, MS}, title = {COVID-19 Networking Demand: An Auction-Based Mechanism for Automated Selection of Edge Computing Services.}, journal = {IEEE transactions on network science and engineering}, volume = {9}, number = {1}, pages = {308-318}, pmid = {35582325}, issn = {2327-4697}, abstract = {Network and cloud service providers are facing an unprecedented challenge to meet the demand of end-users during the COVID-19 pandemic. Currently, billions of people around the world are ordered to stay at home and use remote connection technologies to prevent the spread of the disease. The COVID-19 crisis brought a new reality to network service providers that will eventually accelerate the deployment of edge computing resources to attract the massive influx of users' traffic. The user can elect to procure its resource needs from any edge computing provider based on a variety of attributes such as price and quality. The main challenge for the user is how to choose between the price and multiple quality of service deals when such offerings are changing continually. This problem falls under multi-attribute decision-making. This paper investigates and proposes a novel auction mechanism by which network service brokers would be able to automate the selection of edge computing offers to support their end-users. We also propose a multi-attribute decision-making model that allows the broker to maximize its utility when several bids from edge-network providers are present. 
The evaluation and experimentation show the practicality and robustness of the proposed model.}, } @article {pmid32966438, year = {2020}, author = {Cerasoli, FT and Sherbert, K and Sławińska, J and Buongiorno Nardelli, M}, title = {Quantum computation of silicon electronic band structure.}, journal = {Physical chemistry chemical physics : PCCP}, volume = {22}, number = {38}, pages = {21816-21822}, doi = {10.1039/d0cp04008h}, pmid = {32966438}, issn = {1463-9084}, abstract = {Development of quantum architectures during the last decade has inspired hybrid classical-quantum algorithms in physics and quantum chemistry that promise simulations of fermionic systems beyond the capability of modern classical computers, even before the era of quantum computing fully arrives. Strong research efforts have been recently made to obtain minimal depth quantum circuits which could accurately represent chemical systems. Here, we show that unprecedented methods used in quantum chemistry, designed to simulate molecules on quantum processors, can be extended to calculate properties of periodic solids. In particular, we present minimal depth circuits implementing the variational quantum eigensolver algorithm and successfully use it to compute the band structure of silicon on a quantum machine for the first time. 
We are convinced that the presented quantum experiments performed on cloud-based platforms will stimulate more intense studies towards scalable electronic structure computation of advanced quantum materials.}, } @article {pmid32966223, year = {2020}, author = {Whaiduzzaman, M and Hossain, MR and Shovon, AR and Roy, S and Laszka, A and Buyya, R and Barros, A}, title = {A Privacy-Preserving Mobile and Fog Computing Framework to Trace and Prevent COVID-19 Community Transmission.}, journal = {IEEE journal of biomedical and health informatics}, volume = {24}, number = {12}, pages = {3564-3575}, pmid = {32966223}, issn = {2168-2208}, mesh = {COVID-19/*transmission/virology ; Humans ; Mobile Applications ; *Privacy ; SARS-CoV-2/isolation & purification ; }, abstract = {To slow down the spread of COVID-19, governments worldwide are trying to identify infected people, and contain the virus by enforcing isolation, and quarantine. However, it is difficult to trace people who came into contact with an infected person, which causes widespread community transmission, and mass infection. To address this problem, we develop an e-government Privacy-Preserving Mobile, and Fog computing framework entitled PPMF that can trace infected, and suspected cases nationwide. We use personal mobile devices with contact tracing app, and two types of stationary fog nodes, named Automatic Risk Checkers (ARC), and Suspected User Data Uploader Node (SUDUN), to trace community transmission alongside maintaining user data privacy. Each user's mobile device receives a Unique Encrypted Reference Code (UERC) when registering on the central application. The mobile device, and the central application both generate Rotational Unique Encrypted Reference Code (RUERC), which broadcasted using the Bluetooth Low Energy (BLE) technology. The ARCs are placed at the entry points of buildings, which can immediately detect if there are positive or suspected cases nearby. 
If any confirmed case is found, the ARCs broadcast pre-cautionary messages to nearby people without revealing the identity of the infected person. The SUDUNs are placed at the health centers that report test results to the central cloud application. The reported data is later used to map between infected, and suspected cases. Therefore, using our proposed PPMF framework, governments can let organizations continue their economic activities without complete lockdown.}, } @article {pmid32965236, year = {2020}, author = {Brown, AP and Randall, SM}, title = {Secure Record Linkage of Large Health Data Sets: Evaluation of a Hybrid Cloud Model.}, journal = {JMIR medical informatics}, volume = {8}, number = {9}, pages = {e18920}, pmid = {32965236}, issn = {2291-9694}, abstract = {BACKGROUND: The linking of administrative data across agencies provides the capability to investigate many health and social issues with the potential to deliver significant public benefit. Despite its advantages, the use of cloud computing resources for linkage purposes is scarce, with the storage of identifiable information on cloud infrastructure assessed as high risk by data custodians.

OBJECTIVE: This study aims to present a model for record linkage that utilizes cloud computing capabilities while assuring custodians that identifiable data sets remain secure and local.

METHODS: A new hybrid cloud model was developed, including privacy-preserving record linkage techniques and container-based batch processing. An evaluation of this model was conducted with a prototype implementation using large synthetic data sets representative of administrative health data.

RESULTS: The cloud model kept identifiers on premises and uses privacy-preserved identifiers to run all linkage computations on cloud infrastructure. Our prototype used a managed container cluster in Amazon Web Services to distribute the computation using existing linkage software. Although the cost of computation was relatively low, the use of existing software resulted in an overhead of processing of 35.7% (149/417 min execution time).

CONCLUSIONS: The result of our experimental evaluation shows the operational feasibility of such a model and the exciting opportunities for advancing the analysis of linkage outputs.}, } @article {pmid32960944, year = {2021}, author = {Huang, W and Zheng, P and Cui, Z and Li, Z and Gao, Y and Yu, H and Tang, Y and Yuan, X and Zhang, Z}, title = {MMAP: a cloud computing platform for mining the maximum accuracy of predicting phenotypes from genotypes.}, journal = {Bioinformatics (Oxford, England)}, volume = {37}, number = {9}, pages = {1324-1326}, pmid = {32960944}, issn = {1367-4811}, mesh = {Animals ; Bayes Theorem ; *Cloud Computing ; Genomics ; Genotype ; Humans ; *Models, Genetic ; Phenotype ; Polymorphism, Single Nucleotide ; }, abstract = {UNLABELLED: Accurately predicting phenotypes from genotypes holds great promise to improve health management in humans and animals, and breeding efficiency in animals and plants. Although many prediction methods have been developed, the optimal method differs across datasets due to multiple factors, including species, environments, populations and traits of interest. Studies have demonstrated that the number of genes underlying a trait and its heritability are the two key factors that determine which method fits the trait the best. In many cases, however, these two factors are unknown for the traits of interest. We developed a cloud computing platform for Mining the Maximum Accuracy of Predicting phenotypes from genotypes (MMAP) using unsupervised learning on publicly available real data and simulated data. MMAP provides a user interface to upload input data, manage projects and analyses and download the output results. The platform is free for the public to conduct computations for predicting phenotypes and genetic merit using the best prediction method optimized from many available ones, including Ridge Regression, gBLUP, compressed BLUP, Bayesian LASSO, Bayes A, B, Cpi and many more. 
Users can also use the platform to conduct data analyses with any methods of their choice. It is expected that extensive usage of MMAP would enrich the training data, which in turn results in continual improvement of the identification of the best method for use with particular traits.

The MMAP user manual, tutorials and example datasets are available at http://zzlab.net/MMAP.

SUPPLEMENTARY INFORMATION: Supplementary data are available at Bioinformatics online.}, } @article {pmid32960888, year = {2020}, author = {Ashraf, I and Umer, M and Majeed, R and Mehmood, A and Aslam, W and Yasir, MN and Choi, GS}, title = {Home automation using general purpose household electric appliances with Raspberry Pi and commercial smartphone.}, journal = {PloS one}, volume = {15}, number = {9}, pages = {e0238480}, pmid = {32960888}, issn = {1932-6203}, mesh = {Air Conditioning ; Automation/*instrumentation/*methods ; Computers ; Electrical Equipment and Supplies ; Electricity ; Humans ; Smartphone ; Software ; }, abstract = {This study presents the design and implementation of a home automation system that focuses on the use of ordinary electrical appliances for remote control using Raspberry Pi and relay circuits and does not use expensive IP-based devices. Common Lights, Heating, Ventilation, and Air Conditioning (HVAC), fans, and other electronic devices are among the appliances that can be used in this system. A smartphone app is designed that helps the user to design the smart home to his actual home via easy and interactive drag & drop option. The system provides control over the appliances via both the local network and remote access. Data logging over the Microsoft Azure cloud database ensures system recovery in case of gateway failure and data record for lateral use. Periodical notifications also help the user to optimize the usage of home appliances. Moreover, the user can set his preferences and the appliances are auto turned off and on to meet user-specific requirements. Raspberry Pi acting as the server maintains the database of each appliance. HTTP web interface and apache server are used for communication between the android app and raspberry pi. 
With a 5v relay circuit and micro-processor Raspberry Pi, the proposed system is low-cost, energy-efficient, easy to operate, and affordable for low-income houses.}, } @article {pmid32952600, year = {2020}, author = {Huang, PJ and Chang, JH and Lin, HH and Li, YX and Lee, CC and Su, CT and Li, YL and Chang, MT and Weng, S and Cheng, WH and Chiu, CH and Tang, P}, title = {DeepVariant-on-Spark: Small-Scale Genome Analysis Using a Cloud-Based Computing Framework.}, journal = {Computational and mathematical methods in medicine}, volume = {2020}, number = {}, pages = {7231205}, pmid = {32952600}, issn = {1748-6718}, mesh = {*Cloud Computing/economics ; Computational Biology/methods ; Cost-Benefit Analysis ; *Deep Learning ; *Genetic Variation ; Genome, Human ; High-Throughput Nucleotide Sequencing/economics/standards/statistics & numerical data ; Humans ; Neural Networks, Computer ; Software ; Whole Genome Sequencing/economics/standards/*statistics & numerical data ; }, abstract = {Although sequencing a human genome has become affordable, identifying genetic variants from whole-genome sequence data is still a hurdle for researchers without adequate computing equipment or bioinformatics support. GATK is a gold standard method for the identification of genetic variants and has been widely used in genome projects and population genetic studies for many years. This was until the Google Brain team developed a new method, DeepVariant, which utilizes deep neural networks to construct an image classification model to identify genetic variants. However, the superior accuracy of DeepVariant comes at the cost of computational intensity, largely constraining its applications. Accordingly, we present DeepVariant-on-Spark to optimize resource allocation, enable multi-GPU support, and accelerate the processing of the DeepVariant pipeline. To make DeepVariant-on-Spark more accessible to everyone, we have deployed the DeepVariant-on-Spark to the Google Cloud Platform (GCP). 
Users can deploy DeepVariant-on-Spark on the GCP following our instruction within 20 minutes and start to analyze at least ten whole-genome sequencing datasets using free credits provided by the GCP. DeepVariant-on-Spark is freely available for small-scale genome analysis using a cloud-based computing framework, which is suitable for pilot testing or preliminary study, while reserving the flexibility and scalability for large-scale sequencing projects.}, } @article {pmid32947907, year = {2020}, author = {Silva, LAZD and Vidal, VF and Honório, LM and Dantas, MAR and Pinto, MF and Capretz, M}, title = {A Heterogeneous Edge-Fog Environment Supporting Digital Twins for Remote Inspections.}, journal = {Sensors (Basel, Switzerland)}, volume = {20}, number = {18}, pages = {}, pmid = {32947907}, issn = {1424-8220}, support = {PD-02651-0013/2017//TBE, EDP and ANEEL - The Brazilian Regulatory Agency of Electricity/ ; }, abstract = {The increase in the development of digital twins brings several advantages to inspection and maintenance, but also new challenges. Digital models capable of representing real equipment for full remote inspection demand the synchronization, integration, and fusion of several sensors and methodologies such as stereo vision, monocular Simultaneous Localization and Mapping (SLAM), laser and RGB-D camera readings, texture analysis, filters, thermal, and multi-spectral images. This multidimensional information makes it possible to have a full understanding of given equipment, enabling remote diagnosis. To solve this problem, the present work uses an edge-fog-cloud architecture running over a publisher-subscriber communication framework to optimize the computational costs and throughput. In this approach, each process is embedded in an edge node responsible for preprocessing a given amount of data that optimizes the trade-off of processing capabilities and throughput delays. 
All information is integrated with different levels of fog nodes and a cloud server to maximize performance. To demonstrate this proposal, a real-time 3D reconstruction problem using moving cameras is shown. In this scenario, a stereo and RGB-D cameras run over edge nodes, filtering, and preprocessing the initial data. Furthermore, the point cloud and image registration, odometry, and filtering run over fog clusters. A cloud server is responsible for texturing and processing the final results. This approach enables us to optimize the time lag between data acquisition and operator visualization, and it is easily scalable if new sensors and algorithms must be added. The experimental results will demonstrate precision by comparing the results with ground-truth data, scalability by adding further readings and performance.}, } @article {pmid32943798, year = {2020}, author = {Moreno-Martínez, Á and Izquierdo-Verdiguier, E and Maneta, MP and Camps-Valls, G and Robinson, N and Muñoz-Marí, J and Sedano, F and Clinton, N and Running, SW}, title = {Multispectral high resolution sensor fusion for smoothing and gap-filling in the cloud.}, journal = {Remote sensing of environment}, volume = {247}, number = {}, pages = {111901}, pmid = {32943798}, issn = {0034-4257}, support = {80NSSC18M0025/ImNASA/Intramural NASA/United States ; }, abstract = {Remote sensing optical sensors onboard operational satellites cannot have high spectral, spatial and temporal resolutions simultaneously. In addition, clouds and aerosols can adversely affect the signal contaminating the land surface observations. We present a HIghly Scalable Temporal Adaptive Reflectance Fusion Model (HISTARFM) algorithm to combine multispectral images of different sensors to reduce noise and produce monthly gap free high resolution (30 m) observations over land. 
Our approach uses images from the Landsat (30 m spatial resolution and 16 day revisit cycle) and the MODIS missions, both from Terra and Aqua platforms (500 m spatial resolution and daily revisit cycle). We implement a bias-aware Kalman filter method in the Google Earth Engine (GEE) platform to obtain fused images at the Landsat spatial-resolution. The added bias correction in the Kalman filter estimates accounts for the fact that both model and observation errors are temporally auto-correlated and may have a non-zero mean. This approach also enables reliable estimation of the uncertainty associated with the final reflectance estimates, allowing for error propagation analyses in higher level remote sensing products. Quantitative and qualitative evaluations of the generated products through comparison with other state-of-the-art methods confirm the validity of the approach, and open the door to operational applications at enhanced spatio-temporal resolutions at broad continental scales.}, } @article {pmid32942759, year = {2020}, author = {Platt, S and Sanabria-Russo, L and Oliver, M}, title = {CoNTe: A Core Network Temporal Blockchain for 5G.}, journal = {Sensors (Basel, Switzerland)}, volume = {20}, number = {18}, pages = {}, pmid = {32942759}, issn = {1424-8220}, support = {AEI/FEDER TEC2016-79510//Generalitat de Catalunya/ ; GR2017-2019//Generalitat de Catalunya/ ; }, abstract = {Virtual Network Functions allow the effective separation between hardware and network functionality, a strong paradigm shift from previously tightly integrated monolithic, vendor, and technology dependent deployments. In this virtualized paradigm, all aspects of network operations can be made to deploy on demand, dynamically scale, as well as be shared and interworked in ways that mirror behaviors of general cloud computing. 
To date, although seeing rising demand, distributed ledger technology remains largely incompatible in such elastic deployments, by its nature as functioning as an immutable record store. This work focuses on the structural incompatibility of current blockchain designs and proposes a novel, temporal blockchain design built atop federated byzantine agreement, which has the ability to dynamically scale and be packaged as a Virtual Network Function (VNF) for the 5G Core.}, } @article {pmid32940684, year = {2020}, author = {Mayfield, CA and Gigler, ME and Snapper, L and Jose, J and Tynan, J and Scott, VC and Dulin, M}, title = {Using cloud-based, open-source technology to evaluate, improve, and rapidly disseminate community-based intervention data.}, journal = {Journal of the American Medical Informatics Association : JAMIA}, volume = {27}, number = {11}, pages = {1741-1746}, pmid = {32940684}, issn = {1527-974X}, mesh = {*Cloud Computing ; Community Health Services/*organization & administration ; Data Collection ; *Data Management ; Databases, Factual ; Humans ; Information Dissemination/*methods ; North Carolina ; Ownership ; Pilot Projects ; Social Determinants of Health ; Software ; *Stakeholder Participation ; }, abstract = {Building Uplifted Families (BUF) is a cross-sector community initiative to improve health and economic disparities in Charlotte, North Carolina. A formative evaluation strategy was used to support iterative process improvement and collaborative engagement of cross-sector partners. To address challenges with electronic data collection through REDCap Cloud, we developed the BUF Rapid Dissemination (BUF-RD) model, a multistage data governance system supplemented by open-source technologies, such as: Stage 1) data collection; Stage 2) data integration and analysis; and Stage 3) dissemination. In Stage 3, results were disseminated through an interactive dashboard developed in RStudio using RShiny and Shiny Server solutions. 
The BUF-RD model was successfully deployed in a 6-month beta test to reduce the time lapse between data collection and dissemination from 3 months to 2 weeks. Having up-to-date preliminary results led to improved BUF implementation, enhanced stakeholder engagement, and greater responsiveness and alignment of program resources to specific participant needs.}, } @article {pmid32939771, year = {2020}, author = {Mehraeen, M and Dadkhah, M and Mehraeen, A}, title = {Investigating the capabilities of information technologies to support policymaking in COVID-19 crisis management; a systematic review and expert opinions.}, journal = {European journal of clinical investigation}, volume = {50}, number = {11}, pages = {e13391}, doi = {10.1111/eci.13391}, pmid = {32939771}, issn = {1365-2362}, mesh = {COVID-19 ; Coronavirus Infections/epidemiology/*prevention & control ; Expert Testimony ; Female ; Health Policy ; Humans ; Information Technology/*statistics & numerical data ; Internet ; Machine Learning ; Male ; *Outcome Assessment, Health Care ; Pandemics/*prevention & control/statistics & numerical data ; Pneumonia, Viral/epidemiology/*prevention & control ; Policy Making ; Social Media ; }, abstract = {BACKGROUND: Today, numerous countries are fighting to protect themselves against the Covid-19 crisis, while the policymakers are confounded and empty handed in dealing with this chaotic circumstance. The infection and its impacts have made it difficult to make optimal and suitable decisions. New information technologies play significant roles in such critical situations to address and relieve stress during the coronavirus crisis. This article endeavours to recognize the challenges policymakers have typically experienced during pandemic diseases, including Covid-19, and, accordingly, new information technology capabilities to encounter with them.

MATERIAL AND METHODS: The current study utilizes the synthesis of findings of experts' opinions within the systematic review process as the research method to recognize the best available evidence drawn from text and opinion to offer practical guidance for policymakers.

RESULTS: The results illustrate that the challenges fall into two categories: encountering the disease and reducing the results of the disease. Furthermore, Internet of things, cloud computing, machine learning and social networking play the most significant roles to address these challenges.}, } @article {pmid32938391, year = {2020}, author = {Albrecht, B and Bağcı, C and Huson, DH}, title = {MAIRA- real-time taxonomic and functional analysis of long reads on a laptop.}, journal = {BMC bioinformatics}, volume = {21}, number = {Suppl 13}, pages = {390}, pmid = {32938391}, issn = {1471-2105}, mesh = {Classification/*methods ; Computers/*standards ; Humans ; Metagenomics/*methods ; }, abstract = {BACKGROUND: Advances in mobile sequencing devices and laptop performance make metagenomic sequencing and analysis in the field a technologically feasible prospect. However, metagenomic analysis pipelines are usually designed to run on servers and in the cloud.

RESULTS: MAIRA is a new standalone program for interactive taxonomic and functional analysis of long read metagenomic sequencing data on a laptop, without requiring external resources. The program performs fast, online, genus-level analysis, and on-demand, detailed taxonomic and functional analysis. It uses two levels of frame-shift-aware alignment of DNA reads against protein reference sequences, and then performs detailed analysis using a protein synteny graph.

CONCLUSIONS: We envision this software being used by researchers in the field, when access to servers or cloud facilities is difficult, or by individuals that do not routinely access such facilities, such as medical researchers, crop scientists, or teachers.}, } @article {pmid32937865, year = {2020}, author = {Koubaa, A and Ammar, A and Alahdab, M and Kanhouch, A and Azar, AT}, title = {DeepBrain: Experimental Evaluation of Cloud-Based Computation Offloading and Edge Computing in the Internet-of-Drones for Deep Learning Applications.}, journal = {Sensors (Basel, Switzerland)}, volume = {20}, number = {18}, pages = {}, pmid = {32937865}, issn = {1424-8220}, abstract = {Unmanned Aerial Vehicles (UAVs) have been very effective in collecting aerial images data for various Internet-of-Things (IoT)/smart cities applications such as search and rescue, surveillance, vehicle detection, counting, intelligent transportation systems, to name a few. However, the real-time processing of collected data on edge in the context of the Internet-of-Drones remains an open challenge because UAVs have limited energy capabilities, while computer vision techniques consume excessive energy and require abundant resources. This fact is even more critical when deep learning algorithms, such as convolutional neural networks (CNNs), are used for classification and detection. In this paper, we first propose a system architecture of computation offloading for Internet-connected drones. Then, we conduct a comprehensive experimental study to evaluate the performance in terms of energy, bandwidth, and delay of the cloud computation offloading approach versus the edge computing approach of deep learning applications in the context of UAVs. In particular, we investigate the tradeoff between the communication cost and the computation of the two candidate approaches experimentally. 
The main results demonstrate that the computation offloading approach allows us to provide much higher throughput (i.e., frames per second) as compared to the edge computing approach, despite the larger communication delays.}, } @article {pmid32934216, year = {2020}, author = {Wang, S and Di Tommaso, S and Deines, JM and Lobell, DB}, title = {Mapping twenty years of corn and soybean across the US Midwest using the Landsat archive.}, journal = {Scientific data}, volume = {7}, number = {1}, pages = {307}, pmid = {32934216}, issn = {2052-4463}, abstract = {Field-level monitoring of crop types in the United States via the Cropland Data Layer (CDL) has played an important role in improving production forecasts and enabling large-scale study of agricultural inputs and outcomes. Although CDL offers crop type maps across the conterminous US from 2008 onward, such maps are missing in many Midwestern states or are uneven in quality before 2008. To fill these data gaps, we used the now-public Landsat archive and cloud computing services to map corn and soybean at 30 m resolution across the US Midwest from 1999-2018. Our training data were CDL from 2008-2018, and we validated the predictions on CDL 1999-2007 where available, county-level crop acreage statistics, and state-level crop rotation statistics. The corn-soybean maps, which we call the Corn-Soy Data Layer (CSDL), are publicly hosted on Google Earth Engine and also available for download online.}, } @article {pmid32927672, year = {2020}, author = {Utomo, D and Hsiung, PA}, title = {A Multitiered Solution for Anomaly Detection in Edge Computing for Smart Meters.}, journal = {Sensors (Basel, Switzerland)}, volume = {20}, number = {18}, pages = {}, pmid = {32927672}, issn = {1424-8220}, abstract = {In systems connected to smart grids, smart meters with fast and efficient responses are very helpful in detecting anomalies in realtime. 
However, sending data with a frequency of a minute or less is not normal with today's technology because of the bottleneck of the communication network and storage media. Because mitigation cannot be done in realtime, we propose prediction techniques using Deep Neural Network (DNN), Support Vector Regression (SVR), and k-Nearest Neighbors (KNN). In addition to these techniques, the prediction timestep is chosen per day and wrapped in sliding windows, and clustering using Kmeans and intersection Kmeans and HDBSCAN is also evaluated. The predictive ability applied here is to predict whether anomalies in electricity usage will occur in the next few weeks. The aim is to give the user time to check their usage and from the utility side, whether it is necessary to prepare a sufficient supply. We also propose the latency reduction to counter higher latency as in the traditional centralized system by adding layer Edge Meter Data Management System (MDMS) and Cloud-MDMS as the inference and training model. Based on the experiments when running in the Raspberry Pi, the best solution is choosing DNN that has the shortest latency 1.25 ms, 159 kB persistent file size, and at 128 timesteps.}, } @article {pmid32906056, year = {2020}, author = {Mrozek, D}, title = {A review of Cloud computing technologies for comprehensive microRNA analyses.}, journal = {Computational biology and chemistry}, volume = {88}, number = {}, pages = {107365}, doi = {10.1016/j.compbiolchem.2020.107365}, pmid = {32906056}, issn = {1476-928X}, mesh = {Big Data ; *Cloud Computing ; Humans ; *Machine Learning ; MicroRNAs/*analysis ; }, abstract = {Cloud computing revolutionized many fields that require ample computational power. Cloud platforms may also provide huge support for microRNA analysis mainly through disclosing scalable resources of different types. In Clouds, these resources are available as services, which simplifies their allocation and releasing. 
This feature is especially useful during the analysis of large volumes of data, like the one produced by next generation sequencing experiments, which require not only extended storage space but also a distributed computing environment. In this paper, we show which of the Cloud properties and service models can be especially beneficial for microRNA analysis. We also explain the most useful services of the Cloud (including storage space, computational power, web application hosting, machine learning models, and Big Data frameworks) that can be used for microRNA analysis. At the same time, we review several solutions for microRNA and show that the utilization of the Cloud in this field is still weak, but can increase in the future when the awareness of their applicability grows.}, } @article {pmid32904507, year = {2020}, author = {Long, E and Chen, J and Wu, X and Liu, Z and Wang, L and Jiang, J and Li, W and Zhu, Y and Chen, C and Lin, Z and Li, J and Li, X and Chen, H and Guo, C and Zhao, L and Nie, D and Liu, X and Liu, X and Dong, Z and Yun, B and Wei, W and Xu, F and Lv, J and Li, M and Ling, S and Zhong, L and Chen, J and Zheng, Q and Zhang, L and Xiang, Y and Tan, G and Huang, K and Xiang, Y and Lin, D and Zhang, X and Dongye, M and Wang, D and Chen, W and Liu, X and Lin, H and Liu, Y}, title = {Artificial intelligence manages congenital cataract with individualized prediction and telehealth computing.}, journal = {NPJ digital medicine}, volume = {3}, number = {}, pages = {112}, pmid = {32904507}, issn = {2398-6352}, abstract = {A challenge of chronic diseases that remains to be solved is how to liberate patients and medical resources from the burdens of long-term monitoring and periodic visits. Precise management based on artificial intelligence (AI) holds great promise; however, a clinical application that fully integrates prediction and telehealth computing has not been achieved, and further efforts are required to validate its real-world benefits. 
Taking congenital cataract as a representative, we used Bayesian and deep-learning algorithms to create CC-Guardian, an AI agent that incorporates individualized prediction and scheduling, and intelligent telehealth follow-up computing. Our agent exhibits high sensitivity and specificity in both internal and multi-resource validation. We integrate our agent with a web-based smartphone app and prototype a prediction-telehealth cloud platform to support our intelligent follow-up system. We then conduct a retrospective self-controlled test validating that our system not only accurately detects and addresses complications at earlier stages, but also reduces the socioeconomic burdens compared to conventional methods. This study represents a pioneering step in applying AI to achieve real medical benefits and demonstrates a novel strategy for the effective management of chronic diseases.}, } @article {pmid34812355, year = {2020}, author = {Kolhar, M and Al-Turjman, F and Alameen, A and Abualhaj, MM}, title = {A Three Layered Decentralized IoT Biometric Architecture for City Lockdown During COVID-19 Outbreak.}, journal = {IEEE access : practical innovations, open solutions}, volume = {8}, number = {}, pages = {163608-163617}, pmid = {34812355}, issn = {2169-3536}, abstract = {In this article, we have built a prototype of a decentralized IoT based biometric face detection framework for cities that are under lockdown during COVID-19 outbreaks. To impose restrictions on public movements, we have utilized face detection using three-layered edge computing architecture. We have built a deep learning framework of multi-task cascading to recognize the face. For the face detection proposal we have compared with the state of the art methods on various benchmarking dataset such as FDDB and WIDER FACE. Furthermore, we have also conducted various experiments on latency and face detection load on three-layer and cloud computing architectures. 
It shows that our proposal has an edge over cloud computing architecture.}, } @article {pmid32878202, year = {2020}, author = {Hwang, YW and Lee, IY}, title = {A Study on CP-ABE-based Medical Data Sharing System with Key Abuse Prevention and Verifiable Outsourcing in the IoMT Environment.}, journal = {Sensors (Basel, Switzerland)}, volume = {20}, number = {17}, pages = {}, pmid = {32878202}, issn = {1424-8220}, abstract = {Recent developments in cloud computing allow data to be securely shared between users. This can be used to improve the quality of life of patients and medical staff in the Internet of Medical Things (IoMT) environment. However, in the IoMT cloud environment, there are various security threats to the patient's medical data. As a result, security features such as encryption of collected data and access control by legitimate users are essential. Many studies have been conducted on access control techniques using ciphertext-policy attribute-based encryption (CP-ABE), a form of attribute-based encryption, among various security technologies and studies are underway to apply them to the medical field. However, several problems persist. First, as the secret key does not identify the user, the user may maliciously distribute the secret key and such users cannot be tracked. Second, Attribute-Based Encryption (ABE) increases the size of the ciphertext depending on the number of attributes specified. This wastes cloud storage, and computational times are high when users decrypt. Such users must employ outsourcing servers. Third, a verification process is needed to prove that the results computed on the outsourcing server are properly computed. This paper focuses on the IoMT environment for a study of a CP-ABE-based medical data sharing system with key abuse prevention and verifiable outsourcing in a cloud environment. 
The proposed scheme can protect the privacy of user data stored in a cloud environment in the IoMT field, and if there is a problem with the secret key delegated by the user, it can trace a user who first delegated the key. This can prevent the key abuse problem. In addition, this scheme reduces the user's burden when decoding ciphertext and calculates accurate results through a server that supports constant-sized ciphertext output and verifiable outsourcing technology. The goal of this paper is to propose a system that enables patients and medical staff to share medical data safely and efficiently in an IoMT environment.}, } @article {pmid32876754, year = {2020}, author = {Nguyen, UNT and Pham, LTH and Dang, TD}, title = {Correction to: an automatic water detection approach using Landsat 8 OLI and Google earth engine cloud computing to map lakes and reservoirs in New Zealand.}, journal = {Environmental monitoring and assessment}, volume = {192}, number = {9}, pages = {616}, doi = {10.1007/s10661-020-08581-y}, pmid = {32876754}, issn = {1573-2959}, abstract = {In the published article:"An automatic water detection approach using Landsat 8 OLI and Google Earth Engine cloud computing to map lakes and reservoirs in New Zealand", the Acknowledgements was published incorrectly and funding statement was missing.}, } @article {pmid32868955, year = {2020}, author = {Mei, L and Rozanov, V and Burrows, JP}, title = {A fast and accurate radiative transfer model for aerosol remote sensing.}, journal = {Journal of quantitative spectroscopy & radiative transfer}, volume = {256}, number = {}, pages = {107270}, pmid = {32868955}, issn = {0022-4073}, abstract = {After several decades' development of retrieval techniques in aerosol remote sensing, no fast and accurate analytical Radiative Transfer Model (RTM) has been developed and applied to create global aerosol products for non-polarimetric instruments such as Ocean and Land Colour Instrument/Sentinel-3 (OLCI/Sentinel-3) and 
Meteosat Second Generation/Spinning Enhanced Visible and Infrared Imager (MSG/SEVIRI). Global aerosol retrieval algorithms are typically based on a Look-Up-Table (LUT) technique, requiring high-performance computers. The current eXtensible Bremen Aerosol/cloud and surfacE parameters Retrieval (XBAER) algorithm also utilizes the LUT method. In order to have a near-real time retrieval and achieve a quick and accurate "FIRST-LOOK" aerosol product without high-demand of computing resource, we have developed a Fast and Accurate Semi-analytical Model of Atmosphere-surface Reflectance (FASMAR) for aerosol remote sensing. The FASMAR is developed based on a successive order of scattering technique. In FASMAR, the first three orders of scattering are calculated exactly. The contribution of higher orders of scattering is estimated using an extrapolation technique and an additional correction function. The evaluation of FASMAR has been performed by comparing with radiative transfer model SCIATRAN for all typical observation/illumination geometries, surface/aerosol conditions, and wavelengths 412, 550, 670, 870, 1600, 2100 nm used for aerosol remote sensing. The selected observation/illumination conditions are based on the observations from both geostationary satellite (e.g. MSG/SEVIRI) and polar-orbit satellite (e.g. OLCI/Sentinel-3). The percentage error of the top of atmosphere reflectance calculated by FASMAR is within ± 3% for typical polar-orbit/geostationary satellites' observation/illumination geometries. The accuracy decreases for solar and viewing zenith angles larger than 70[∘]. However, even in such cases, the error is within the range ± 5%. 
The evaluation of model performance also shows that FASMAR can be used for all typical surfaces with albedo in the interval [ 0 - 1 ] and aerosol with optical thickness in the range [ 0.01 - 1 ] .}, } @article {pmid32863440, year = {2020}, author = {Wang, X and Xiao, X and Zou, Z and Chen, B and Ma, J and Dong, J and Doughty, RB and Zhong, Q and Qin, Y and Dai, S and Li, X and Zhao, B and Li, B}, title = {Tracking annual changes of coastal tidal flats in China during 1986-2016 through analyses of Landsat images with Google Earth Engine.}, journal = {Remote sensing of environment}, volume = {238}, number = {}, pages = {}, pmid = {32863440}, issn = {0034-4257}, support = {R01 AI101028/AI/NIAID NIH HHS/United States ; }, abstract = {Tidal flats (non-vegetated area), along with coastal vegetation area, constitute the coastal wetlands (intertidal zone) between high and low water lines, and play an important role in wildlife, biodiversity and biogeochemical cycles. However, accurate annual maps of coastal tidal flats over the last few decades are unavailable and their spatio-temporal changes in China are unknown. In this study, we analyzed all the available Landsat TM/ETM+/OLI imagery (~ 44,528 images) using the Google Earth Engine (GEE) cloud computing platform and a robust decision tree algorithm to generate annual frequency maps of open surface water body and vegetation to produce annual maps of coastal tidal flats in eastern China from 1986 to 2016 at 30-m spatial resolution. The resulting map of coastal tidal flats in 2016 was evaluated using very high-resolution images available in Google Earth. The total area of coastal tidal flats in China in 2016 was about 731,170 ha, mostly distributed in the provinces around Yellow River Delta and Pearl River Delta. 
The interannual dynamics of coastal tidal flats area in China over the last three decades can be divided into three periods: a stable period during 1986-1992, an increasing period during 1993-2001 and a decreasing period during 2002-2016. The resulting annual coastal tidal flats maps could be used to support sustainable coastal zone management policies that preserve coastal ecosystem services and biodiversity in China.}, } @article {pmid32857770, year = {2020}, author = {Samea, F and Azam, F and Rashid, M and Anwar, MW and Haider Butt, W and Muzaffar, AW}, title = {A model-driven framework for data-driven applications in serverless cloud computing.}, journal = {PloS one}, volume = {15}, number = {8}, pages = {e0237317}, pmid = {32857770}, issn = {1932-6203}, mesh = {*Cloud Computing ; Data Science/*methods ; *Models, Theoretical ; *Software ; }, abstract = {In a serverless cloud computing environment, the cloud provider dynamically manages the allocation of resources whereas the developers purely focus on their applications. The data-driven applications in serverless cloud computing mainly address the web as well as other distributed scenarios, and therefore, it is essential to offer a consistent user experience across different connection types. In order to address the issues of data-driven application in a real-time distributed environment, the use of GraphQL (Graph Query Language) is getting more and more popularity in state-of-the-art cloud computing approaches. However, the existing solutions target the low level implementation of GraphQL, for the development of a complex data-driven application, which may lead to several errors and involve a significant amount of development efforts due to various users' requirements in real-time. Therefore, it is critical to simplify the development process of data-driven applications in a serverless cloud computing environment. 
Consequently, this research introduces UMLPDA (Unified Modeling Language Profile for Data-driven Applications), which adopts the concepts of UML-based Model-driven Architectures to model the frontend as well as the backend requirements for data-driven applications developed at a higher abstraction level. Particularly, a modeling approach is proposed to resolve the development complexities such as data communication and synchronization. Subsequently, a complete open source transformation engine is developed using a Model-to-Text approach to automatically generate the frontend as well as backend low level implementations of Angular2 and GraphQL respectively. The validation of proposed work is performed with three different case studies, deployed on Amazon Web Services platform. The results show that the proposed framework enables to develop the data-driven applications with simplicity.}, } @article {pmid32857296, year = {2020}, author = {Fuentes, H and Mauricio, D}, title = {Smart water consumption measurement system for houses using IoT and cloud computing.}, journal = {Environmental monitoring and assessment}, volume = {192}, number = {9}, pages = {602}, pmid = {32857296}, issn = {1573-2959}, mesh = {Algorithms ; *Cloud Computing ; *Drinking ; Environmental Monitoring ; }, abstract = {Presently, in several parts of the world, water consumption is not measured or visualized in real time, in addition, water leaks are not detected in time and with high precision, generating unnecessary waste of water. That is why this article presents the implementation of a smart water measurement consumption system under an architecture design, with high decoupling and integration of various technologies, which allows real-time visualizing the consumptions, in addition, a leak detection algorithm is proposed based on rules, historical context, and user location that manages to cover 10 possible water consumption scenarios between normal and anomalous consumption. 
The system allows data to be collected by a smart meter, which is preprocessed by a local server (Gateway) and sent to the Cloud from time to time to be analyzed by the leak detection algorithm and, simultaneously, be viewed on a web interface. The results show that the algorithm has 100% Accuracy, Recall, Precision, and F1 score to detect leaks, far better than other procedures, and a margin of error of 4.63% recorded by the amount of water consumed.}, } @article {pmid32852146, year = {2020}, author = {Pang, R and Wei, Z and Liu, W and Chen, Z and Cheng, X and Zhang, H and Li, G and Liu, L}, title = {Influence of the pandemic dissemination of COVID-19 on facial rejuvenation: A survey of Twitter.}, journal = {Journal of cosmetic dermatology}, volume = {19}, number = {11}, pages = {2778-2784}, pmid = {32852146}, issn = {1473-2165}, support = {20A320033//The University key scientific research project of Henan Province/ ; }, mesh = {*Betacoronavirus ; COVID-19 ; Coronavirus Infections/*epidemiology ; *Cosmetic Techniques ; Face ; Humans ; Pandemics ; Pneumonia, Viral/*epidemiology ; *Public Opinion ; *Rejuvenation ; SARS-CoV-2 ; *Social Media ; }, abstract = {BACKGROUND: With the pandemic dissemination of COVID-19, attitude and sentiment surrounding facial rejuvenation have evolved rapidly.

AIMS: The purpose of this study was to understand the impact of the pandemic on the attitude of people toward facial skin rejuvenation.

METHODS: Twitter data related to facial rejuvenation were collected from January 1, 2020, to April 30, 2020. Sentiment analysis, frequency analysis, and word cloud were performed to analyze the data. Statistical analysis included two-tailed t tests and chi-square tests.

RESULTS: In the post-declaration, the number of tweets about facial rejuvenation increased significantly, and the search volume in Google Trends decreased. Negative public emotions increased, but positive emotions still dominated. The word frequency of "discounts" and "purchase" decreased. The dominant words in the word cloud were "Botox," "facelift," "hyaluronic," and "skin."

CONCLUSION: The public has a positive attitude toward facial rejuvenation during the pandemic. In particular, minimally invasive procedures dominate the mainstream, such as "Botox," "Hyaluronic acid," and "PRP." The practitioners could understand the change of the public interest in facial rejuvenation in time and decide what to focus on.}, } @article {pmid32839626, year = {2020}, author = {Mahmood, T and Mubarik, MS}, title = {Balancing innovation and exploitation in the fourth industrial revolution: Role of intellectual capital and technology absorptive capacity.}, journal = {Technological forecasting and social change}, volume = {160}, number = {}, pages = {120248}, pmid = {32839626}, issn = {0040-1625}, abstract = {Industry 4.0, which features the Internet of things (IoT), cloud computing, big-data, digitalization, and cyber-physical systems, is transforming the way businesses are being run. It is making the business processes more autonomous, automated and intelligent, and is transmuting the organizational structures of businesses by digitalizing their end-to-end business processes. In this context, balancing innovation and exploitation-organization's ambidexterity-while stepping into the fourth industrial revolution can be critical for organizational capability. This study examines the role of intellectual capital (IC)-human capital, structural capital and relational capital-in balancing the innovation and exploitation activities. It also examines the role of technology's absorptive capacity in the relationship between IC and organizational ambidexterity (OA). Data were collected from 217 small and medium enterprises from the manufacturing sector of Pakistan using a closed-ended Likert scale-based questionnaire. The study employs partial least square-Structural Equation Modeling (PLS-SEM) for data analysis. Findings indicate a profound influence of all dimensions of IC, both overall and by dimensions on organizations' ambidexterity. 
Findings also exhibit a significant partial mediating role of technology absorptive capacity (TAC) in the association of IC and ambidexterity. The findings of the study emphasize the creation of specific policies aimed to develop IC of a firm, which in turn can enable a firm to maintain a balance between innovation and market exploitation activities. The study integrates the TAC with the IC-OA relationship, which is the novelty of the study.}, } @article {pmid32837593, year = {2021}, author = {Hsu, IC and Chang, CC}, title = {Integrating machine learning and open data into social Chatbot for filtering information rumor.}, journal = {Journal of ambient intelligence and humanized computing}, volume = {12}, number = {1}, pages = {1023-1037}, pmid = {32837593}, issn = {1868-5137}, abstract = {Social networks have become a major platform for people to disseminate information, which can include negative rumors. In recent years, rumors on social networks have caused grave problems and considerable damage. We attempted to create a method to verify information from numerous social media messages. We propose a general architecture that integrates machine learning and open data with a Chatbot and is based on cloud computing (MLODCCC), which can assist users in evaluating information authenticity on social platforms. The proposed MLODCCC architecture consists of six integrated modules: cloud computing, machine learning, data preparation, open data, chatbot, and intelligent social application modules. Food safety has garnered worldwide attention. Consequently, we used the proposed MLODCCC architecture to develop a Food Safety Information Platform (FSIP) that provides a friendly hyperlink and chatbot interface on Facebook to identify credible food safety information. 
The performance and accuracy of three binary classification algorithms, namely the decision tree, logistic regression, and support vector machine algorithms, operating in different cloud computing environments were compared. The binary classification accuracy was 0.769, which indicates that the proposed approach accurately classifies using the developed FSIP.}, } @article {pmid32837253, year = {2020}, author = {Ghinita, G and Nguyen, K and Maruseac, M and Shahabi, C}, title = {A secure location-based alert system with tunable privacy-performance trade-off.}, journal = {GeoInformatica}, volume = {24}, number = {4}, pages = {951-985}, pmid = {32837253}, issn = {1384-6175}, abstract = {Monitoring location updates from mobile users has important applications in many areas, ranging from public health (e.g., COVID-19 contact tracing) and national security to social networks and advertising. However, sensitive information can be derived from movement patterns, thus protecting the privacy of mobile users is a major concern. Users may only be willing to disclose their locations when some condition is met, for instance in proximity of a disaster area or an event of interest. Currently, such functionality can be achieved using searchable encryption. Such cryptographic primitives provide provable guarantees for privacy, and allow decryption only when the location satisfies some predicate. Nevertheless, they rely on expensive pairing-based cryptography (PBC), of which direct application to the domain of location updates leads to impractical solutions. We propose secure and efficient techniques for private processing of location updates that complement the use of PBC and lead to significant gains in performance by reducing the amount of required pairing operations. We implement two optimizations that further improve performance: materialization of results to expensive mathematical operations, and parallelization. 
We also propose a heuristic that brings down the computational overhead through enlarging an alert zone by a small factor (given as a system parameter), therefore trading off a small and controlled amount of privacy for significant performance gains. Extensive experimental results show that the proposed techniques significantly improve performance compared to the baseline, and reduce the searchable encryption overhead to a level that is practical in a computing environment with reasonable resources, such as the cloud.}, } @article {pmid32837247, year = {2022}, author = {Ibrahim, AU and Al-Turjman, F and Sa'id, Z and Ozsoz, M}, title = {Futuristic CRISPR-based biosensing in the cloud and internet of things era: an overview.}, journal = {Multimedia tools and applications}, volume = {81}, number = {24}, pages = {35143-35171}, pmid = {32837247}, issn = {1380-7501}, abstract = {Biosensors-based devices are transforming medical diagnosis of diseases and monitoring of patient signals. The development of smart and automated molecular diagnostic tools equipped with biomedical big data analysis, cloud computing and medical artificial intelligence can be an ideal approach for the detection and monitoring of diseases, precise therapy, and storage of data over the cloud for supportive decisions. This review focused on the use of machine learning approaches for the development of futuristic CRISPR-biosensors based on microchips and the use of Internet of Things for wireless transmission of signals over the cloud to support decision making. The present review also discussed the discovery of CRISPR, its usage as a gene editing tool, and the CRISPR-based biosensors with high sensitivity of Attomolar (10[-18] M), Femtomolar (10[-15] M) and Picomolar (10[-12] M) in comparison to conventional biosensors with sensitivity of nanomolar 10[-9] M and micromolar 10[-3] M. 
Additionally, the review also outlines limitations and open research issues in the current state of CRISPR-based biosensing applications.}, } @article {pmid32837246, year = {2021}, author = {Al-Zinati, M and Alrashdan, R and Al-Duwairi, B and Aloqaily, M}, title = {A re-organizing biosurveillance framework based on fog and mobile edge computing.}, journal = {Multimedia tools and applications}, volume = {80}, number = {11}, pages = {16805-16825}, pmid = {32837246}, issn = {1380-7501}, abstract = {Biological threats are becoming a serious security issue for many countries across the world. Effective biosurveillance systems can primarily support appropriate responses to biological threats and consequently save human lives. Nevertheless, biosurveillance systems are costly to implement and hard to operate. Furthermore, they rely on static infrastructures that might not cope with the evolving dynamics of the monitored environment. In this paper, we present a reorganizing biosurveillance framework for the detection and localization of biological threats with fog and mobile edge computing support. In the proposed framework, a hierarchy of fog nodes are responsible for aggregating monitoring data within their regions and detecting potential threats. Although fog nodes are deployed on a fixed base station infrastructure, the framework provides an innovative technique for reorganizing the monitored environment structure to adapt to the evolving environmental conditions and to overcome the limitations of the static base station infrastructure. Evaluation results illustrate the ability of the framework to localize biological threats and detect infected areas. 
Moreover, the results show the effectiveness of the reorganization mechanisms in adjusting the environment structure to cope with the highly dynamic environment.}, } @article {pmid32835313, year = {2020}, author = {Blair, GS}, title = {A Tale of Two Cities: Reflections on Digital Technology and the Natural Environment.}, journal = {Patterns (New York, N.Y.)}, volume = {1}, number = {5}, pages = {100068}, pmid = {32835313}, issn = {2666-3899}, abstract = {Contemporary digital technologies can make a profound impact on our understanding of the natural environment in moving toward sustainable futures. Examples of such technologies included sources of new data (e.g., an environmental Internet of Things), the ability to storage and process the large datasets that will result from this (e.g., through cloud computing), and the potential of data science and AI to make sense of these data alongside human experts. However, these same trends pose a threat to sustainable futures through, for example, the carbon footprint of digital technology and the risks of this escalating through the very trends mentioned above.}, } @article {pmid32825602, year = {2020}, author = {Li, H and Lan, C and Fu, X and Wang, C and Li, F and Guo, H}, title = {A Secure and Lightweight Fine-Grained Data Sharing Scheme for Mobile Cloud Computing.}, journal = {Sensors (Basel, Switzerland)}, volume = {20}, number = {17}, pages = {}, pmid = {32825602}, issn = {1424-8220}, support = {No. 61602080, No. 61602084//National Natural Science Foundation of China/ ; No. GCIS201718//Guangxi Key Laboratory of Cryptography and Information Security/ ; No. LY19F020045//Natural Science Foundation of Zhejiang Province/ ; No. 2017B030314131-05//the Opening Project of Guangdong Provincial Key Laboratory of Information Security Technology/ ; No. Y201636547//the Department of Education of Zhejiang Province of China/ ; No. 
2017C01062//the Key Research Project of Zhejiang Province/ ; }, abstract = {With the explosion of various mobile devices and the tremendous advancement in cloud computing technology, mobile devices have been seamlessly integrated with the premium powerful cloud computing known as an innovation paradigm named Mobile Cloud Computing (MCC) to facilitate the mobile users in storing, computing and sharing their data with others. Meanwhile, Attribute Based Encryption (ABE) has been envisioned as one of the most promising cryptographic primitives for providing secure and flexible fine-grained "one to many" access control, particularly in large scale distributed system with unknown participators. However, most existing ABE schemes are not suitable for MCC because they involve expensive pairing operations which pose a formidable challenge for resource-constrained mobile devices, thus greatly delaying the widespread popularity of MCC. To this end, in this paper, we propose a secure and lightweight fine-grained data sharing scheme (SLFG-DSS) for a mobile cloud computing scenario to outsource the majority of time-consuming operations from the resource-constrained mobile devices to the resource-rich cloud servers. Different from the current schemes, our novel scheme can enjoy the following promising merits simultaneously: (1) Supporting verifiable outsourced decryption, i.e., the mobile user can ensure the validity of the transformed ciphertext returned from the cloud server; (2) resisting decryption key exposure, i.e., our proposed scheme can outsource decryption for intensive computing tasks during the decryption phase without revealing the user's data or decryption key; (3) achieving a CCA security level; thus, our novel scheme can be applied to the scenarios with higher security level requirement. 
The concrete security proof and performance analysis illustrate that our novel scheme is proven secure and suitable for the mobile cloud computing environment.}, } @article {pmid32824989, year = {2020}, author = {Sarker, VK and Gia, TN and Ben Dhaou, I and Westerlund, T}, title = {Smart Parking System with Dynamic Pricing, Edge-Cloud Computing and LoRa.}, journal = {Sensors (Basel, Switzerland)}, volume = {20}, number = {17}, pages = {}, pmid = {32824989}, issn = {1424-8220}, support = {328755//Academy of Finland/ ; }, abstract = {A rapidly growing number of vehicles in recent years cause long traffic jams and difficulty in the management of traffic in cities. One of the most significant reasons for increased traffic jams on the road is random parking in unauthorized and non-permitted places. In addition, managing of available parking places cannot achieve the expected reduction in traffic congestion related problems due to mismanagement, lack of real-time parking guidance to the drivers, and general ignorance. As the number of roads, highways and related resources has not increased significantly, a rising need for a smart, dynamic and effective parking solution is observed. Accordingly, with the use of multiple sensors, appropriate communication network and advanced processing capabilities of edge and cloud computing, a smart parking system can help manage parking effectively and make it easier for the vehicle owners. In this paper, we propose a multi-layer architecture for smart parking system consisting of multi-parametric parking slot sensor nodes, latest long-range low-power wireless communication technology and Edge-Cloud computation. The proposed system enables dynamic management of parking for large areas while providing useful information to the drivers about available parking locations and related services through near real-time monitoring of vehicles. 
Furthermore, we propose a dynamic pricing algorithm to yield maximum possible revenue for the parking authority and optimum parking slot availability for the drivers.}, } @article {pmid32824508, year = {2020}, author = {Nguyen, TT and Yeom, YJ and Kim, T and Park, DH and Kim, S}, title = {Horizontal Pod Autoscaling in Kubernetes for Elastic Container Orchestration.}, journal = {Sensors (Basel, Switzerland)}, volume = {20}, number = {16}, pages = {}, pmid = {32824508}, issn = {1424-8220}, support = {NRF-2019R1F1A1059408//National Research Foundation of Korea/ ; 2018-0-00387//Institute of Information & Communications Technology Planning & Evaluation/ ; }, abstract = {Kubernetes, an open-source container orchestration platform, enables high availability and scalability through diverse autoscaling mechanisms such as Horizontal Pod Autoscaler (HPA), Vertical Pod Autoscaler and Cluster Autoscaler. Amongst them, HPA helps provide seamless service by dynamically scaling up and down the number of resource units, called pods, without having to restart the whole system. Kubernetes monitors default Resource Metrics including CPU and memory usage of host machines and their pods. On the other hand, Custom Metrics, provided by external software such as Prometheus, are customizable to monitor a wide collection of metrics. In this paper, we investigate HPA through diverse experiments to provide critical knowledge on its operational behaviors. We also discuss the essential difference between Kubernetes Resource Metrics (KRM) and Prometheus Custom Metrics (PCM) and how they affect HPA's performance. 
Lastly, we provide deeper insights and lessons on how to optimize the performance of HPA for researchers, developers, and system administrators working with Kubernetes in the future.}, } @article {pmid32824288, year = {2020}, author = {Yang, H and Kim, Y}, title = {Design and Implementation of Fast Fault Detection in Cloud Infrastructure for Containerized IoT Services.}, journal = {Sensors (Basel, Switzerland)}, volume = {20}, number = {16}, pages = {}, pmid = {32824288}, issn = {1424-8220}, support = {2020-0-00946//Institute for Information and Communications Technology Promotion/ ; IITP-2020-2017-0-01633//Institute for Information and Communications Technology Promotion/ ; }, abstract = {The container-based cloud is used in various service infrastructures as it is lighter and more portable than a virtual machine (VM)-based infrastructure and is configurable in both bare-metal and VM environments. The Internet-of-Things (IoT) cloud-computing infrastructure is also evolving from a VM-based to a container-based infrastructure. In IoT clouds, the service availability of the cloud infrastructure is more important for mission-critical IoT services, such as real-time health monitoring, vehicle-to-vehicle (V2V) communication, and industrial IoT, than for general computing services. However, in the container environment that runs on a VM, the current fault detection method only considers the container's infra, thus limiting the level of availability necessary for the performance of mission-critical IoT cloud services. Therefore, in a container environment running on a VM, fault detection and recovery methods that consider both the VM and container levels are necessary. In this study, we analyze the fault-detection architecture in a container environment and designed and implemented a Fast Fault Detection Manager (FFDM) architecture using OpenStack and Kubernetes for realizing fast fault detection. 
Through performance measurements, we verified that the FFDM can improve the fault detection time by more than three times over the existing method.}, } @article {pmid32822005, year = {2021}, author = {Zhao, L and Batta, I and Matloff, W and O'Driscoll, C and Hobel, S and Toga, AW}, title = {Neuroimaging PheWAS (Phenome-Wide Association Study): A Free Cloud-Computing Platform for Big-Data, Brain-Wide Imaging Association Studies.}, journal = {Neuroinformatics}, volume = {19}, number = {2}, pages = {285-303}, pmid = {32822005}, issn = {1559-0089}, support = {P41 EB015922/EB/NIBIB NIH HHS/United States ; U54 EB020406/EB/NIBIB NIH HHS/United States ; //CIHR/Canada ; R01 MH094343/MH/NIMH NIH HHS/United States ; U01 AG024904/AG/NIA NIH HHS/United States ; P01 AG012435/AG/NIA NIH HHS/United States ; P30 AG066530/AG/NIA NIH HHS/United States ; }, mesh = {Alzheimer Disease/diagnostic imaging/genetics ; *Big Data ; Brain/*diagnostic imaging ; Case-Control Studies ; *Cloud Computing ; Genome-Wide Association Study/*methods ; Genomics/methods ; Humans ; Imaging, Three-Dimensional/methods ; Neuroimaging/*methods ; *Phenotype ; Polymorphism, Single Nucleotide/genetics ; }, abstract = {Large-scale, case-control genome-wide association studies (GWASs) have revealed genetic variations associated with diverse neurological and psychiatric disorders. Recent advances in neuroimaging and genomic databases of large healthy and diseased cohorts have empowered studies to characterize effects of the discovered genetic factors on brain structure and function, implicating neural pathways and genetic mechanisms in the underlying biology. However, the unprecedented scale and complexity of the imaging and genomic data requires new advanced biomedical data science tools to manage, process and analyze the data. 
In this work, we introduce Neuroimaging PheWAS (phenome-wide association study): a web-based system for searching over a wide variety of brain-wide imaging phenotypes to discover true system-level gene-brain relationships using a unified genotype-to-phenotype strategy. This design features a user-friendly graphical user interface (GUI) for anonymous data uploading, study definition and management, and interactive result visualizations as well as a cloud-based computational infrastructure and multiple state-of-the-art methods for statistical association analysis and multiple comparison correction. We demonstrated the potential of Neuroimaging PheWAS with a case study analyzing the influences of the apolipoprotein E (APOE) gene on various brain morphological properties across the brain in the Alzheimer's Disease Neuroimaging Initiative (ADNI) cohort. Benchmark tests were performed to evaluate the system's performance using data from UK Biobank. The Neuroimaging PheWAS system is freely available. 
It simplifies the execution of PheWAS on neuroimaging data and provides an opportunity for imaging genetics studies to elucidate routes at play for specific genetic variants on diseases in the context of detailed imaging phenotypic data.}, } @article {pmid32800692, year = {2020}, author = {McRoy, C and Patel, L and Gaddam, DS and Rothenberg, S and Herring, A and Hamm, J and Chelala, L and Weinstein, J and Smith, E and Awan, O}, title = {Radiology Education in the Time of COVID-19: A Novel Distance Learning Workstation Experience for Residents.}, journal = {Academic radiology}, volume = {27}, number = {10}, pages = {1467-1474}, pmid = {32800692}, issn = {1878-4046}, mesh = {*Betacoronavirus ; COVID-19 ; *Coronavirus Infections ; *Education, Distance ; *Internship and Residency ; *Pandemics ; *Pneumonia, Viral ; SARS-CoV-2 ; }, abstract = {RATIONALE AND OBJECTIVES: The coronavirus disease of 2019 (COVID-19) pandemic has challenged the educational missions of academic radiology departments nationwide. We describe a novel cloud-based HIPAA compliant and accessible education platform which simulates a live radiology workstation for continued education of first year radiology (R1) residents, with an emphasis on call preparation and peer to peer resident learning.

MATERIALS AND METHODS: Three tools were used in our education model: Pacsbin (Orion Medical Technologies, Baltimore, MD, pacsbin.com), Zoom (Zoom Video Communications, San Jose, CA, zoom.us), and Google Classroom (Google, Mountain View, CA, classroom.google.com). A senior radiology resident (R2-R4) (n = 7) driven workflow was established to provide scrollable Digital Imaging and Communications in Medicine (DICOM) based case collections to the R1 residents (n = 9) via Pacsbin. A centralized classroom was created using Google Classroom for assignments, reports, and discussion where attending radiologists could review content for accuracy. Daily case collections over an 8-week period from March to May were reviewed via Zoom video conference readout in small groups consisting of a R2-R4 teacher and R1 residents. Surveys were administered to R1 residents, R2-4 residents, and attending radiologist participants.

RESULTS: Hundred percent of R1 residents felt this model improved their confidence and knowledge to take independent call. Seventy-eight percent of the R1 residents (n = 7/9) demonstrated strong interest in continuing the project after pandemic related restrictions are lifted. Based on a Likert "helpfulness" scale of 1-5 with 5 being most helpful, the project earned an overall average rating of 4.9. Two R2-R4 teachers demonstrated increased interest in pursuing academic radiology.

CONCLUSION: In response to unique pandemic circumstances, our institution implemented a novel cloud-based distance learning solution to simulate the radiology workstation. This platform helped continue the program's educational mission, offered first year residents increased call preparation, and promoted peer to peer learning. This approach to case-based learning could be used at other institutions to educate residents.}, } @article {pmid32781671, year = {2020}, author = {D'Amico, G and L'Abbate, P and Liao, W and Yigitcanlar, T and Ioppolo, G}, title = {Understanding Sensor Cities: Insights from Technology Giant Company Driven Smart Urbanism Practices.}, journal = {Sensors (Basel, Switzerland)}, volume = {20}, number = {16}, pages = {}, pmid = {32781671}, issn = {1424-8220}, abstract = {The data-driven approach to sustainable urban development is becoming increasingly popular among the cities across the world. This is due to cities' attention in supporting smart and sustainable urbanism practices. In an era of digitalization of urban services and processes, which is upon us, platform urbanism is becoming a fundamental tool to support smart urban governance, and helping in the formation of a new version of cities-i.e., City 4.0. This new version utilizes urban dashboards and platforms in its operations and management tasks of its complex urban metabolism. These intelligent systems help in maintaining the robustness of our cities, integrating various sensors (e.g., internet-of-things) and big data analysis technologies (e.g., artificial intelligence) with the aim of optimizing urban infrastructures and services (e.g., water, waste, energy), and turning the urban system into a smart one. The study generates insights from the sensor city best practices by placing some of renowned projects, implemented by Huawei, Cisco, Google, Ericsson, Microsoft, and Alibaba, under the microscope. 
The investigation findings reveal that the sensor city approach: (a) Has the potential to increase the smartness and sustainability level of cities; (b) Manages to engage citizens and companies in the process of planning, monitoring and analyzing urban processes; (c) Raises awareness on the local environmental, social and economic issues, and; (d) Provides a novel city blueprint for urban administrators, managers and planners. Nonetheless, the use of advanced technologies-e.g., real-time monitoring stations, cloud computing, surveillance cameras-poses a multitude of challenges related to: (a) Quality of the data used; (b) Level of protection of traditional and cybernetic urban security; (c) Necessary integration between the various urban infrastructure, and; (d) Ability to transform feedback from stakeholders into innovative urban policies.}, } @article {pmid32777825, year = {2020}, author = {Giménez-Alventosa, V and Segrelles, JD and Moltó, G and Roca-Sogorb, M}, title = {APRICOT: Advanced Platform for Reproducible Infrastructures in the Cloud via Open Tools.}, journal = {Methods of information in medicine}, volume = {59}, number = {S 02}, pages = {e33-e45}, pmid = {32777825}, issn = {2511-705X}, mesh = {Biological Science Disciplines ; *Cloud Computing ; Computational Biology ; Databases, Factual ; Magnetic Resonance Imaging ; Positron-Emission Tomography ; *Reproducibility of Results ; Research ; *Software ; }, abstract = {BACKGROUND: Scientific publications are meant to exchange knowledge among researchers but the inability to properly reproduce computational experiments limits the quality of scientific research. Furthermore, bibliography shows that irreproducible preclinical research exceeds 50%, which produces a huge waste of resources on nonprofitable research at Life Sciences field. 
As a consequence, scientific reproducibility is being fostered to promote Open Science through open databases and software tools that are typically deployed on existing computational resources. However, some computational experiments require complex virtual infrastructures, such as elastic clusters of PCs, that can be dynamically provided from multiple clouds. Obtaining these infrastructures requires not only an infrastructure provider, but also advanced knowledge in the cloud computing field.

OBJECTIVES: The main aim of this paper is to improve reproducibility in life sciences to produce better and more cost-effective research. For that purpose, our intention is to simplify the infrastructure usage and deployment for researchers.

METHODS: This paper introduces Advanced Platform for Reproducible Infrastructures in the Cloud via Open Tools (APRICOT), an open source extension for Jupyter to deploy deterministic virtual infrastructures across multiclouds for reproducible scientific computational experiments. To exemplify its utilization and how APRICOT can improve the reproduction of experiments with complex computation requirements, two examples in the field of life sciences are provided. All requirements to reproduce both experiments are disclosed within APRICOT and, therefore, can be reproduced by the users.

RESULTS: To show the capabilities of APRICOT, we have processed a real magnetic resonance image to accurately characterize a prostate cancer using a Message Passing Interface cluster deployed automatically with APRICOT. In addition, the second example shows how APRICOT scales the deployed infrastructure, according to the workload, using a batch cluster. This example consists of a multiparametric study of a positron emission tomography image reconstruction.

CONCLUSION: APRICOT's benefits are the integration of specific infrastructure deployment, the management and usage for Open Science, making experiments that involve specific computational infrastructures reproducible. All the experiment steps and details can be documented at the same Jupyter notebook which includes infrastructure specifications, data storage, experimentation execution, results gathering, and infrastructure termination. Thus, distributing the experimentation notebook and needed data should be enough to reproduce the experiment.}, } @article {pmid32765566, year = {2020}, author = {Apolo-Apolo, OE and Pérez-Ruiz, M and Martínez-Guanter, J and Valente, J}, title = {A Cloud-Based Environment for Generating Yield Estimation Maps From Apple Orchards Using UAV Imagery and a Deep Learning Technique.}, journal = {Frontiers in plant science}, volume = {11}, number = {}, pages = {1086}, pmid = {32765566}, issn = {1664-462X}, abstract = {Farmers require accurate yield estimates, since they are key to predicting the volume of stock needed at supermarkets and to organizing harvesting operations. In many cases, the yield is visually estimated by the crop producer, but this approach is not accurate or time efficient. This study presents a rapid sensing and yield estimation scheme using off-the-shelf aerial imagery and deep learning. A Region-Convolutional Neural Network was trained to detect and count the number of apple fruit on individual trees located on the orthomosaic built from images taken by the unmanned aerial vehicle (UAV). The results obtained with the proposed approach were compared with apple counts made in situ by an agrotechnician, and an R[2] value of 0.86 was acquired (MAE: 10.35 and RMSE: 13.56). As only parts of the tree fruits were visible in the top-view images, linear regression was used to estimate the number of total apples on each tree. An R[2] value of 0.80 (MAE: 128.56 and RMSE: 130.56) was obtained. 
With the number of fruits detected and the tree coordinates, two shapefiles were generated using a Python script in Google Colab. With the previous information two yield maps were displayed: one with information per tree and another with information per tree row. We are confident that these results will help to maximize the crop producers' outputs via optimized orchard management.}, } @article {pmid32753501, year = {2020}, author = {Petit, RA and Read, TD}, title = {Bactopia: a Flexible Pipeline for Complete Analysis of Bacterial Genomes.}, journal = {mSystems}, volume = {5}, number = {4}, pages = {}, pmid = {32753501}, issn = {2379-5077}, support = {U54 CK000485/CK/NCEZID CDC HHS/United States ; U54CK000485/ACL/ACL HHS/United States ; }, abstract = {Sequencing of bacterial genomes using Illumina technology has become such a standard procedure that often data are generated faster than can be conveniently analyzed. We created a new series of pipelines called Bactopia, built using Nextflow workflow software, to provide efficient comparative genomic analyses for bacterial species or genera. Bactopia consists of a data set setup step (Bactopia Data Sets [BaDs]), which creates a series of customizable data sets for the species of interest, the Bactopia Analysis Pipeline (BaAP), which performs quality control, genome assembly, and several other functions based on the available data sets and outputs the processed data to a structured directory format, and a series of Bactopia Tools (BaTs) that perform specific postprocessing on some or all of the processed data. BaTs include pan-genome analysis, computing average nucleotide identity between samples, extracting and profiling the 16S genes, and taxonomic classification using highly conserved genes. It is expected that the number of BaTs will increase to fill specific applications in the future. 
As a demonstration, we performed an analysis of 1,664 public Lactobacillus genomes, focusing on Lactobacillus crispatus, a species that is a common part of the human vaginal microbiome. Bactopia is an open source system that can scale from projects as small as one bacterial genome to ones including thousands of genomes and that allows for great flexibility in choosing comparison data sets and options for downstream analysis. Bactopia code can be accessed at https://www.github.com/bactopia/bactopia. IMPORTANCE: It is now relatively easy to obtain a high-quality draft genome sequence of a bacterium, but bioinformatic analysis requires organization and optimization of multiple open source software tools. We present Bactopia, a pipeline for bacterial genome analysis, as an option for processing bacterial genome data. Bactopia also automates downloading of data from multiple public sources and species-specific customization. Because the pipeline is written in the Nextflow language, analyses can be scaled from individual genomes on a local computer to thousands of genomes using cloud resources. As a usage example, we processed 1,664 Lactobacillus genomes from public sources and used comparative analysis workflows (Bactopia Tools) to identify and analyze members of the L. crispatus species.}, } @article {pmid32751366, year = {2020}, author = {Navarro, E and Costa, N and Pereira, A}, title = {A Systematic Review of IoT Solutions for Smart Farming.}, journal = {Sensors (Basel, Switzerland)}, volume = {20}, number = {15}, pages = {}, pmid = {32751366}, issn = {1424-8220}, mesh = {Agriculture/instrumentation/*methods ; Electronic Data Processing ; *Internet of Things ; }, abstract = {The world population growth is increasing the demand for food production. Furthermore, the reduction of the workforce in rural areas and the increase in production costs are challenges for food production nowadays. 
Smart farming is a farm management concept that may use Internet of Things (IoT) to overcome the current challenges of food production. This work uses the preferred reporting items for systematic reviews (PRISMA) methodology to systematically review the existing literature on smart farming with IoT. The review aims to identify the main devices, platforms, network protocols, processing data technologies and the applicability of smart farming with IoT to agriculture. The review shows an evolution in the way data is processed in recent years. Traditional approaches mostly used data in a reactive manner. In more recent approaches, however, new technological developments allowed the use of data to prevent crop problems and to improve the accuracy of crop diagnosis.}, } @article {pmid32750932, year = {2020}, author = {Ranchal, R and Bastide, P and Wang, X and Gkoulalas-Divanis, A and Mehra, M and Bakthavachalam, S and Lei, H and Mohindra, A}, title = {Disrupting Healthcare Silos: Addressing Data Volume, Velocity and Variety With a Cloud-Native Healthcare Data Ingestion Service.}, journal = {IEEE journal of biomedical and health informatics}, volume = {24}, number = {11}, pages = {3182-3188}, doi = {10.1109/JBHI.2020.3001518}, pmid = {32750932}, issn = {2168-2208}, mesh = {Big Data ; *Cloud Computing ; *Computer Security ; Delivery of Health Care ; Eating ; Humans ; }, abstract = {Healthcare enterprises are starting to adopt cloud computing due to its numerous advantages over traditional infrastructures. This has become a necessity because of the increased volume, velocity and variety of healthcare data, and the need to facilitate data correlation and large-scale analysis. Cloud computing infrastructures have the power to offer continuous acquisition of data from multiple heterogeneous sources, efficient data integration, and big data analysis. 
At the same time, security, availability, and disaster recovery are critical factors aiding towards the adoption of cloud computing. However, the migration of healthcare workloads to cloud is not straightforward due to the vagueness in healthcare data standards, heterogeneity and sensitive nature of healthcare data, and many regulations that govern its usage. This paper highlights the need for providing healthcare data acquisition using cloud infrastructures and presents the challenges, requirements, use-cases, and best practices for building a state-of-the-art healthcare data ingestion service on cloud.}, } @article {pmid32750051, year = {2020}, author = {Frake, AN and Peter, BG and Walker, ED and Messina, JP}, title = {Leveraging big data for public health: Mapping malaria vector suitability in Malawi with Google Earth Engine.}, journal = {PloS one}, volume = {15}, number = {8}, pages = {e0235697}, pmid = {32750051}, issn = {1932-6203}, support = {D43 TW009639/TW/FIC NIH HHS/United States ; U19 AI089683/AI/NIAID NIH HHS/United States ; }, mesh = {Animals ; Anopheles/physiology ; *Big Data ; Breeding ; Climate ; Humans ; Malaria/*epidemiology/transmission ; Malawi/epidemiology ; Mosquito Vectors/physiology ; *Public Health ; Search Engine ; Seasons ; }, abstract = {In an era of big data, the availability of satellite-derived global climate, terrain, and land cover imagery presents an opportunity for modeling the suitability of malaria disease vectors at fine spatial resolutions, across temporal scales, and over vast geographic extents. Leveraging cloud-based geospatial analytical tools, we present an environmental suitability model that considers water resources, flow accumulation areas, precipitation, temperature, vegetation, and land cover. 
In contrast to predictive models generated using spatially and temporally discontinuous mosquito presence information, this model provides continuous fine-spatial resolution information on the biophysical drivers of suitability. For the purposes of this study the model is parameterized for Anopheles gambiae s.s. in Malawi for the rainy (December-March) and dry seasons (April-November) in 2017; however, the model may be repurposed to accommodate different mosquito species, temporal periods, or geographical boundaries. Final products elucidate the drivers and potential habitat of Anopheles gambiae s.s. Rainy season results are presented by quartile of precipitation; Quartile four (Q4) identifies areas most likely to become inundated and shows 7.25% of Malawi exhibits suitable water conditions (water only) for Anopheles gambiae s.s., approximately 16% for water plus another factor, and 8.60% is maximally suitable, meeting suitability thresholds for water presence, terrain characteristics, and climatic conditions. Nearly 21% of Malawi is suitable for breeding based on land characteristics alone and 28.24% is suitable according to climate and land characteristics. Only 6.14% of the total land area is suboptimal. Dry season results show 25.07% of the total land area is suboptimal or unsuitable. Approximately 42% of Malawi is suitable based on land characteristics alone during the dry season, and 13.11% is suitable based on land plus another factor. Less than 2% meets suitability criteria for climate, water, and land criteria. 
Findings illustrate environmental drivers of suitability for malaria vectors, providing an opportunity for a more comprehensive approach to malaria control that includes not only modeled species distributions, but also the underlying drivers of suitability for a more effective approach to environmental management.}, } @article {pmid32749335, year = {2020}, author = {Kilper, DC and Peyghambarian, N}, title = {Changing evolution of optical communication systems at the network edges.}, journal = {Applied optics}, volume = {59}, number = {22}, pages = {G209-G218}, doi = {10.1364/AO.394119}, pmid = {32749335}, issn = {1539-4522}, abstract = {Metro and data center networks are growing rapidly, while global fixed Internet traffic growth shows evidence of slowing. An analysis of the distribution of network capacity versus distance reveals capacity gaps in networks important to wireless backhaul networks and cloud computing. These networks are built from layers of electronic aggregation switches. Photonic integration and software-defined networking control are identified as key enabling technologies for the use of optical switching in these applications. 
Advances in optical switching for data center and metro networks in the CIAN engineering research center are reviewed and examined as potential directions for optical communication system evolution.}, } @article {pmid32746308, year = {2020}, author = {Camara Gradim, LC and Archanjo Jose, M and Marinho Cezar da Cruz, D and de Deus Lopes, R}, title = {IoT Services and Applications in Rehabilitation: An Interdisciplinary and Meta-Analysis Review.}, journal = {IEEE transactions on neural systems and rehabilitation engineering : a publication of the IEEE Engineering in Medicine and Biology Society}, volume = {28}, number = {9}, pages = {2043-2052}, doi = {10.1109/TNSRE.2020.3005616}, pmid = {32746308}, issn = {1558-0210}, mesh = {Humans ; *Internet of Things ; *Wearable Electronic Devices ; }, abstract = {UNLABELLED: Internet of things (IoT) is a designation given to a technological system that can enhance possibilities of connectivity between people and things and has been showing to be an opportunity for developing and improving smart rehabilitation systems and helps in the e-Health area.

OBJECTIVE: To identify works involving IoT that deal with the development, architecture, application, implementation, and use of technological equipment in the area of patient rehabilitation. Technology or Method: A systematic review based on Kitchenham's suggestions combined with the PRISMA protocol. The search strategy was carried out comprehensively in the IEEE Xplore Digital Library, Web of Science and Scopus databases, with the data extraction method for assessment and analysis consisting only of primary study articles related to the IoT and Rehabilitation of patients.

RESULTS: We found 29 studies that addressed the research question, and all were classified based on scientific evidence.

CONCLUSIONS: This systematic review presents the current state of the art on the IoT in health rehabilitation and identifies findings in interdisciplinary researches in different clinical cases with technological systems including wearable devices and cloud computing. The gaps in IoT for rehabilitation include the need for more clinical randomized controlled trials and longitudinal studies. Clinical Impact: This paper has an interdisciplinary feature and includes areas such as Internet of Things Information and Communication Technology with their application to the medical and rehabilitation domains.}, } @article {pmid32731501, year = {2020}, author = {Jo, JH and Jo, B and Kim, JH and Choi, I}, title = {Implementation of IoT-Based Air Quality Monitoring System for Investigating Particulate Matter (PM10) in Subway Tunnels.}, journal = {International journal of environmental research and public health}, volume = {17}, number = {15}, pages = {}, pmid = {32731501}, issn = {1660-4601}, mesh = {Air Pollutants/*analysis ; Air Pollution/analysis/statistics & numerical data ; *Environmental Monitoring ; Particulate Matter/*analysis ; *Railroads ; Republic of Korea ; }, abstract = {Air quality monitoring for subway tunnels in South Korea is a topic of great interest because more than 8 million passengers per day use the subway, which has a concentration of particulate matter (PM10) greater than that of above ground. In this paper, an Internet of Things (IoT)-based air quality monitoring system, consisting of an air quality measurement device called Smart-Air, an IoT gateway, and a cloud computing web server, is presented to monitor the concentration of PM10 in subway tunnels. The goal of the system is to efficiently monitor air quality at any time and from anywhere by combining IoT and cloud computing technologies. This system was successfully implemented in Incheon's subway tunnels to investigate levels of PM10. 
The concentration of particulate matter was greatest between the morning and afternoon rush hours. In addition, the residence time of PM10 increased as the depth of the monitoring location increased. During the experimentation period, the South Korean government implemented an air quality management system. An analysis was performed to follow up after implementation and assess how the change improved conditions. Based on the experiments, the system was efficient and effective at monitoring particulate matter for improving air quality in subway tunnels.}, } @article {pmid32725321, year = {2020}, author = {Watts, P and Breedon, P and Nduka, C and Neville, C and Venables, V and Clarke, S}, title = {Cloud Computing Mobile Application for Remote Monitoring of Bell's Palsy.}, journal = {Journal of medical systems}, volume = {44}, number = {9}, pages = {149}, pmid = {32725321}, issn = {1573-689X}, support = {II-LA-0814-20008//National Institute for Health Research/ ; }, mesh = {*Bell Palsy ; Cloud Computing ; Computer Security ; Europe ; Humans ; *Mobile Applications ; }, abstract = {Mobile applications provide the healthcare industry with a means of connecting with patients in their own home utilizing their own personal mobile devices such as tablets and phones. This allows therapists to monitor the progress of people under their care from a remote location and all with the added benefit that patients are familiar with their own mobile devices; thereby reducing the time required to train patients with the new technology. There is also the added benefit to the health service that there is no additional cost required to purchase devices for use. The Facial Remote Activity Monitoring Eyewear (FRAME) mobile application and web service framework has been designed to work on the IOS and android platforms, the two most commonly used today. 
Results: The system utilizes secure cloud-based data storage to collect, analyse and store data; this allows for near real time, secure access remotely by therapists to monitor their patients and intervene when required. The underlying framework has been designed to be secure, anonymous and flexible to ensure compliance with the Data Protection Act and the latest General Data Protection Regulation (GDPR); this new standard came into effect in 2018 and replaces the Data Protection Act in the UK and Europe.}, } @article {pmid32719837, year = {2020}, author = {Krissaane, I and De Niz, C and Gutiérrez-Sacristán, A and Korodi, G and Ede, N and Kumar, R and Lyons, J and Manrai, A and Patel, C and Kohane, I and Avillach, P}, title = {Scalability and cost-effectiveness analysis of whole genome-wide association studies on Google Cloud Platform and Amazon Web Services.}, journal = {Journal of the American Medical Informatics Association : JAMIA}, volume = {27}, number = {9}, pages = {1425-1430}, pmid = {32719837}, issn = {1527-974X}, support = {K01 HL138259/HL/NHLBI NIH HHS/United States ; OT3 OD025466/OD/NIH HHS/United States ; OT3 HL142480/HL/NHLBI NIH HHS/United States ; }, mesh = {*Cloud Computing/economics ; Computer Communication Networks ; Cost-Benefit Analysis ; *Genome-Wide Association Study/economics/methods ; Genomics/methods ; Humans ; }, abstract = {OBJECTIVE: Advancements in human genomics have generated a surge of available data, fueling the growth and accessibility of databases for more comprehensive, in-depth genetic studies.

METHODS: We provide a straightforward and innovative methodology to optimize cloud configuration in order to conduct genome-wide association studies. We utilized Spark clusters on both Google Cloud Platform and Amazon Web Services, as well as Hail (http://doi.org/10.5281/zenodo.2646680) for analysis and exploration of genomic variants dataset.

RESULTS: Comparative evaluation of numerous cloud-based cluster configurations demonstrates a successful and unprecedented compromise between speed and cost for performing genome-wide association studies on 4 distinct whole-genome sequencing datasets. Results are consistent across the 2 cloud providers and could be highly useful for accelerating research in genetics.

CONCLUSIONS: We present a timely piece for one of the most frequently asked questions when moving to the cloud: what is the trade-off between speed and cost?}, } @article {pmid32719530, year = {2020}, author = {Li, B and Gould, J and Yang, Y and Sarkizova, S and Tabaka, M and Ashenberg, O and Rosen, Y and Slyper, M and Kowalczyk, MS and Villani, AC and Tickle, T and Hacohen, N and Rozenblatt-Rosen, O and Regev, A}, title = {Cumulus provides cloud-based data analysis for large-scale single-cell and single-nucleus RNA-seq.}, journal = {Nature methods}, volume = {17}, number = {8}, pages = {793-798}, pmid = {32719530}, issn = {1548-7105}, support = {/HHMI/Howard Hughes Medical Institute/United States ; T32 HG002295/HG/NHGRI NIH HHS/United States ; RC2 DK116691/DK/NIDDK NIH HHS/United States ; RM1 HG006193/HG/NHGRI NIH HHS/United States ; T32 CA207021/CA/NCI NIH HHS/United States ; }, mesh = {Cloud Computing/*economics ; Computational Biology/economics/*methods ; High-Throughput Nucleotide Sequencing/economics/*methods ; Sequence Analysis, RNA/economics/*methods ; Single-Cell Analysis/*methods ; }, abstract = {Massively parallel single-cell and single-nucleus RNA sequencing has opened the way to systematic tissue atlases in health and disease, but as the scale of data generation is growing, so is the need for computational pipelines for scaled analysis. Here we developed Cumulus-a cloud-based framework for analyzing large-scale single-cell and single-nucleus RNA sequencing datasets. Cumulus combines the power of cloud computing with improvements in algorithm and implementation to achieve high scalability, low cost, user-friendliness and integrated support for a comprehensive set of features. 
We benchmark Cumulus on the Human Cell Atlas Census of Immune Cells dataset of bone marrow cells and show that it substantially improves efficiency over conventional frameworks, while maintaining or improving the quality of results, enabling large-scale studies.}, } @article {pmid32707801, year = {2020}, author = {Song, Y and Zhu, Y and Nan, T and Hou, J and Du, S and Song, S}, title = {Accelerating Faceting Wide-Field Imaging Algorithm with FPGA for SKA Radio Telescope as a Vast Sensor Array.}, journal = {Sensors (Basel, Switzerland)}, volume = {20}, number = {15}, pages = {}, pmid = {32707801}, issn = {1424-8220}, support = {U1831118//the National Natural Science Foundation of China/ ; }, abstract = {The SKA (Square Kilometer Array) radio telescope will become the most sensitive telescope by correlating a huge number of antenna nodes to form a vast array of sensors in a region over one hundred kilometers. Faceting, the wide-field imaging algorithm, is a novel approach towards solving image construction from sensing data where earth surface curves cannot be ignored. However, the traditional processor of cloud computing, even if the most sophisticated supercomputer is used, cannot meet the extremely high computation performance requirement. In this paper, we propose the design and implementation of high-efficiency FPGA (Field Programmable Gate Array) -based hardware acceleration of the key algorithm, faceting in SKA by focusing on phase rotation and gridding, which are the most time-consuming phases in the faceting algorithm. Through the analysis of algorithm behavior and bottleneck, we design and optimize the memory architecture and computing logic of the FPGA-based accelerator. The simulation and tests on FPGA are done to confirm the acceleration result of our design and it is shown that the acceleration performance we achieved on phase rotation is 20× the result of the previous work. 
We then further designed and optimized an efficient microstructure of loop unrolling and pipeline for the gridding accelerator, and the designed system simulation was done to confirm the performance of our structure. The result shows that the acceleration ratio is 5.48 compared to the result tested on software in gridding parts. Hence, our approach enables efficient acceleration of the faceting algorithm on FPGAs with high performance to meet the computational constraints of SKA as a representative vast sensor array.}, } @article {pmid32706696, year = {2020}, author = {Saarikko, J and Niela-Vilen, H and Ekholm, E and Hamari, L and Azimi, I and Liljeberg, P and Rahmani, AM and Löyttyniemi, E and Axelin, A}, title = {Continuous 7-Month Internet of Things-Based Monitoring of Health Parameters of Pregnant and Postpartum Women: Prospective Observational Feasibility Study.}, journal = {JMIR formative research}, volume = {4}, number = {7}, pages = {e12417}, pmid = {32706696}, issn = {2561-326X}, abstract = {BACKGROUND: Monitoring during pregnancy is vital to ensure the mother's and infant's health. Remote continuous monitoring provides health care professionals with significant opportunities to observe health-related parameters in their patients and to detect any pathological signs at an early stage of pregnancy, and may thus partially replace traditional appointments.

OBJECTIVE: This study aimed to evaluate the feasibility of continuously monitoring the health parameters (physical activity, sleep, and heart rate) of nulliparous women throughout pregnancy and until 1 month postpartum, with a smart wristband and an Internet of Things (IoT)-based monitoring system.

METHODS: This prospective observational feasibility study used a convenience sample of 20 nulliparous women from the Hospital District of Southwest Finland. Continuous monitoring of physical activity/step counts, sleep, and heart rate was performed with a smart wristband for 24 hours a day, 7 days a week over 7 months (6 months during pregnancy and 1 month postpartum). The smart wristband was connected to a cloud server. The total number of possible monitoring days during pregnancy weeks 13 to 42 was 203 days and 28 days in the postpartum period.

RESULTS: Valid physical activity data were available for a median of 144 (range 13-188) days (75% of possible monitoring days), and valid sleep data were available for a median of 137 (range 0-184) days (72% of possible monitoring days) per participant during pregnancy. During the postpartum period, a median of 15 (range 0-25) days (54% of possible monitoring days) of valid physical activity data and 16 (range 0-27) days (57% of possible monitoring days) of valid sleep data were available. Physical activity decreased from the second trimester to the third trimester by a mean of 1793 (95% CI 1039-2548) steps per day (P<.001). The decrease continued by a mean of 1339 (95% CI 474-2205) steps to the postpartum period (P=.004). Sleep during pregnancy also decreased from the second trimester to the third trimester by a mean of 20 minutes (95% CI -0.7 to 42 minutes; P=.06) and sleep time shortened an additional 1 hour (95% CI 39 minutes to 1.5 hours) after delivery (P<.001). The mean resting heart rate increased toward the third trimester and returned to the early pregnancy level during the postpartum period.

CONCLUSIONS: The smart wristband with IoT technology was a feasible system for collecting representative data on continuous variables of health parameters during pregnancy. Continuous monitoring provides real-time information between scheduled appointments and thus may help target and tailor pregnancy follow-up.}, } @article {pmid32704048, year = {2020}, author = {Szamosfalvi, B and Yessayan, L}, title = {Innovations in CKRT: individualized therapy with fewer complications.}, journal = {Nature reviews. Nephrology}, volume = {16}, number = {10}, pages = {560-561}, pmid = {32704048}, issn = {1759-507X}, mesh = {Acute Kidney Injury/therapy ; Blood Coagulation Disorders/prevention & control ; Cloud Computing ; Continuous Renal Replacement Therapy/adverse effects/instrumentation/*methods ; Extracorporeal Membrane Oxygenation/instrumentation/methods ; Humans ; Inventions ; Monitoring, Physiologic/methods ; Precision Medicine/adverse effects/instrumentation/methods ; }, abstract = {Continuous kidney replacement therapy (CKRT) can be a lifesaving intervention for critically ill patients; however, mortality remains high. 
The adaptation of existing innovations, including anti-clotting measures; cloud-computing for optimized treatment prescribing and therapy monitoring; and real-time sensing of blood and/or filter effluent composition to CKRT devices has the potential to enable personalized care and improve the safety and efficacy of this therapy.}, } @article {pmid32679671, year = {2020}, author = {Jabbar, R and Kharbeche, M and Al-Khalifa, K and Krichen, M and Barkaoui, K}, title = {Blockchain for the Internet of Vehicles: A Decentralized IoT Solution for Vehicles Communication Using Ethereum.}, journal = {Sensors (Basel, Switzerland)}, volume = {20}, number = {14}, pages = {}, pmid = {32679671}, issn = {1424-8220}, support = {NPRP8-910-2-387//Qatar National Research Fund/ ; }, abstract = {The concept of smart cities has become prominent in modern metropolises due to the emergence of embedded and connected smart devices, systems, and technologies. They have enabled the connection of every "thing" to the Internet. Therefore, in the upcoming era of the Internet of Things, the Internet of Vehicles (IoV) will play a crucial role in newly developed smart cities. The IoV has the potential to solve various traffic and road safety problems effectively in order to prevent fatal crashes. However, a particular challenge in the IoV, especially in Vehicle-to-Vehicle (V2V) and Vehicle-to-Infrastructure (V2I) communications, is to ensure fast, secure transmission and accurate recording of the data. In order to overcome these challenges, this work is adapting Blockchain technology for real time application (RTA) to solve Vehicle-to-Everything (V2X) communications problems. Therefore, the main novelty of this paper is to develop a Blockchain-based IoT system in order to establish secure communication and create an entirely decentralized cloud computing platform. Moreover, the authors qualitatively tested the performance and resilience of the proposed system against common security attacks. 
Computational tests showed that the proposed solution solved the main challenges of Vehicle-to-X (V2X) communications such as security, centralization, and lack of privacy. In addition, it guaranteed an easy data exchange between different actors of intelligent transportation systems.}, } @article {pmid32679583, year = {2021}, author = {Onnela, JP}, title = {Opportunities and challenges in the collection and analysis of digital phenotyping data.}, journal = {Neuropsychopharmacology : official publication of the American College of Neuropsychopharmacology}, volume = {46}, number = {1}, pages = {45-54}, pmid = {32679583}, issn = {1740-634X}, mesh = {Data Collection ; *Machine Learning ; Phenotype ; Research Design ; *Smartphone ; }, abstract = {The broad adoption and use of smartphones has led to fundamentally new opportunities for capturing social, behavioral, and cognitive phenotypes in free-living settings, outside of research laboratories and clinics. Predicated on the use of existing personal devices rather than the introduction of additional instrumentation, smartphone-based digital phenotyping presents us with several opportunities and challenges in data collection and data analysis. These two aspects are strongly coupled, because decisions about what data to collect and how to collect it constrain what statistical analyses can be carried out, now and years later, and therefore ultimately determine what scientific, clinical, and public health questions may be asked and answered. Digital phenotyping combines the excitement of fast-paced technologies, smartphones, cloud computing and machine learning, with deep mathematical and statistical questions, and it does this in the service of a better understanding of our own behavior in ways that are objective, scalable, and reproducible. 
We will discuss some fundamental aspects of collection and analysis of digital phenotyping data, which takes us on a brief tour of several important scientific and technological concepts, from the open-source paradigm to computational complexity, with some unexpected insights provided by fields as varied as zoology and quantum mechanics.}, } @article {pmid34976554, year = {2020}, author = {Hussain, AA and Bouachir, O and Al-Turjman, F and Aloqaily, M}, title = {AI Techniques for COVID-19.}, journal = {IEEE access : practical innovations, open solutions}, volume = {8}, number = {}, pages = {128776-128795}, pmid = {34976554}, issn = {2169-3536}, abstract = {Artificial Intelligence (AI) intent is to facilitate human limits. It is getting a standpoint on human administrations, filled by the growing availability of restorative clinical data and quick progression of insightful strategies. Motivated by the need to highlight the need for employing AI in battling the COVID-19 Crisis, this survey summarizes the current state of AI applications in clinical administrations while battling COVID-19. Furthermore, we highlight the application of Big Data while understanding this virus. We also overview various intelligence techniques and methods that can be applied to various types of medical information-based pandemic. We classify the existing AI techniques in clinical data analysis, including neural systems, classical SVM, and edge significant learning. Also, an emphasis has been made on regions that utilize AI-oriented cloud computing in combating various similar viruses to COVID-19. This survey study is an attempt to benefit medical practitioners and medical researchers in overpowering their faced difficulties while handling COVID-19 big data. The investigated techniques put forth advances in medical data analysis with an exactness of up to 90%. 
We further end up with a detailed discussion about how AI implementation can be a huge advantage in combating various similar viruses.}, } @article {pmid35662897, year = {2020}, author = {Shao, D and Kellogg, G and Mahony, S and Lai, W and Pugh, BF}, title = {PEGR: a management platform for ChIP-based next generation sequencing pipelines.}, journal = {PEARC20 : Practice and Experience in Advanced Research Computing 2020 : Catch the wave : July 27-31, 2020, Portland, Or Virtual Conference. Practice and Experience in Advanced Research Computing (Conference) (2020 : Online)}, volume = {2020}, number = {}, pages = {285-292}, pmid = {35662897}, support = {R01 ES013768/ES/NIEHS NIH HHS/United States ; R01 GM125722/GM/NIGMS NIH HHS/United States ; }, abstract = {There has been a rapid development in genome sequencing, including high-throughput next generation sequencing (NGS) technologies, automation in biological experiments, new bioinformatics tools and utilization of high-performance computing and cloud computing. ChIP-based NGS technologies, e.g. ChIP-seq and ChIP-exo, are widely used to detect the binding sites of DNA-interacting proteins in the genome and help us to have a deeper mechanistic understanding of genomic regulation. As sequencing data is generated at an unprecedented pace from the ChIP-based NGS pipelines, there is an urgent need for a metadata management system. To meet this need, we developed the Platform for Eukaryotic Genomic Regulation (PEGR), a web service platform that logs metadata for samples and sequencing experiments, manages the data processing workflows, and provides reporting and visualization. PEGR links together people, samples, protocols, DNA sequencers and bioinformatics computation. With the help of PEGR, scientists can have a more integrated understanding of the sequencing data and better understand the scientific mechanisms of genomic regulation. In this paper, we present the architecture and the major functionalities of PEGR. 
We also share our experience in developing this application and discuss the future directions.}, } @article {pmid35615582, year = {2020}, author = {Choi, IK and Abeysinghe, E and Coulter, E and Marru, S and Pierce, M and Liu, X}, title = {TopPIC Gateway: A Web Gateway for Top-Down Mass Spectrometry Data Interpretation.}, journal = {PEARC20 : Practice and Experience in Advanced Research Computing 2020 : Catch the wave : July 27-31, 2020, Portland, Or Virtual Conference. Practice and Experience in Advanced Research Computing (Conference) (2020 : Online)}, volume = {2020}, number = {}, pages = {461-464}, pmid = {35615582}, support = {R01 GM118470/GM/NIGMS NIH HHS/United States ; U54 AG065181/AG/NIA NIH HHS/United States ; }, abstract = {Top-down mass spectrometry-based proteomics has become the method of choice for identifying and quantifying intact proteoforms in biological samples. We present a web-based gateway for TopPIC suite, a widely used software suite consisting of four software tools for top-down mass spectrometry data interpretation: TopFD, TopPIC, TopMG, and TopDiff. The gateway enables the community to use heterogeneous collection of computing resources that includes high performance computing clusters at Indiana University and virtual clusters on XSEDE's Jetstream Cloud resource for top-down mass spectral data analysis using TopPIC suite. 
The gateway will be a useful resource for proteomics researchers and students who have limited access to high-performance computing resources or who are not familiar with interacting with server-side supercomputers.}, } @article {pmid35098264, year = {2020}, author = {Sivagnanam, S and Gorman, W and Doherty, D and Neymotin, SA and Fang, S and Hovhannisyan, H and Lytton, WW and Dura-Bernal, S}, title = {Simulating Large-scale Models of Brain Neuronal Circuits using Google Cloud Platform.}, journal = {PEARC20 : Practice and Experience in Advanced Research Computing 2020 : Catch the wave : July 27-31, 2020, Portland, Or Virtual Conference. Practice and Experience in Advanced Research Computing (Conference) (2020 : Online)}, volume = {2020}, number = {}, pages = {505-509}, pmid = {35098264}, support = {R01 DC012947/DC/NIDCD NIH HHS/United States ; U01 EB017695/EB/NIBIB NIH HHS/United States ; U24 EB028998/EB/NIBIB NIH HHS/United States ; }, abstract = {Biophysically detailed modeling provides an unmatched method to integrate data from many disparate experimental studies, and manipulate and explore with high precision the resulting brain circuit simulation. We developed a detailed model of the brain motor cortex circuits, simulating over 10,000 biophysically detailed neurons and 30 million synaptic connections. Optimization and evaluation of the cortical model parameters and responses was achieved via parameter exploration using grid search parameter sweeps and evolutionary algorithms. This involves running tens of thousands of simulations requiring significant computational resources. This paper describes our experience in setting up and using Google Compute Platform (GCP) with Slurm to run these large-scale simulations. 
We describe the best practices and solutions to the issues that arose during the process, and present preliminary results from running simulations on GCP.}, } @article {pmid34336373, year = {2020}, author = {Qu, X and Wu, Y and Liu, J and Cui, L}, title = {HRV-Spark: Computing Heart Rate Variability Measures Using Apache Spark.}, journal = {Proceedings. IEEE International Conference on Bioinformatics and Biomedicine}, volume = {2020}, number = {}, pages = {}, pmid = {34336373}, issn = {2156-1125}, support = {R01 NS116287/NS/NINDS NIH HHS/United States ; }, abstract = {Heart rate variability (HRV) analysis has been serving as a significant promising marker in clinical research over the last few decades. The rapidly growing heart rate data generated from various devices, particularly the electrocardiograph (ECG), need to be stored properly and processed timely. There is a pressing need to develop efficient approaches for performing HRV analyses based on ECG signals. In this paper, we introduce a cloud computing approach (called HRV-Spark) to compute HRV measures in parallel by leveraging Apache Spark and a QRS detection algorithm in [1]. We ran HRV-Spark on Amazon Web Services (AWS) clusters using large-scale datasets in the National Sleep Research Resource. We evaluated the performance and scalability of HRV-Spark in terms of the number of computing nodes in the AWS cluster, the size of the input datasets, and the hardware configuration of the computing nodes. The results show that HRV-Spark is an efficient and scalable approach for computing HRV measures.}, } @article {pmid33693367, year = {2019}, author = {Werner, M}, title = {Parallel Processing Strategies for Big Geospatial Data.}, journal = {Frontiers in big data}, volume = {2}, number = {}, pages = {44}, pmid = {33693367}, issn = {2624-909X}, abstract = {This paper provides an abstract analysis of parallel processing strategies for spatial and spatio-temporal data. 
It isolates aspects such as data locality and computational locality as well as redundancy and locally sequential access as central elements of parallel algorithm design for spatial data. Furthermore, the paper gives some examples from simple and advanced GIS and spatial data analysis highlighting both that big data systems have been around long before the current hype of big data and that they follow some design principles which are inevitable for spatial data including distributed data structures and messaging, which are, however, incompatible with the popular MapReduce paradigm. Throughout this discussion, the need for a replacement or extension of the MapReduce paradigm for spatial data is derived. This paradigm should be able to deal with the imperfect data locality inherent to spatial data hindering full independence of non-trivial computational tasks. We conclude that more research is needed and that spatial big data systems should pick up more concepts like graphs, shortest paths, raster data, events, and streams at the same time instead of solving exactly the set of spatially separable problems such as line simplifications or range queries in many different ways.}, } @article {pmid33816885, year = {2019}, author = {Capuccini, M and Larsson, A and Carone, M and Novella, JA and Sadawi, N and Gao, J and Toor, S and Spjuth, O}, title = {On-demand virtual research environments using microservices.}, journal = {PeerJ. Computer science}, volume = {5}, number = {}, pages = {e232}, pmid = {33816885}, issn = {2376-5992}, abstract = {The computational demands for scientific applications are continuously increasing. The emergence of cloud computing has enabled on-demand resource allocation. However, relying solely on infrastructure as a service does not achieve the degree of flexibility required by the scientific community. 
Here we present a microservice-oriented methodology, where scientific applications run in a distributed orchestration platform as software containers, referred to as on-demand, virtual research environments. The methodology is vendor agnostic and we provide an open source implementation that supports the major cloud providers, offering scalable management of scientific pipelines. We demonstrate applicability and scalability of our methodology in life science applications, but the methodology is general and can be applied to other scientific domains.}, } @article {pmid34104428, year = {2019}, author = {Schooley, B and San Nicolas-Rocca, T and Burkhard, R}, title = {Cloud-based multi-media systems for patient education and adherence: a pilot study to explore patient compliance with colonoscopy procedure preparation.}, journal = {Health systems (Basingstoke, England)}, volume = {10}, number = {2}, pages = {89-103}, pmid = {34104428}, issn = {2047-6965}, abstract = {Technology based patient education and adherence approaches are increasingly utilized to instruct and remind patients to prepare correctly for medical procedures. This study examines the interaction between two primary factors: patterns of patient adherence to challenging medical preparation procedures; and the demonstrated, measurable potential for cloud-based multi-media information technology (IT) interventions to improve patient adherence. An IT artifact was developed through prior design science research to serve information, reminders, and online video instruction modules to patients. The application was tested with 297 patients who were assessed clinically by physicians. Results indicate modest potential (43.4% relative improvement) for the IT-based approach for improving patient adherence to endoscopy preparations. Purposively designed cloud-based applications hold promise for aiding patients with complex medical procedure preparation. 
Health care provider involvement in the design and evaluation of a patient application may be an effective strategy to produce medical evidence and encourage the adoption of adherence apps.}, } @article {pmid33816864, year = {2019}, author = {Khani, H and Khanmirza, H}, title = {Randomized routing of virtual machines in IaaS data centers.}, journal = {PeerJ. Computer science}, volume = {5}, number = {}, pages = {e211}, pmid = {33816864}, issn = {2376-5992}, abstract = {Cloud computing technology has been a game changer in recent years. Cloud computing providers promise cost-effective and on-demand resource computing for their users. Cloud computing providers are running the workloads of users as virtual machines (VMs) in a large-scale data center consisting of a few thousand physical servers. Cloud data centers face highly dynamic workloads varying over time and many short tasks that demand quick resource management decisions. These data centers are large scale and the behavior of workload is unpredictable. The incoming VM must be assigned onto the proper physical machine (PM) in order to keep a balance between power consumption and quality of service. The scale and agility of cloud computing data centers are unprecedented so the previous approaches are fruitless. We suggest an analytical model for cloud computing data centers when the number of PMs in the data center is large. In particular, we focus on the assignment of VM onto PMs regardless of their current load. For exponential VM arrival with general distribution sojourn time, the mean power consumption is calculated. Then, we show the minimum power consumption under quality of service constraint will be achieved with randomized assignment of incoming VMs onto PMs. 
Extensive simulation supports the validity of our analytical model.}, } @article {pmid33323271, year = {2019}, author = {Faes, L and Wagner, SK and Fu, DJ and Liu, X and Korot, E and Ledsam, JR and Back, T and Chopra, R and Pontikos, N and Kern, C and Moraes, G and Schmid, MK and Sim, D and Balaskas, K and Bachmann, LM and Denniston, AK and Keane, PA}, title = {Automated deep learning design for medical image classification by health-care professionals with no coding experience: a feasibility study.}, journal = {The Lancet. Digital health}, volume = {1}, number = {5}, pages = {e232-e242}, doi = {10.1016/S2589-7500(19)30108-6}, pmid = {33323271}, issn = {2589-7500}, support = {CS-2014-14-023/DH_/Department of Health/United Kingdom ; MC_PC_19005/MRC_/Medical Research Council/United Kingdom ; NIHR-CS-2014-14-023/DH_/Department of Health/United Kingdom ; }, mesh = {Adult ; *Algorithms ; *Data Interpretation, Statistical ; *Deep Learning ; Feasibility Studies ; Fundus Oculi ; Humans ; Skin Neoplasms/diagnosis ; *Software ; Tomography, Optical Coherence/statistics & numerical data ; }, abstract = {BACKGROUND: Deep learning has the potential to transform health care; however, substantial expertise is required to train such models. We sought to evaluate the utility of automated deep learning software to develop medical image diagnostic classifiers by health-care professionals with no coding-and no deep learning-expertise.

METHODS: We used five publicly available open-source datasets: retinal fundus images (MESSIDOR); optical coherence tomography (OCT) images (Guangzhou Medical University and Shiley Eye Institute, version 3); images of skin lesions (Human Against Machine [HAM] 10000), and both paediatric and adult chest x-ray (CXR) images (Guangzhou Medical University and Shiley Eye Institute, version 3 and the National Institute of Health [NIH] dataset, respectively) to separately feed into a neural architecture search framework, hosted through Google Cloud AutoML, that automatically developed a deep learning architecture to classify common diseases. Sensitivity (recall), specificity, and positive predictive value (precision) were used to evaluate the diagnostic properties of the models. The discriminative performance was assessed using the area under the precision recall curve (AUPRC). In the case of the deep learning model developed on a subset of the HAM10000 dataset, we did external validation using the Edinburgh Dermofit Library dataset.

FINDINGS: Diagnostic properties and discriminative performance from internal validations were high in the binary classification tasks (sensitivity 73·3-97·0%; specificity 67-100%; AUPRC 0·87-1·00). In the multiple classification tasks, the diagnostic properties ranged from 38% to 100% for sensitivity and from 67% to 100% for specificity. The discriminative performance in terms of AUPRC ranged from 0·57 to 1·00 in the five automated deep learning models. In an external validation using the Edinburgh Dermofit Library dataset, the automated deep learning model showed an AUPRC of 0·47, with a sensitivity of 49% and a positive predictive value of 52%.

INTERPRETATION: All models, except the automated deep learning model trained on the multilabel classification task of the NIH CXR14 dataset, showed comparable discriminative performance and diagnostic properties to state-of-the-art performing deep learning algorithms. The performance in the external validation study was low. The quality of the open-access datasets (including insufficient information about patient flow and demographics) and the absence of measurement for precision, such as confidence intervals, constituted the major limitations of this study. The availability of automated deep learning platforms provide an opportunity for the medical community to enhance their understanding in model development and evaluation. Although the derivation of classification models without requiring a deep understanding of the mathematical, statistical, and programming principles is attractive, comparable performance to expertly designed models is limited to more elementary classification tasks. Furthermore, care should be placed in adhering to ethical principles when using these automated models to avoid discrimination and causing harm. Future studies should compare several application programming interfaces on thoroughly curated datasets.

FUNDING: National Institute for Health Research and Moorfields Eye Charity.}, } @article {pmid33267339, year = {2019}, author = {Li, J and Liang, X and Dai, C and Xiang, S}, title = {Reversible Data Hiding Algorithm in Fully Homomorphic Encrypted Domain.}, journal = {Entropy (Basel, Switzerland)}, volume = {21}, number = {7}, pages = {}, pmid = {33267339}, issn = {1099-4300}, support = {61772234//National Natural Science Foundation of China/ ; }, abstract = {This paper proposes a reversible data hiding scheme by exploiting the DGHV fully homomorphic encryption, and analyzes the feasibility of the scheme for data hiding from the perspective of information entropy. In the proposed algorithm, additional data can be embedded directly into a DGHV fully homomorphic encrypted image without any preprocessing. On the sending side, by using two encrypted pixels as a group, a data hider can get the difference of two pixels in a group. Additional data can be embedded into the encrypted image by shifting the histogram of the differences with the fully homomorphic property. On the receiver side, a legal user can extract the additional data by getting the difference histogram, and the original image can be restored by using modular arithmetic. Besides, the additional data can be extracted after decryption while the original image can be restored. Compared with the previous two typical algorithms, the proposed scheme can effectively avoid preprocessing operations before encryption and can successfully embed and extract additional data in the encrypted domain. 
The extensive testing results on the standard images have certified the effectiveness of the proposed scheme.}, } @article {pmid33267176, year = {2019}, author = {Tilei, G and Tong, L and Ming, Y and Rong, J}, title = {Research on a Trustworthiness Measurement Method of Cloud Service Construction Processes Based on Information Entropy.}, journal = {Entropy (Basel, Switzerland)}, volume = {21}, number = {5}, pages = {}, pmid = {33267176}, issn = {1099-4300}, abstract = {The popularity of cloud computing has made cloud services gradually become the leading computing model nowadays. The trustworthiness of cloud services depends mainly on construction processes. The trustworthiness measurement of cloud service construction processes (CSCPs) is crucial for cloud service developers. It can help to find out the causes of failures and to improve the development process, thereby ensuring the quality of cloud service. Herein, firstly, a trustworthiness hierarchy model of CSCP was proposed, and the influential factors of the processes were identified following the international standard ISO/IEC 12207 of the software development process. Further, a method was developed combined with the theory of information entropy and the concept of trustworthiness. It aimed to calculate the risk uncertainty and risk loss expectation affecting trustworthiness. Also, the trustworthiness of cloud service and its main construction processes were calculated. 
Finally, the feasibility of the measurement method was verified through a case study, and through comparing with AHP and CMM/CMMI methods, the advantages of this method were embodied.}, } @article {pmid34366521, year = {2019}, author = {Afgan, E and Lonie, A and Taylor, J and Goonasekera, N}, title = {CloudLaunch: Discover and Deploy Cloud Applications.}, journal = {Future generations computer systems : FGCS}, volume = {94}, number = {}, pages = {802-810}, pmid = {34366521}, issn = {0167-739X}, support = {MR/L015080/1/MRC_/Medical Research Council/United Kingdom ; U01 CA184826/CA/NCI NIH HHS/United States ; U24 HG010263/HG/NHGRI NIH HHS/United States ; U41 HG006620/HG/NHGRI NIH HHS/United States ; }, abstract = {Cloud computing is a common platform for delivering software to end users. However, the process of making complex-to-deploy applications available across different cloud providers requires isolated and uncoordinated application-specific solutions, often locking-in developers to a particular cloud provider. Here, we present the CloudLaunch application as a uniform platform for discovering and deploying applications for different cloud providers. CloudLaunch allows arbitrary applications to be added to a catalog with each application having its own customizable user interface and control over the launch process, while preserving cloud-agnosticism so that authors can easily make their applications available on multiple clouds with minimal effort. It then provides a uniform interface for launching available applications by end users across different cloud providers. 
Architecture details are presented along with examples of different deployable applications that highlight architectural features.}, } @article {pmid34384573, year = {2019}, author = {Park, SY and Nanda, S and Faraci, G and Park, Y and Lee, HY}, title = {CCMP: Software-as-a-service approach for fully-automated microbiome profiling.}, journal = {Journal of biomedical informatics}, volume = {100S}, number = {}, pages = {100040}, doi = {10.1016/j.yjbinx.2019.100040}, pmid = {34384573}, issn = {1532-0480}, abstract = {Microbiome profiling holds great promise for the development of novel disease biomarkers and therapeutics. Next-generation sequencing is currently the preferred method for microbiome data collection and multiple standardized tools, packages, and pipelines have been developed for the purpose of raw data processing and microbial annotation. However, these currently available pipelines come with entry-level barriers such as high-performance hardware, software installation, and sequential command-line scripting that often deter end-users. We thus created Cloud Computing for Microbiome Profiling (CCMP, https://ccmp.usc.edu), a public cloud-based web tool which combines the analytical power of current microbiome analysis platforms with a user-friendly interface. CCMP is a free-of-charge software-as-a-service (SaaS) that simplifies user experience by enabling users to complete their analysis in a single step, uploading raw sequencing data files. Once users upload 16S ribosomal RNA gene sequence data, our pipeline performs taxonomic annotation, abundance profiling, and statistical tests to report microbiota signatures altered by diseases or experimental conditions. CCMP took a 125 gigabyte (GB) input of 16S ribosomal RNA gene sequence data from 1052 specimens in FASTQ format and reported figures and tables of taxonomic annotations, statistical tests, α and β diversity calculations, and principal coordinate analyses within 21 h. 
CCMP is the first fully-automated web interface that integrates three key solutions for large-scale data analysis: cloud computing, fast file transfer technology, and microbiome analysis tools. As a reliable platform that supplies consistent microbiome analysis, CCMP will advance microbiome research by making effortful bioinformatics easily accessible to the public.}, } @article {pmid33490308, year = {2019}, author = {Chen, M and Miao, Y and Gharavi, H and Hu, L and Humar, I}, title = {Intelligent Traffic Adaptive Resource Allocation for Edge Computing-based 5G Networks.}, journal = {IEEE transactions on cognitive communications and networking}, volume = {6}, number = {2}, pages = {}, pmid = {33490308}, issn = {2332-7731}, support = {9999-NIST/ImNIST/Intramural NIST DOC/United States ; }, abstract = {The popularity of smart mobile devices has led to a tremendous increase in mobile traffic, which has put a considerable strain on the fifth generation of mobile communication networks (5G). Among the three application scenarios covered by 5G, ultra-high reliability and ultra-low latency (uRLLC) communication can best be realized with the assistance of artificial intelligence. For a combined 5G, edge computing and IoT-Cloud (a platform that integrates the Internet of Things and cloud) in particular, there remain many challenges to meet the uRLLC latency and reliability requirements despite a tremendous effort to develop smart data-driven methods. Therefore, this paper mainly focuses on artificial intelligence for controlling mobile-traffic flow. In our approach, we first develop a traffic-flow prediction algorithm that is based on long short-term memory (LSTM) with an attention mechanism to train mobile-traffic data in single-site mode. The algorithm is capable of effectively predicting the peak value of the traffic flow. 
For a multi-site case, we present an intelligent IoT-based mobile traffic prediction-and-control architecture capable of dynamically dispatching communication and computing resources. In our experiments, we demonstrate the effectiveness of the proposed scheme in reducing communication latency and its impact on lowering packet-loss ratio. Finally, we present future work and discuss some of the open issues.}, } @article {pmid33013153, year = {2019}, author = {Kunicki, ZJ and Zambrotta, NS and Tate, MC and Surrusco, AR and Risi, MM and Harlow, LL}, title = {Keep Your Stats in the Cloud! Evaluating the Use of Google Sheets to Teach Quantitative Methods.}, journal = {Journal of statistics education : an international journal on the teaching and learning of statistics}, volume = {27}, number = {3}, pages = {188-197}, pmid = {33013153}, issn = {1069-1898}, support = {G20 RR030883/RR/NCRR NIH HHS/United States ; }, abstract = {Teaching quantitative methods at the undergraduate level is a difficult yet rewarding endeavor due to the challenges instructors face in presenting the material. One way to bolster student learning is through the use of statistical software packages. Google Sheets is a cloud-based spreadsheet program capable of many basic statistical procedures, which has yet to be evaluated for use in quantitative methods courses. This article contains pros and cons to using Google Sheets in the classroom, and provides an evaluation of student attitudes towards using Google Sheets in an introductory quantitative methods class. The results suggest favorable student attitudes towards Google Sheets and that attitudes towards Google Sheets show a positive relationship with quantitative self-efficacy. Thus, based on the positive student attitudes and the unique features of Google Sheets, it is a viable program to use in introductory methods classes. However, due to limited functionality, Google Sheets may not be useful for more advanced courses. 
Future research may want to evaluate the use of third-party Google Sheets applications, which can increase functionality, and the use of Google Sheets in online classes.}, } @article {pmid35095187, year = {2018}, author = {Thorsen, TJ and Kato, S and Loeb, NG and Rose, FG}, title = {Observation-Based Decomposition of Radiative Perturbations and Radiative Kernels.}, journal = {Journal of climate}, volume = {31}, number = {24}, pages = {10039-10058}, pmid = {35095187}, issn = {0894-8755}, support = {/SCMD-EarthScienceSystem/Science Earth Science System NASA/United States ; }, abstract = {The Clouds and the Earth's Radiant Energy System (CERES)-partial radiative perturbation [PRP (CERES-PRP)] methodology applies partial-radiative-perturbation-like calculations to observational datasets to directly isolate the individual cloud, atmospheric, and surface property contributions to the variability of the radiation budget. The results of these calculations can further be used to construct radiative kernels. A suite of monthly mean observation-based inputs are used for the radiative transfer, including cloud properties from either the diurnally resolved passive-sensor-based CERES synoptic (SYN) data or the combination of the CloudSat cloud radar and Cloud-Aerosol Lidar and Infrared Pathfinder Satellite Observations (CALIPSO) lidar. The CloudSat/CALIPSO cloud profiles are incorporated via a clustering method that obtains monthly mean cloud properties suitable for accurate radiative transfer calculations. The computed fluxes are validated using the TOA fluxes observed by CERES. Applications of the CERES-PRP methodology are demonstrated by computing the individual contributions to the variability of the radiation budget over multiple years and by deriving water vapor radiative kernels. The calculations for the former are used to show that an approximately linear decomposition of the total flux anomalies is achieved. 
The observation-based water vapor kernels were used to investigate the accuracy of the GCM-based NCAR CAM3.0 water vapor kernel. Differences between our observation-based kernel and the NCAR one are marginally larger than those inferred by previous comparisons among different GCM kernels.}, } @article {pmid34386295, year = {2018}, author = {Afgan, E and Jalili, V and Goonasekera, N and Taylor, J and Goecks, J}, title = {Federated Galaxy: Biomedical Computing at the Frontier.}, journal = {Proceedings. IEEE International Conference on Cloud Computing}, volume = {2018}, number = {}, pages = {}, pmid = {34386295}, issn = {2159-6190}, support = {R01 HG004909/HG/NHGRI NIH HHS/United States ; U41 HG006620/HG/NHGRI NIH HHS/United States ; U24 HG006620/HG/NHGRI NIH HHS/United States ; RC2 HG005542/HG/NHGRI NIH HHS/United States ; R21 HG005133/HG/NHGRI NIH HHS/United States ; }, abstract = {Biomedical data exploration requires integrative analyses of large datasets using a diverse ecosystem of tools. For more than a decade, the Galaxy project (https://galaxyproject.org) has provided researchers with a web-based, user-friendly, scalable data analysis framework complemented by a rich ecosystem of tools (https://usegalaxy.org/toolshed) used to perform genomic, proteomic, metabolomic, and imaging experiments. Galaxy can be deployed on the cloud (https://launch.usegalaxy.org), institutional computing clusters, and personal computers, or readily used on a number of public servers (e.g., https://usegalaxy.org). In this paper, we present our plan and progress towards creating Galaxy-as-a-Service-a federation of distributed data and computing resources into a panoptic analysis platform. 
Users can leverage a pool of public and institutional resources, in addition to plugging-in their private resources, helping answer the challenge of resource divergence across various Galaxy instances and enabling seamless analysis of biomedical data.}, } @article {pmid35095126, year = {2018}, author = {Zhao, G and Gao, H}, title = {Automatic correction of contaminated images for assessment of reservoir surface area dynamics.}, journal = {Geophysical research letters}, volume = {45}, number = {12}, pages = {6092-6099}, pmid = {35095126}, issn = {0094-8276}, support = {80NSSC17K0358/ImNASA/Intramural NASA/United States ; 80NSSC18K0939/ImNASA/Intramural NASA/United States ; }, abstract = {The potential of using Landsat for assessing long-term water surface dynamics of individual reservoirs at a global scale has been significantly hindered by contaminations from clouds, cloud shadows, and terrain shadows. A novel algorithm was developed towards the automatic correction of these contaminated image classifications. By applying this algorithm to the dataset by Pekel et al. (2016), time series of area values for 6817 global reservoirs (with an integrated capacity of 6099 km[3]) were generated from 1984 to 2015. The number of effective images that can be used in each time series has been improved by 81% on average. The long-term average area for these global reservoirs was corrected from 1.73×10[5] km[2] to 3.94×10[5] km[2]. The results were proven to be robust through validation using observations, synthetic data, and visual inspection. 
This continuous reservoir surface area dataset can provide benefit to various applications (both at continental and local scales).}, } @article {pmid35531371, year = {2018}, author = {Xu, H and Yu, W and Griffith, D and Golmie, N}, title = {A Survey on Industrial Internet of Things: A Cyber-Physical Systems Perspective.}, journal = {IEEE access : practical innovations, open solutions}, volume = {6}, number = {}, pages = {}, pmid = {35531371}, issn = {2169-3536}, support = {9999-NIST/ImNIST/Intramural NIST DOC/United States ; }, abstract = {The vision of Industry 4.0, otherwise known as the fourth industrial revolution, is the integration of massively deployed smart computing and network technologies in industrial production and manufacturing settings for the purposes of automation, reliability, and control, implicating the development of an Industrial Internet of Things (I-IoT). Specifically, I-IoT is devoted to adopting the Internet of Things (IoT) to enable the interconnection of anything, anywhere, and at anytime in the manufacturing system context to improve the productivity, efficiency, safety and intelligence. As an emerging technology, I-IoT has distinct properties and requirements that distinguish it from consumer IoT, including the unique types of smart devices incorporated, network technologies and quality of service requirements, and strict needs of command and control. To more clearly understand the complexities of I-IoT and its distinct needs, and to present a unified assessment of the technology from a systems perspective, in this paper we comprehensively survey the body of existing research on I-IoT. Particularly, we first present the I-IoT architecture, I-IoT applications (i.e., factory automation (FA) and process automation (PA)) and their characteristics. We then consider existing research efforts from the three key systems aspects of control, networking and computing. 
Regarding control, we first categorize industrial control systems and then present recent and relevant research efforts. Next, considering networking, we propose a three-dimensional framework to explore the existing research space, and investigate the adoption of some representative networking technologies, including 5G, machine-to-machine (M2M) communication, and software defined networking (SDN). Similarly, concerning computing, we again propose a second three-dimensional framework that explores the problem space of computing in I-IoT, and investigate the cloud, edge, and hybrid cloud and edge computing platforms. Finally, we outline particular challenges and future research needs in control, networking, and computing systems, as well as for the adoption of machine learning, in an I-IoT context.}, } @article {pmid33265095, year = {2017}, author = {Santos, J and Wauters, T and Volckaert, B and De Turck, F}, title = {Fog Computing: Enabling the Management and Orchestration of Smart City Applications in 5G Networks.}, journal = {Entropy (Basel, Switzerland)}, volume = {20}, number = {1}, pages = {}, pmid = {33265095}, issn = {1099-4300}, support = {"Intelligent DEnse And Longe range IoT networks (IDEAL-IoT)" under Grant Agreement #S004017N//Fonds Wetenschappelijk Onderzoek (FWO)/ ; "Service-oriented management of a virtualised future internet"//Fonds Wetenschappelijk Onderzoek (FWO)/ ; }, abstract = {Fog computing extends the cloud computing paradigm by placing resources close to the edges of the network to deal with the upcoming growth of connected devices. Smart city applications, such as health monitoring and predictive maintenance, will introduce a new set of stringent requirements, such as low latency, since resources can be requested on-demand simultaneously by multiple devices at different locations. It is then necessary to adapt existing network technologies to future needs and design new architectural concepts to help meet these strict requirements. 
This article proposes a fog computing framework enabling autonomous management and orchestration functionalities in 5G-enabled smart cities. Our approach follows the guidelines of the European Telecommunications Standards Institute (ETSI) NFV MANO architecture extending it with additional software components. The contribution of our work is its fully-integrated fog node management system alongside the foreseen application layer Peer-to-Peer (P2P) fog protocol based on the Open Shortest Path First (OSPF) routing protocol for the exchange of application service provisioning information between fog nodes. Evaluations of an anomaly detection use case based on an air monitoring application are presented. Our results show that the proposed framework achieves a substantial reduction in network bandwidth usage and in latency when compared to centralized cloud solutions.}, } @article {pmid34430067, year = {2017}, author = {Maabreh, M and Qolomany, B and Alsmadi, I and Gupta, A}, title = {Deep Learning-based MSMS Spectra Reduction in Support of Running Multiple Protein Search Engines on Cloud.}, journal = {Proceedings. IEEE International Conference on Bioinformatics and Biomedicine}, volume = {2017}, number = {}, pages = {1909-1914}, pmid = {34430067}, issn = {2156-1125}, support = {R15 GM120820/GM/NIGMS NIH HHS/United States ; }, abstract = {The diversity of the available protein search engines with respect to the utilized matching algorithms, the low overlap ratios among their results and the disparity of their coverage encourage the community of proteomics to utilize ensemble solutions of different search engines. The advancing in cloud computing technology and the availability of distributed processing clusters can also provide support to this task. However, data transferring and results' combining, in this case, could be the major bottleneck. 
The flood of billions of observed mass spectra, hundreds of Gigabytes or potentially Terabytes of data, could easily cause the congestions, increase the risk of failure, poor performance, add more computations' cost, and waste available resources. Therefore, in this study, we propose a deep learning model in order to mitigate the traffic over cloud network and, thus reduce the cost of cloud computing. The model, which depends on the top 50 intensities and their m/z values of each spectrum, removes any spectrum which is predicted not to pass the majority voting of the participated search engines. Our results using three search engines namely: pFind, Comet and X!Tandem, and four different datasets are promising and promote the investment in deep learning to solve such type of Big data problems.}, } @article {pmid34423340, year = {2016}, author = {Goonasekera, N and Lonie, A and Taylor, J and Afgan, E}, title = {CloudBridge: a Simple Cross-Cloud Python Library.}, journal = {Proceedings of XSEDE16 : Diversity, Big Data, and Science at Scale : July 17-21, 2016, Intercontinental Miami Hotel, Miami, Florida, USA. Conference on Extreme Science and Engineering Discovery Environment (5th : 2016 : Miami, Fla.)}, volume = {2016}, number = {}, pages = {}, doi = {10.1145/2949550.2949648}, pmid = {34423340}, support = {U41 HG006620/HG/NHGRI NIH HHS/United States ; }, abstract = {With clouds becoming a standard target for deploying applications, it is more important than ever to be able to seamlessly utilise resources and services from multiple providers. Proprietary vendor APIs make this challenging and lead to conditional code being written to accommodate various API differences, requiring application authors to deal with these complexities and to test their applications against each supported cloud. In this paper, we describe an open source Python library called CloudBridge that provides a simple, uniform, and extensible API for multiple clouds. 
The library defines a standard 'contract' that all supported providers must implement, and an extensive suite of conformance tests to ensure that any exposed behavior is uniform across cloud providers, thus allowing applications to confidently utilise any of the supported clouds without any cloud-specific code or testing.}, } @article {pmid33907528, year = {2012}, author = {Afgan, E and Baker, D and , and Nekrutenko, A and Taylor, J}, title = {A reference model for deploying applications in virtualized environments.}, journal = {Concurrency and computation : practice & experience}, volume = {24}, number = {12}, pages = {1349-1361}, pmid = {33907528}, issn = {1532-0626}, support = {R01 HG004909/HG/NHGRI NIH HHS/United States ; RC2 HG005542/HG/NHGRI NIH HHS/United States ; }, abstract = {Modern scientific research has been revolutionized by the availability of powerful and flexible computational infrastructure. Virtualization has made it possible to acquire computational resources on demand. Establishing and enabling use of these environments is essential, but their widespread adoption will only succeed if they are transparently usable. Requiring changes to applications being deployed or requiring users to change how they utilize those applications represent barriers to the infrastructure acceptance. The problem lies in the process of deploying applications so that they can take advantage of the elasticity of the environment and deliver it transparently to users. Here, we describe a reference model for deploying applications into virtualized environments. The model is rooted in the low-level components common to a range of virtualized environments and it describes how to compose those otherwise dispersed components into a coherent unit. 
Use of the model enables applications to be deployed into the new environment without any modifications, it imposes minimal overhead on management of the infrastructure required to run the application, and yields a set of higher-level services as a byproduct of the component organization and the underlying infrastructure. We provide a fully functional sample application deployment and implement a framework for managing the overall application deployment.}, } @article {pmid34875801, year = {2011}, author = {O'Leary, MA and Kaufman, S}, title = {MorphoBank: phylophenomics in the "cloud".}, journal = {Cladistics : the international journal of the Willi Hennig Society}, volume = {27}, number = {5}, pages = {529-537}, doi = {10.1111/j.1096-0031.2011.00355.x}, pmid = {34875801}, issn = {1096-0031}, abstract = {A highly interoperable informatics infrastructure rapidly emerged to handle genomic data used for phylogenetics and was instrumental in the growth of molecular systematics. Parallel growth in software and databases to address needs peculiar to phylophenomics has been relatively slow and fragmented. Systematists currently face the challenge that Earth may hold tens of millions of species (living and fossil) to be described and classified. Grappling with research on this scale has increasingly resulted in work by teams, many constructing large phenomic supermatrices. Until now, phylogeneticists have managed data in single-user, file-based desktop software wholly unsuitable for real-time, team-based collaborative work. Furthermore, phenomic data often differ from genomic data in readily lending themselves to media representation (e.g. 2D and 3D images, video, sound). Phenomic data are a growing component of phylogenetics, and thus teams require the ability to record homology hypotheses using media and to share and archive these data. 
Here we describe MorphoBank, a web application and database leveraging software as a service methodology compatible with "cloud" computing technology for the construction of matrices of phenomic data. In its tenth year, and fully available to the scientific community at-large since inception, MorphoBank enables interactive collaboration not possible with desktop software, permitting self-assembling teams to develop matrices, in real time, with linked media in a secure web environment. MorphoBank also provides any user with tools to build character and media ontologies (rule sets) within matrices, and to display these as directed acyclic graphs. These rule sets record the phylogenetic interrelatedness of characters (e.g. if X is absent, Y is inapplicable, or X-Z characters share a media view). MorphoBank has enabled an order of magnitude increase in phylophenomic data collection: a recent collaboration by more than 25 researchers has produced a database of > 4500 phenomic characters supported by > 10 000 media. © The Willi Hennig Society 2011.}, } @article {pmid32673064, year = {2023}, author = {Mubarakali, A and Durai, AD and Alshehri, M and AlFarraj, O and Ramakrishnan, J and Mavaluru, D}, title = {Fog-Based Delay-Sensitive Data Transmission Algorithm for Data Forwarding and Storage in Cloud Environment for Multimedia Applications.}, journal = {Big data}, volume = {11}, number = {2}, pages = {128-136}, doi = {10.1089/big.2020.0090}, pmid = {32673064}, issn = {2167-647X}, mesh = {*Multimedia ; Algorithms ; Software ; *Internet of Things ; Japan ; }, abstract = {Fog computing is playing a vital role in data transmission to distributed devices in the Internet of Things (IoT) and another network paradigm. The fundamental element of fog computing is an additional layer added between an IoT device/node and a cloud server. These fog nodes are used to speed up time-critical applications. 
Current research efforts and user trends are pushing for fog computing, and the path is far from being paved. Unless it can reap the benefits of applying software-defined networks and network function virtualization techniques, network monitoring will be an additional burden for fog. However, the seamless integration of these techniques in fog computing is not easy and will be a challenging task. To overcome the issues as already mentioned, the fog-based delay-sensitive data transmission algorithm develops a robust optimal technique to ensure the low and predictable delay in delay-sensitive applications such as traffic monitoring and vehicle tracking applications. The method reduces latency by storing and processing the data close to the source of information with optimal depth in the network. The deployment results show that the proposed algorithm reduces 15.67 ms round trip time and 2 seconds averaged delay on 10 KB, 100 KB, and 1 MB data set India, Singapore, and Japan Amazon Datacenter Regions compared with conventional methodologies.}, } @article {pmid32664251, year = {2020}, author = {Slamnik-Kriještorac, N and Silva, EBE and Municio, E and Resende, HCC and Hadiwardoyo, SA and Marquez-Barja, JM}, title = {Network Service and Resource Orchestration: A Feature and Performance Analysis within the MEC-Enhanced Vehicular Network Context.}, journal = {Sensors (Basel, Switzerland)}, volume = {20}, number = {14}, pages = {}, pmid = {32664251}, issn = {1424-8220}, support = {825012//H2020 5G-CARMEN/ ; 723638//FED4FIRE+/ ; }, abstract = {By providing storage and computational resources at the network edge, which enables hosting applications closer to the mobile users, Multi-Access Edge Computing (MEC) uses the mobile backhaul, and the network core more efficiently, thereby reducing the overall latency. 
Fostering the synergy between 5G and MEC brings ultra-reliable low-latency in data transmission, and paves the way towards numerous latency-sensitive automotive use cases, with the ultimate goal of enabling autonomous driving. Despite the benefits of significant latency reduction, bringing MEC platforms into 5G-based vehicular networks imposes severe challenges towards poorly scalable network management, as MEC platforms usually represent a highly heterogeneous environment. Therefore, there is a strong need to perform network management and orchestration in an automated way, which, being supported by Software Defined Networking (SDN) and Network Function Virtualization (NFV), will further decrease the latency. With recent advances in SDN, along with NFV, which aim to facilitate management automation for tackling delay issues in vehicular communications, we study the closed-loop life-cycle management of network services, and map such cycle to the Management and Orchestration (MANO) systems, such as ETSI NFV MANO. In this paper, we provide a comprehensive overview of existing MANO solutions, studying their most important features to enable network service and resource orchestration in MEC-enhanced vehicular networks. 
Finally, using a real testbed setup, we conduct and present an extensive performance analysis of Open Baton and Open Source MANO that are, due to their lightweight resource footprint, and compliance to ETSI standards, suitable solutions for resource and service management and orchestration within the network edge.}, } @article {pmid32658738, year = {2020}, author = {Zeng, Y and Zhang, J}, title = {A machine learning model for detecting invasive ductal carcinoma with Google Cloud AutoML Vision.}, journal = {Computers in biology and medicine}, volume = {122}, number = {}, pages = {103861}, doi = {10.1016/j.compbiomed.2020.103861}, pmid = {32658738}, issn = {1879-0534}, mesh = {*Carcinoma, Ductal ; Humans ; *Machine Learning ; Neural Networks, Computer ; }, abstract = {OBJECTIVES: This study is aimed to assess the feasibility of AutoML technology for the identification of invasive ductal carcinoma (IDC) in whole slide images (WSI).

METHODS: The study presents an experimental machine learning (ML) model based on Google Cloud AutoML Vision instead of a handcrafted neural network. A public dataset of 278,124 labeled histopathology images is used as the original dataset for the model creation. In order to balance the number of positive and negative IDC samples, this study also augments the original public dataset by rotating a large portion of positive image samples. As a result, a total number of 378,215 labeled images are applied.

RESULTS: A score of 91.6\% average accuracy is achieved during the model evaluation as measured by the area under precision-recall curve (AuPRC). A subsequent test on a held-out test dataset (unseen by the model) yields a balanced accuracy of 84.6\%. These results outperform the ones reported in the earlier studies. Similar performance is observed from a generalization test with new breast tissue samples we collected from the hospital.

CONCLUSIONS: The results obtained from this study demonstrate the maturity and feasibility of an AutoML approach for IDC identification. The study also shows the advantage of AutoML approach when combined at scale with cloud computing.}, } @article {pmid32657996, year = {2020}, author = {Wang, SY and Pershing, S and Lee, AY and , }, title = {Big data requirements for artificial intelligence.}, journal = {Current opinion in ophthalmology}, volume = {31}, number = {5}, pages = {318-323}, pmid = {32657996}, issn = {1531-7021}, support = {K23 EY029246/EY/NEI NIH HHS/United States ; P30 EY010572/EY/NEI NIH HHS/United States ; T15 LM007033/LM/NLM NIH HHS/United States ; }, mesh = {Artificial Intelligence/*standards ; *Big Data ; Electronic Health Records ; Humans ; Ophthalmology/*standards ; }, abstract = {PURPOSE OF REVIEW: To summarize how big data and artificial intelligence technologies have evolved, their current state, and next steps to enable future generations of artificial intelligence for ophthalmology.

RECENT FINDINGS: Big data in health care is ever increasing in volume and variety, enabled by the widespread adoption of electronic health records (EHRs) and standards for health data information exchange, such as Digital Imaging and Communications in Medicine and Fast Healthcare Interoperability Resources. Simultaneously, the development of powerful cloud-based storage and computing architectures supports a fertile environment for big data and artificial intelligence in health care. The high volume and velocity of imaging and structured data in ophthalmology is one of the reasons why ophthalmology is at the forefront of artificial intelligence research. Still needed are consensus labeling conventions for performing supervised learning on big data, promotion of data sharing and reuse, standards for sharing artificial intelligence model architectures, and access to artificial intelligence models through open application program interfaces (APIs).

SUMMARY: Future requirements for big data and artificial intelligence include fostering reproducible science, continuing open innovation, and supporting the clinical use of artificial intelligence by promoting standards for data labels, data sharing, artificial intelligence model architecture sharing, and accessible code and APIs.}, } @article {pmid32646548, year = {2020}, author = {Jiang, W and Guo, L and Wu, H and Ying, J and Yang, Z and Wei, B and Pan, F and Han, Y}, title = {Use of a smartphone for imaging, modelling, and evaluation of keloids.}, journal = {Burns : journal of the International Society for Burn Injuries}, volume = {46}, number = {8}, pages = {1896-1902}, doi = {10.1016/j.burns.2020.05.026}, pmid = {32646548}, issn = {1879-1409}, mesh = {Adult ; Burns/complications/diagnostic imaging ; China ; Female ; Humans ; Imaging, Three-Dimensional/methods/*standards/statistics & numerical data ; Keloid/*diagnostic imaging ; Male ; Middle Aged ; Reproducibility of Results ; Smartphone/instrumentation/*standards/statistics & numerical data ; }, abstract = {OBJECTIVE: We used a smartphone to construct three-dimensional (3D) models of keloids, then quantitatively simulate and evaluate these tissues.

METHODS: We uploaded smartphone photographs of 33 keloids on the chest, shoulder, neck, limbs, or abdomen of 28 patients. We used the parallel computing power of a graphics processing unit to calculate the spatial co-ordinates of each pixel in the cloud, then generated 3D models. We obtained the longest diameter, thickness, and volume of each keloid, then compared these data to findings obtained by traditional methods.

RESULTS: Measurement repeatability was excellent: intraclass correlation coefficients were 0.998 for longest diameter, 0.978 for thickness, and 0.993 for volume. When measuring the longest diameter and volume, the results agreed with Vernier caliper measurements and with measurements obtained after the injection of water into the cavity. When measuring thickness, the findings were similar to those obtained by ultrasound. Bland-Altman analyses showed that the ratios of 95\% confidence interval extremes were 3.03\% for longest diameter, 3.03\% for volume, and 6.06\% for thickness.

CONCLUSION: Smartphones were used to acquire data that was then employed to construct 3D models of keloids; these models yielded quantitative data with excellent reliability and validity. The smartphone can serve as an additional tool for keloid diagnosis and research, and will facilitate medical treatment over the internet.}, } @article {pmid32637027, year = {2019}, author = {Tilton, JC and Wolfe, RE and Lin, GG and Dellomo, JJ}, title = {On-Orbit Measurement of the Effective Focal Length and Band-to-Band Registration of Satellite-Borne Whiskbroom Imaging Sensors.}, journal = {IEEE journal of selected topics in applied earth observations and remote sensing}, volume = {12}, number = {11}, pages = {4622-4633}, pmid = {32637027}, issn = {1939-1404}, support = {/SCMD-EarthScienceSystem/Science Earth Science System NASA/United States ; }, abstract = {We have developed an approach for the measurement of the Effective Focal Length (EFL) and Band-to-Band Registration (BBR) of selected spectral bands of satellite-borne whiskbroom imaging sensors from on-orbit data. Our approach is based on simulating the coarser spatial resolution whiskbroom sensor data with finer spatial resolution Landsat 7 ETM+ or Landsat 8 OLI data using the geolocation (Earth location) information from each sensor, and computing the correlation between the simulated and original data. For each scan of a selected spectral band of the whiskbroom data set, various subsets of the data are examined to find the subset with the highest spatial correlation between the original and simulated data using the nominal geolocation information. Then, for this best subset, the focal length value and the spatial shift are varied to find the values that produce the highest spatial correlation between the original and simulated data. 
This best focal length value is taken to be the measured instrument EFL and the best spatial shift is taken to be the registration of the whiskbroom data relative to the Landsat data, from which the BBR is inferred. Best results are obtained with cloud-free subsets with contrasting land features. This measurement is repeated over other scans with cloud-free subsets. We demonstrate our approach with on-orbit data from the Aqua and Terra MODIS instruments and SNPP and J1 VIIRS instruments.}, } @article {pmid32636922, year = {2020}, author = {Di Gennaro, SF and Matese, A}, title = {Evaluation of novel precision viticulture tool for canopy biomass estimation and missing plant detection based on 2.5D and 3D approaches using RGB images acquired by UAV platform.}, journal = {Plant methods}, volume = {16}, number = {}, pages = {91}, pmid = {32636922}, issn = {1746-4811}, abstract = {BACKGROUND: The knowledge of vine vegetative status within a vineyard plays a key role in canopy management in order to achieve a correct vine balance and reach the final desired yield/quality. Detailed information about canopy architecture and missing plants distribution provides useful support for farmers/winegrowers to optimize canopy management practices and the replanting process, respectively. In the last decade, there has been a progressive diffusion of UAV (Unmanned Aerial Vehicles) technologies for Precision Viticulture purposes, as fast and accurate methodologies for spatial variability of geometric plant parameters. The aim of this study was to implement an unsupervised and integrated procedure of biomass estimation and missing plants detection, using both the 2.5D-surface and 3D-alphashape methods.

RESULTS: Both methods showed good overall accuracy with respect to ground truth biomass measurements with high values of R[2] (0.71 and 0.80 for 2.5D and 3D, respectively). The 2.5D method led to an overestimation since it is derived by considering the vine as rectangular cuboid form. On the contrary, the 3D method provided more accurate results as a consequence of the alphashape algorithm, which is capable to detect each single shoot and holes within the canopy. Regarding the missing plants detection, the 3D approach confirmed better performance in cases of hidden conditions by shoots of adjacent plants or sparse canopy with some empty spaces along the row, where the 2.5D method based on the length of section of the row with lower thickness than the threshold used (0.10 m), tended to return false negatives and false positives, respectively.

CONCLUSIONS: This paper describes a rapid and objective tool for the farmer to promptly identify canopy management strategies and drive replanting decisions. The 3D approach provided results closer to real canopy volume and higher performance in missing plant detection. However, the dense cloud based analysis required more processing time. In a future perspective, given the continuous technological evolution in terms of computing performance, the overcoming of the current limit represented by the pre- and post-processing phases of the large image dataset should mainstream this methodology.}, } @article {pmid32635632, year = {2020}, author = {Pastor-Vargas, R and Tobarra, L and Robles-Gómez, A and Martin, S and Hernández, R and Cano, J}, title = {A WoT Platform for Supporting Full-Cycle IoT Solutions from Edge to Cloud Infrastructures: A Practical Case.}, journal = {Sensors (Basel, Switzerland)}, volume = {20}, number = {13}, pages = {}, pmid = {32635632}, issn = {1424-8220}, support = {eNMoLabs research project//Universidad Nacional de Educación a Distancia/ ; }, abstract = {Internet of Things (IoT) learning involves the acquisition of transversal skills ranging from the development based on IoT devices and sensors (edge computing) to the connection of the devices themselves to management environments that allow the storage and processing (cloud computing) of data generated by sensors. The usual development cycle for IoT applications consists of the following three stages: stage 1 corresponds to the description of the devices and basic interaction with sensors. In stage 2, data acquired by the devices/sensors are employed by communication models from the origin edge to the management middleware in the cloud. Finally, stage 3 focuses on processing and presentation models. These models present the most relevant indicators for IoT devices and sensors. 
Students must acquire all the necessary skills and abilities to understand and develop these types of applications, so lecturers need an infrastructure to enable the learning of development of full IoT applications. A Web of Things (WoT) platform named Labs of Things at UNED (LoT@UNED) has been used for this goal. This paper shows the fundamentals and features of this infrastructure, and how the different phases of the full development cycle of solutions in IoT environments are implemented using LoT@UNED. The proposed system has been tested in several computer science subjects. Students can perform remote experimentation with a collaborative WoT learning environment in the cloud, including the possibility to analyze the generated data by IoT sensors.}, } @article {pmid32635561, year = {2020}, author = {Lavysh, D and Neu-Yilik, G}, title = {UPF1-Mediated RNA Decay-Danse Macabre in a Cloud.}, journal = {Biomolecules}, volume = {10}, number = {7}, pages = {}, pmid = {32635561}, issn = {2218-273X}, support = {NE 593/2-1 , NE 593/2-2//Deutsche Forschungsgemeinschaft/International ; }, mesh = {Animals ; Fungal Proteins/metabolism ; Humans ; Nonsense Mediated mRNA Decay ; RNA Helicases/*metabolism ; RNA, Messenger/*chemistry ; Trans-Activators/*metabolism ; Yeasts/*metabolism ; }, abstract = {Nonsense-mediated RNA decay (NMD) is the prototype example of a whole family of RNA decay pathways that unfold around a common central effector protein called UPF1. While NMD in yeast appears to be a linear pathway, NMD in higher eukaryotes is a multifaceted phenomenon with high variability with respect to substrate RNAs, degradation efficiency, effector proteins and decay-triggering RNA features. Despite increasing knowledge of the mechanistic details, it seems ever more difficult to define NMD and to clearly distinguish it from a growing list of other UPF1-mediated RNA decay pathways (UMDs). 
With a focus on mammalian NMD, we here critically examine the prevailing NMD models and the gaps and inconsistencies in these models. By exploring the minimal requirements for NMD and other UMDs, we try to elucidate whether they are separate and definable pathways, or rather variations of the same phenomenon. Finally, we suggest that the operating principle of the UPF1-mediated decay family could be considered similar to that of a computing cloud providing a flexible infrastructure with rapid elasticity and dynamic access according to specific user needs.}, } @article {pmid32611428, year = {2020}, author = {Hyder, A and May, AA}, title = {Translational data analytics in exposure science and environmental health: a citizen science approach with high school students.}, journal = {Environmental health : a global access science source}, volume = {19}, number = {1}, pages = {73}, pmid = {32611428}, issn = {1476-069X}, support = {1645226//Division of Chemical, Bioengineering, Environmental, and Transport Systems/International ; }, mesh = {Adolescent ; Air Pollution/analysis ; Citizen Science/*organization & administration ; Data Science/*methods/organization & administration ; *Environmental Exposure ; Environmental Health/*methods ; Environmental Monitoring/methods ; Humans ; Schools ; Students ; }, abstract = {BACKGROUND: Translational data analytics aims to apply data analytics principles and techniques to bring about broader societal or human impact. Translational data analytics for environmental health is an emerging discipline and the objective of this study is to describe a real-world example of this emerging discipline.

METHODS: We implemented a citizen-science project at a local high school. Multiple cohorts of citizen scientists, who were students, fabricated and deployed low-cost air quality sensors. A cloud-computing solution provided real-time air quality data for risk screening purposes, data analytics and curricular activities.

RESULTS: The citizen-science project engaged with 14 high school students over a four-year period that is continuing to this day. The project led to the development of a website that displayed sensor-based measurements in local neighborhoods and a GitHub-like repository for open source code and instructions. Preliminary results showed a reasonable comparison between sensor-based and EPA land-based federal reference monitor data for CO and NOx.

CONCLUSIONS: Initial sensor-based data collection efforts showed reasonable agreement with land-based federal reference monitors but more work needs to be done to validate these results. Lessons learned were: 1) the need for sustained funding because citizen science-based project timelines are a function of community needs/capacity and building interdisciplinary rapport in academic settings and 2) the need for a dedicated staff to manage academic-community relationships.}, } @article {pmid32598292, year = {2020}, author = {Saleh, N and Abo Agyla, A}, title = {An integrated assessment system for the accreditation of medical laboratories.}, journal = {Biomedizinische Technik. Biomedical engineering}, volume = {}, number = {}, pages = {}, doi = {10.1515/bmt-2019-0133}, pmid = {32598292}, issn = {1862-278X}, abstract = {Medical laboratory accreditation becomes a trend to be trustable for diagnosis of diseases. It is always performed at regular intervals to assure competence of quality management systems (QMS) based on pre-defined standards. However, few attempts were carried out to assess the quality level of medical laboratory services. Moreover, there is no realistic study that classifies and makes analyses of laboratory performance based on a computational model. The purpose of this study was to develop an integrated system for medical laboratory accreditation that assesses QMS against ISO 15189. In addition, a deep analysis of factors that sustain accreditation was presented. The system started with establishing a core matrix that maps QMS elements with ISO 15189 clauses. Through this map, a questionnaire was developed to measure the performance. Therefore, score indices were calculated for the QMS. A fuzzy logic model was designed based on the calculated scores to classify medical laboratories according to their tendency for accreditation. Further, in case of failure of accreditation, cause-and-effect root analysis was done to realize the causes. 
Finally, cloud computing principles were employed to launch a web application in order to facilitate user interface with the proposed system. In verification, the system has been tested using a dataset of 12 medical laboratories in Egypt. Results have proved system robustness and consistency. Thus, the system is considered as a self-assessment tool that demonstrates points of weakness and strength.}, } @article {pmid32570956, year = {2020}, author = {Ogiela, L and Ogiela, MR and Ko, H}, title = {Intelligent Data Management and Security in Cloud Computing.}, journal = {Sensors (Basel, Switzerland)}, volume = {20}, number = {12}, pages = {}, pmid = {32570956}, issn = {1424-8220}, support = {DEC-2016/23/B/HS4/00616//Narodowym Centrum Nauki/ ; No 16.16.120.773//Akademia Górniczo-Hutnicza im. Stanislawa Staszica/ ; No. 2017R1A6A1A03015496//National Research Foundation of Korea/ ; }, abstract = {This paper will present the authors' own techniques of secret data management and protection, with particular attention paid to techniques securing data services. Among the solutions discussed, there will be information-sharing protocols dedicated to the tasks of secret (confidential) data sharing. Such solutions will be presented in an algorithmic form, aimed at solving the tasks of protecting and securing data against unauthorized acquisition. Data-sharing protocols will execute the tasks of securing a special type of information, i.e., data services. The area of data protection will be defined for various levels, within which will be executed the tasks of data management and protection. The authors' solution concerning securing data with the use of cryptographic threshold techniques used to split the secret among a specified group of secret trustees, simultaneously enhanced by the application of linguistic methods of description of the shared secret, forms a new class of protocols, i.e., intelligent linguistic threshold schemes. 
The solutions presented in this paper referring to the service management and securing will be dedicated to various levels of data management. These levels could be differentiated both in the structure of a given entity and in its environment. There is a special example thereof, i.e., the cloud management processes. These will also be subject to the assessment of feasibility of application of the discussed protocols in these areas. Presented solutions will be based on the application of an innovative approach, in which we can use a special formal graph for the creation of a secret representation, which can then be divided and transmitted over a distributed network.}, } @article {pmid32570566, year = {2020}, author = {Coman Schmid, D and Crameri, K and Oesterle, S and Rinn, B and Sengstag, T and Stockinger, H and , }, title = {SPHN - The BioMedIT Network: A Secure IT Platform for Research with Sensitive Human Data.}, journal = {Studies in health technology and informatics}, volume = {270}, number = {}, pages = {1170-1174}, doi = {10.3233/SHTI200348}, pmid = {32570566}, issn = {1879-8365}, mesh = {Big Data ; Cloud Computing ; Computer Security ; *Information Storage and Retrieval ; Privacy ; }, abstract = {The BioMedIT project is funded by the Swiss government as an integral part of the Swiss Personalized Health Network (SPHN), aiming to provide researchers with access to a secure, powerful and versatile IT infrastructure for doing data-driven research on sensitive biomedical data while ensuring data privacy protection. The BioMedIT network gives researchers the ability to securely transfer, store, manage and process sensitive research data. The underlying BioMedIT nodes provide compute and storage capacity that can be used locally or through a federated environment. The network operates under a common Information Security Policy using state-of-the-art security techniques. 
It utilizes cloud computing, virtualization, compute accelerators (GPUs), big data storage as well as federation technologies to lower computational boundaries for researchers and to guarantee that sensitive data can be processed in a secure and lawful way. Building on existing expertise and research infrastructure at the partnering Swiss institutions, the BioMedIT network establishes a competitive Swiss private-cloud - a secure national infrastructure resource that can be used by researchers of Swiss universities, hospitals and other research institutions.}, } @article {pmid32570417, year = {2020}, author = {Niyitegeka, D and Bellafqira, R and Genin, E and Coatrieux, G}, title = {Secure Collapsing Method Based on Fully Homomorphic Encryption.}, journal = {Studies in health technology and informatics}, volume = {270}, number = {}, pages = {412-416}, doi = {10.3233/SHTI200193}, pmid = {32570417}, issn = {1879-8365}, mesh = {Algorithms ; Cloud Computing ; *Computer Security ; Female ; Genome-Wide Association Study ; Genomics ; Humans ; Logistic Models ; Male ; Privacy ; }, abstract = {In this paper, we propose a new approach for performing privacy-preserving genome-wide association study (GWAS) in cloud environments. This method allows a Genomic Research Unit (GRU) who possesses genetic variants of diseased individuals (cases) to compare his/her data against genetic variants of healthy individuals (controls) from a Genomic Research Center (GRC). The originality of this work stands on a secure version of the collapsing method based on the logistic regression model considering that all data of GRU are stored into the cloud. To do so, we take advantage of fully homomorphic encryption and of secure multiparty computation. 
Experiment results carried out on real genetic data using the BGV cryptosystem indicate that the proposed scheme provides the same results as the ones achieved on clear data.}, } @article {pmid32562490, year = {2020}, author = {Lawlor, B and Sleator, RD}, title = {The democratization of bioinformatics: A software engineering perspective.}, journal = {GigaScience}, volume = {9}, number = {6}, pages = {}, pmid = {32562490}, issn = {2047-217X}, mesh = {Cloud Computing ; Computational Biology/*methods ; Genomics/methods ; *Software ; }, abstract = {Today, thanks to advances in cloud computing, it is possible for small teams of software developers to produce internet-scale products, a feat that was previously the preserve of large organizations. Herein, we describe how these advances in software engineering can be made more readily available to bioinformaticians. In the same way that cloud computing has democratized access to distributed systems engineering for generalist software engineers, access to scalable and reproducible bioinformatic engineering can be democratized for generalist bioinformaticians and biologists. 
We present solutions, based on our own efforts, to achieve this goal.}, } @article {pmid32560653, year = {2020}, author = {Ehwerhemuepha, L and Gasperino, G and Bischoff, N and Taraman, S and Chang, A and Feaster, W}, title = {HealtheDataLab - a cloud computing solution for data science and advanced analytics in healthcare with application to predicting multi-center pediatric readmissions.}, journal = {BMC medical informatics and decision making}, volume = {20}, number = {1}, pages = {115}, pmid = {32560653}, issn = {1472-6947}, mesh = {Child ; Child, Preschool ; *Cloud Computing ; *Data Science ; Delivery of Health Care ; Female ; Humans ; Infant ; Infant, Newborn ; Male ; Patient Readmission ; Solutions ; }, abstract = {BACKGROUND: There is a shortage of medical informatics and data science platforms using cloud computing on electronic medical record (EMR) data, and with computing capacity for analyzing big data. We implemented, described, and applied a cloud computing solution utilizing the fast health interoperability resources (FHIR) standardization and state-of-the-art parallel distributed computing platform for advanced analytics.

METHODS: We utilized the architecture of the modern predictive analytics platform called Cerner® HealtheDataLab and described the suite of cloud computing services and Apache Projects that it relies on. We validated the platform by replicating and improving on a previous single pediatric institution study/model on readmission and developing a multi-center model of all-cause readmission for pediatric-age patients using the Cerner® Health Facts Deidentified Database (now updated and referred to as the Cerner Real World Data). We retrieved a subset of 1.4 million pediatric encounters consisting of 48 hospitals' data on pediatric encounters in the database based on a priori inclusion criteria. We built and analyzed corresponding random forest and multilayer perceptron (MLP) neural network models using HealtheDataLab.

RESULTS: Using the HealtheDataLab platform, we developed a random forest model and multi-layer perceptron model with AUC of 0.8446 (0.8444, 0.8447) and 0.8451 (0.8449, 0.8453) respectively. We showed the distribution in model performance across hospitals and identified a set of novel variables under previous resource utilization and generic medications that may be used to improve existing readmission models.

CONCLUSION: Our results suggest that high performance, elastic cloud computing infrastructures such as the platform presented here can be used for the development of highly predictive models on EMR data in a secure and robust environment. This in turn can lead to new clinical insights/discoveries.}, } @article {pmid32557071, year = {2020}, author = {Deep, B and Mathur, I and Joshi, N}, title = {Coalescing IoT and Wi-Fi technologies for an optimized approach in urban route planning.}, journal = {Environmental science and pollution research international}, volume = {27}, number = {27}, pages = {34434-34441}, pmid = {32557071}, issn = {1614-7499}, mesh = {Air Pollutants/*analysis ; Air Pollution/*analysis ; Environmental Monitoring ; India ; Nitrogen Dioxide/analysis ; Particulate Matter/analysis ; }, abstract = {The quality of air that we breathe is one of the more serious environmental challenges that the government faces all around the world. It is a matter of concern for almost all developed and developing countries. The National Air Quality Index (NAQI) in India was first initiated and unveiled by the central government under the Swachh Bharat Abhiyan (Clean India Campaign). It was launched to spread cleanliness, and awareness to work towards a clean and healthy environment among all citizens living in India. This index is computed based on values obtained by monitoring eight types of pollutants that are known to commonly permeate around our immediate environment. These are particulate matter PM10; particulate matter PM2.5; nitrogen dioxide; sulfur dioxide; carbon monoxide; lead; ammonia; and ozone. Studies conducted have shown that almost 90% of particulate matters are produced from vehicular emissions, dust, debris on roads, and industries and from construction sites spanning across rural, semi-urban, and urban areas. 
While the State and Central governments have devised and implemented several schemes to keep air pollution levels under control, these alone have proved inadequate in cases such as the Delhi region of India. Internet of Things (IoT) offers a range of options that extend into the domain of environmental management. Using an online monitoring system based on IoT technologies, users can stay informed on fluctuating levels of air pollution. In this paper, the design of a low-price pollution measurement kit working around a dust sensor, capable of transmitting data to a cloud service through a Wi-Fi module, is described. A system overview of urban route planning is also proposed. The proposed model can make users aware of pollutant concentrations at any point of time and can also act as useful input towards the design of the least polluted path prediction app. Hence, the proposed model can help travelers to plan a less polluted route in urban areas.}, } @article {pmid32545495, year = {2020}, author = {Lee, D and Moon, H and Oh, S and Park, D}, title = {mIoT: Metamorphic IoT Platform for On-Demand Hardware Replacement in Large-Scaled IoT Applications.}, journal = {Sensors (Basel, Switzerland)}, volume = {20}, number = {12}, pages = {}, pmid = {32545495}, issn = {1424-8220}, support = {NRF2019R1A2C2005099//Basic Science Research Program through the National Research Foundation of Korea (NRF) funded by the Ministry of Science and ICT/ ; }, abstract = {As the Internet of Things (IoT) is becoming more pervasive in our daily lives, the number of devices that connect to IoT edges and data generated at the edges are rapidly increasing. On account of the bottlenecks in servers, due to the increase in data, as well as security and privacy issues, the IoT paradigm has shifted from cloud computing to edge computing. Pursuant to this trend, embedded devices require complex computation capabilities. 
However, due to various constraints, edge devices cannot equip enough hardware to process data, so the flexibility of operation is reduced, because of the limitations of fixed hardware functions, relative to cloud computing. Recently, as application fields and collected data types diversify, and, in particular, applications requiring complex computation such as artificial intelligence (AI) and signal processing are applied to edges, flexible processing and computation capabilities based on hardware acceleration are required. In this paper, to meet these needs, we propose a new IoT platform, called a metamorphic IoT (mIoT) platform, which can provide various hardware acceleration with limited hardware platform resources, through on-demand transmission and reconfiguration of required hardware at edges instead of via transference of sensing data to a server. The proposed platform reconfigures the edge's hardware with minimal overhead, based on a probabilistic value, known as callability. The mIoT consists of reconfigurable edge devices based on RISC-V architecture and a server that manages the reconfiguration of edge devices based on callability. Through various experimental results, we confirmed that the callability-based mIoT platform can provide the hardware required by the edge device in real time. 
In addition, by performing various functions with small hardware, power consumption, which is a major constraint of IoT, can be reduced.}, } @article {pmid32540846, year = {2020}, author = {Suver, C and Thorogood, A and Doerr, M and Wilbanks, J and Knoppers, B}, title = {Bringing Code to Data: Do Not Forget Governance.}, journal = {Journal of medical Internet research}, volume = {22}, number = {7}, pages = {e18087}, pmid = {32540846}, issn = {1438-8871}, support = {//CIHR/Canada ; }, mesh = {Biomedical Research/*methods ; Cloud Computing/*standards ; Humans ; Information Dissemination/*methods ; Reproducibility of Results ; }, abstract = {Developing or independently evaluating algorithms in biomedical research is difficult because of restrictions on access to clinical data. Access is restricted because of privacy concerns, the proprietary treatment of data by institutions (fueled in part by the cost of data hosting, curation, and distribution), concerns over misuse, and the complexities of applicable regulatory frameworks. The use of cloud technology and services can address many of the barriers to data sharing. For example, researchers can access data in high performance, secure, and auditable cloud computing environments without the need for copying or downloading. An alternative path to accessing data sets requiring additional protection is the model-to-data approach. In model-to-data, researchers submit algorithms to run on secure data sets that remain hidden. Model-to-data is designed to enhance security and local control while enabling communities of researchers to generate new knowledge from sequestered data. Model-to-data has not yet been widely implemented, but pilots have demonstrated its utility when technical or legal constraints preclude other methods of sharing. We argue that model-to-data can make a valuable addition to our data sharing arsenal, with 2 caveats. 
First, model-to-data should only be adopted where necessary to supplement rather than replace existing data-sharing approaches given that it requires significant resource commitments from data stewards and limits scientific freedom, reproducibility, and scalability. Second, although model-to-data reduces concerns over data privacy and loss of local control when sharing clinical data, it is not an ethical panacea. Data stewards will remain hesitant to adopt model-to-data approaches without guidance on how to do so responsibly. To address this gap, we explored how commitments to open science, reproducibility, security, respect for data subjects, and research ethics oversight must be re-evaluated in a model-to-data context.}, } @article {pmid32540775, year = {2020}, author = {Margheri, A and Masi, M and Miladi, A and Sassone, V and Rosenzweig, J}, title = {Decentralised provenance for healthcare data.}, journal = {International journal of medical informatics}, volume = {141}, number = {}, pages = {104197}, doi = {10.1016/j.ijmedinf.2020.104197}, pmid = {32540775}, issn = {1872-8243}, mesh = {Delivery of Health Care ; *Electronic Health Records ; Health Facilities ; *Health Level Seven ; Humans ; Information Storage and Retrieval ; }, abstract = {OBJECTIVE: The creation and exchange of patients' Electronic Healthcare Records have developed significantly in the last decade. Patients' records are however distributed in data silos across multiple healthcare facilities, posing technical and clinical challenges that may endanger patients' safety. Current healthcare sharing systems ensure interoperability of patients' records across facilities, but they have limits in presenting doctors with the clinical context of the data in the records. We design and implement a platform for managing provenance tracking of Electronic Healthcare Records based on blockchain technology, compliant with the latest healthcare standards and following the patient-informed consent preferences.

METHODS: The platform leverages two pillars: the use of international standards such as Integrating the Healthcare Enterprise (IHE), Health Level Seven International (HL7) and Fast Healthcare Interoperability Resources (FHIR) to achieve interoperability, and the use of a provenance creation process that by-design, avoids personal data storage within the blockchain. The platform consists of: (1) a smart contract implemented within the Hyperledger Fabric blockchain that manages provenance according to the W3C PROV for medical document in standardised formats (e.g. a CDA document, a FHIR resource, a DICOM study, etc.); (2) a Java Proxy that intercepts all the document submissions and retrievals for which provenance shall be evaluated; (3) a service used to retrieve the PROV document.

RESULTS: We integrated our decentralised platform with the SpiritEHR engine, an enterprise-grade healthcare system, and we stored and retrieved the available documents in the Mandel's sample CDA repository,[1] which contained no protected health information. Using a cloud-based blockchain solution, we observed that the overhead added to the typical processing time of reading and writing medical data is in the order of milliseconds. Moreover, the integration of the Proxy at the level of exchanged messages in EHR systems allows transparent usage of provenance data in multiple health computing domains such as decision making, data reconciliation, and patient consent auditing.

CONCLUSIONS: By using international healthcare standards and a cloud-based blockchain deployment, we delivered a solution that can manage provenance of patients' records via transparent integration within the routine operations on healthcare data.}, } @article {pmid32537483, year = {2020}, author = {Ben Hassen, H and Ayari, N and Hamdi, B}, title = {A home hospitalization system based on the Internet of things, Fog computing and cloud computing.}, journal = {Informatics in medicine unlocked}, volume = {20}, number = {}, pages = {100368}, pmid = {32537483}, issn = {2352-9148}, abstract = {In recent years, the world has witnessed a significant increase in the number of elderly who often suffer from chronic diseases, and has witnessed in recent months a major spread of the new coronavirus (COVID-19), which has led to thousands of deaths, especially among the elderly and people who suffer from chronic diseases. Coronavirus has also caused many problems in hospitals, where these are no longer able to accommodate a large number of patients. This virus has also begun to spread between medical and paramedical teams, and this causes a major risk to the health of patients staying in hospitals. To reduce the spread of the virus and maintain the health of patients who need a hospital stay, home hospitalization is one of the best possible solutions. This paper proposes a home hospitalization system based on the Internet of Things (IoT), Fog computing, and Cloud computing, which are among the most important technologies that have contributed to the development of the healthcare sector in a significant way. 
These systems allow patients to recover and receive treatment in their homes and among their families, where patient health and the hospitalization room environmental state are monitored, to enable doctors to follow the hospitalization process and make recommendations to patients and their supervisors, through monitoring units and mobile applications developed for this purpose. The results of evaluation have shown great acceptance of this system by patients and doctors alike.}, } @article {pmid32535840, year = {2020}, author = {Kurzawski, JW and Mikellidou, K and Morrone, MC and Pestilli, F}, title = {The visual white matter connecting human area prostriata and the thalamus is retinotopically organized.}, journal = {Brain structure & function}, volume = {225}, number = {6}, pages = {1839-1853}, pmid = {32535840}, issn = {1863-2661}, support = {797603//H2020 Marie Skłodowska-Curie Actions/ ; 641805//H2020 Marie Skłodowska-Curie Actions/ ; 1636893//Directorate for Computer and Information Science and Engineering/ ; 1734853//Directorate for Social, Behavioral and Economic Sciences/ ; UL1TR002529//National Center for Advancing Translational Sciences/ ; 832813//H2020 European Research Council/ ; 1916518//National Science Foundation/ ; 2017SBCPZY_02//Ministero dell'Istruzione, dell'Università e della Ricerca/ ; ULTTR001108//National Institute of Mental Health/ ; }, mesh = {Connectome ; Diffusion Magnetic Resonance Imaging ; Geniculate Bodies/anatomy & histology ; Humans ; Occipital Lobe/anatomy & histology ; Thalamus/*anatomy & histology ; Visual Cortex/*anatomy & histology ; Visual Pathways/anatomy & histology ; White Matter/*anatomy & histology ; }, abstract = {The human visual system is capable of processing visual information from fovea to the far peripheral visual field. 
Recent fMRI studies have shown a full and detailed retinotopic map in area prostriata, located ventro-dorsally and anterior to the calcarine sulcus along the parieto-occipital sulcus with strong preference for peripheral and wide-field stimulation. Here, we report the anatomical pattern of white matter connections between area prostriata and the thalamus encompassing the lateral geniculate nucleus (LGN). To this end, we developed and utilized an automated pipeline comprising a series of Apps that run openly on the cloud computing platform brainlife.io to analyse 139 subjects of the Human Connectome Project (HCP). We observe a continuous and extended bundle of white matter fibers from which two subcomponents can be extracted: one passing ventrally parallel to the optic radiations (OR) and another passing dorsally circumventing the lateral ventricle. Interestingly, the loop travelling dorsally connects the thalamus with the central visual field representation of prostriata located anteriorly, while the other loop travelling more ventrally connects the LGN with the more peripheral visual field representation located posteriorly. We then analyse an additional cohort of 10 HCP subjects using a manual plane extraction method outside brainlife.io to study the relationship between the two extracted white matter subcomponents and eccentricity, myelin and cortical thickness gradients within prostriata. 
Our results are consistent with a retinotopic segregation recently demonstrated in the OR, connecting the LGN and V1 in humans and reveal for the first time a retinotopic segregation regarding the trajectory of a fiber bundle between the thalamus and an associative visual area.}, } @article {pmid32525944, year = {2020}, author = {Alnajrani, HM and Norman, AA and Ahmed, BH}, title = {Privacy and data protection in mobile cloud computing: A systematic mapping study.}, journal = {PloS one}, volume = {15}, number = {6}, pages = {e0234312}, pmid = {32525944}, issn = {1932-6203}, mesh = {Cell Phone ; *Cloud Computing ; *Computer Security ; Confidentiality ; Humans ; *Mobile Applications ; *Privacy ; }, abstract = {As a result of a shift in the world of technology, the combination of ubiquitous mobile networks and cloud computing produced the mobile cloud computing (MCC) domain. As a consequence of a major concern of cloud users, privacy and data protection are getting substantial attention in the field. Currently, a considerable number of papers have been published on MCC with a growing interest in privacy and data protection. Along with this advance in MCC, however, no specific investigation highlights the results of the existing studies in privacy and data protection. In addition, there are no particular exploration highlights trends and open issues in the domain. Accordingly, the objective of this paper is to highlight the results of existing primary studies published in privacy and data protection in MCC to identify current trends and open issues. In this investigation, a systematic mapping study was conducted with a set of six research questions. A total of 1711 studies published from 2009 to 2019 were obtained. Following a filtering process, a collection of 74 primary studies were selected. As a result, the present data privacy threats, attacks, and solutions were identified. Also, the ongoing trends of data privacy exercise were observed. 
Moreover, the most utilized measures, research type, and contribution type facets were emphasized. Additionally, the current open research issues in privacy and data protection in MCC were highlighted. Furthermore, the results demonstrate the current state-of-the-art of privacy and data protection in MCC, and the conclusion will help to identify research trends and open issues in MCC for researchers and offer useful information in MCC for practitioners.}, } @article {pmid32511322, year = {2021}, author = {Getz, M and Wang, Y and An, G and Asthana, M and Becker, A and Cockrell, C and Collier, N and Craig, M and Davis, CL and Faeder, JR and Ford Versypt, AN and Mapder, T and Gianlupi, JF and Glazier, JA and Hamis, S and Heiland, R and Hillen, T and Hou, D and Islam, MA and Jenner, AL and Kurtoglu, F and Larkin, CI and Liu, B and Macfarlane, F and Maygrundter, P and Morel, PA and Narayanan, A and Ozik, J and Pienaar, E and Rangamani, P and Saglam, AS and Shoemaker, JE and Smith, AM and Weaver, JJA and Macklin, P}, title = {Iterative community-driven development of a SARS-CoV-2 tissue simulator.}, journal = {bioRxiv : the preprint server for biology}, volume = {}, number = {}, pages = {}, doi = {10.1101/2020.04.02.019075}, pmid = {32511322}, support = {P41 GM103712/GM/NIGMS NIH HHS/United States ; R01 AI139088/AI/NIAID NIH HHS/United States ; R35 GM133763/GM/NIGMS NIH HHS/United States ; }, abstract = {The 2019 novel coronavirus, SARS-CoV-2, is a pathogen of critical significance to international public health. Knowledge of the interplay between molecular-scale virus-receptor interactions, single-cell viral replication, intracellular-scale viral transport, and emergent tissue-scale viral propagation is limited. 
Moreover, little is known about immune system-virus-tissue interactions and how these can result in low-level (asymptomatic) infections in some cases and acute respiratory distress syndrome (ARDS) in others, particularly with respect to presentation in different age groups or pre-existing inflammatory risk factors. Given the nonlinear interactions within and among each of these processes, multiscale simulation models can shed light on the emergent dynamics that lead to divergent outcomes, identify actionable "choke points" for pharmacologic interventions, screen potential therapies, and identify potential biomarkers that differentiate patient outcomes. Given the complexity of the problem and the acute need for an actionable model to guide therapy discovery and optimization, we introduce and iteratively refine a prototype of a multiscale model of SARS-CoV-2 dynamics in lung tissue. The first prototype model was built and shared internationally as open source code and an online interactive model in under 12 hours, and community domain expertise is driving regular refinements. In a sustained community effort, this consortium is integrating data and expertise across virology, immunology, mathematical biology, quantitative systems physiology, cloud and high performance computing, and other domains to accelerate our response to this critical threat to international health. 
More broadly, this effort is creating a reusable, modular framework for studying viral replication and immune response in tissues, which can also potentially be adapted to related problems in immunology and immunotherapy.}, } @article {pmid32509260, year = {2020}, author = {Khan, F and Khan, MA and Abbas, S and Athar, A and Siddiqui, SY and Khan, AH and Saeed, MA and Hussain, M}, title = {Cloud-Based Breast Cancer Prediction Empowered with Soft Computing Approaches.}, journal = {Journal of healthcare engineering}, volume = {2020}, number = {}, pages = {8017496}, pmid = {32509260}, issn = {2040-2309}, mesh = {Breast/*diagnostic imaging ; Breast Neoplasms/*diagnosis ; *Cloud Computing/statistics & numerical data ; *Diagnosis, Computer-Assisted/statistics & numerical data ; Early Detection of Cancer ; Expert Systems ; Female ; Humans ; Support Vector Machine ; }, abstract = {The developing countries are still starving for the betterment of health sector. The disease commonly found among the women is breast cancer, and past researches have proven results that if the cancer is detected at a very early stage, the chances to overcome the disease are higher than the disease treated or detected at a later stage. This article proposed cloud-based intelligent BCP-T1F-SVM with 2 variations/models like BCP-T1F and BCP-SVM. The proposed BCP-T1F-SVM system has employed two main soft computing algorithms. The proposed BCP-T1F-SVM expert system specifically defines the stage and the type of cancer a person is suffering from. Expert system will elaborate the grievous stages of the cancer, to which extent a patient has suffered. The proposed BCP-SVM gives the higher precision of the proposed breast cancer detection model. In the limelight of breast cancer, the proposed BCP-T1F-SVM expert system gives out the higher precision rate. The proposed BCP-T1F expert system is being employed in the diagnosis of breast cancer at an initial stage. 
Taking different stages of cancer into account, breast cancer is being dealt by BCP-T1F expert system. The calculations and the evaluation done in this research have revealed that BCP-SVM is better than BCP-T1F. The BCP-T1F concludes out the 96.56 percentage accuracy, whereas the BCP-SVM gives accuracy of 97.06 percentage. The above unleashed research is wrapped up with the conclusion that BCP-SVM is better than the BCP-T1F. The opinions have been recommended by the medical expertise of Sheikh Zayed Hospital Lahore, Pakistan, and Cavan General Hospital, Lisdaran, Cavan, Ireland.}, } @article {pmid32504192, year = {2021}, author = {Soriano-Valdez, D and Pelaez-Ballestas, I and Manrique de Lara, A and Gastelum-Strozzi, A}, title = {The basics of data, big data, and machine learning in clinical practice.}, journal = {Clinical rheumatology}, volume = {40}, number = {1}, pages = {11-23}, pmid = {32504192}, issn = {1434-9949}, mesh = {*Big Data ; Delivery of Health Care ; Humans ; Machine Learning ; *Medical Informatics ; Software ; }, abstract = {Health informatics and biomedical computing have introduced the use of computer methods to analyze clinical information and provide tools to assist clinicians during the diagnosis and treatment of diverse clinical conditions. With the amount of information that can be obtained in the healthcare setting, new methods to acquire, organize, and analyze the data are being developed each day, including new applications in the world of big data and machine learning. In this review, first we present the most basic concepts in data science, including the structural hierarchy of information and how it is managed. A section is dedicated to discussing topics relevant to the acquisition of data, importantly the availability and use of online resources such as survey software and cloud computing services. Along with digital datasets, these tools make it possible to create more diverse models and facilitate collaboration. 
After, we describe concepts and techniques in machine learning used to process and analyze health data, especially those most widely applied in rheumatology. Overall, the objective of this review is to aid in the comprehension of how data science is used in health, with a special emphasis on the relevance to the field of rheumatology. It provides clinicians with basic tools on how to approach and understand new trends in health informatics analysis currently being used in rheumatology practice. If clinicians understand the potential use and limitations of health informatics, this will facilitate interdisciplinary conversations and continued projects relating to data, big data, and machine learning.}, } @article {pmid32500999, year = {2020}, author = {Zhang, C and Liu, L and Zhou, L and Yin, X and Wei, X and Hu, Y and Liu, Y and Chen, S and Wang, J and Wang, ZL}, title = {Self-Powered Sensor for Quantifying Ocean Surface Water Waves Based on Triboelectric Nanogenerator.}, journal = {ACS nano}, volume = {14}, number = {6}, pages = {7092-7100}, doi = {10.1021/acsnano.0c01827}, pmid = {32500999}, issn = {1936-086X}, abstract = {An ocean wave contains various marine information, but it is generally difficult to obtain the high-precision quantification to meet the needs of ocean development and utilization. Here, we report a self-powered and high-performance triboelectric ocean-wave spectrum sensor (TOSS) fabricated using a tubular triboelectric nanogenerator (TENG) and hollow ball buoy, which not only can adapt to the measurement of ocean surface water waves in any direction but also can eliminate the influence of seawater on the performance of the sensor. Based on the high-sensitivity advantage of TENG, an ultrahigh sensitivity of 2530 mV mm[-1] (which is 100 times higher than that of previous work) and a minimal monitoring error of 0.1% are achieved in monitoring wave height and wave period, respectively. 
Importantly, six basic ocean-wave parameters (wave height, wave period, wave frequency, wave velocity, wavelength, and wave steepness), wave velocity spectrum, and mechanical energy spectrum have been derived by the electrical signals of TOSS. Our finding not only can provide ocean-wave parameters but also can offer significant and accurate data support for cloud computing of ocean big data.}, } @article {pmid32498594, year = {2020}, author = {Wang, L and Alexander, CA}, title = {Big data analytics in medical engineering and healthcare: methods, advances and challenges.}, journal = {Journal of medical engineering & technology}, volume = {44}, number = {6}, pages = {267-283}, doi = {10.1080/03091902.2020.1769758}, pmid = {32498594}, issn = {1464-522X}, mesh = {*Big Data ; *Biomedical Engineering ; *Delivery of Health Care ; Humans ; }, abstract = {Big data analytics are gaining popularity in medical engineering and healthcare use cases. Stakeholders are finding big data analytics reduce medical costs and personalise medical services for each individual patient. Big data analytics can be used in large-scale genetics studies, public health, personalised and precision medicine, new drug development, etc. The introduction of the types, sources, and features of big data in healthcare as well as the applications and benefits of big data and big data analytics in healthcare is key to understanding healthcare big data and will be discussed in this article. Major methods, platforms and tools of big data analytics in medical engineering and healthcare are also presented. Advances and technology progress of big data analytics in healthcare are introduced, which includes artificial intelligence (AI) with big data, infrastructure and cloud computing, advanced computation and data processing, privacy and cybersecurity, health economic outcomes and technology management, and smart healthcare with sensing, wearable devices and Internet of things (IoT). 
Current challenges of dealing with big data and big data analytics in medical engineering and healthcare as well as future work are also presented.}, } @article {pmid32490091, year = {2020}, author = {Corbane, C and Politis, P and Kempeneers, P and Simonetti, D and Soille, P and Burger, A and Pesaresi, M and Sabo, F and Syrris, V and Kemper, T}, title = {A global cloud free pixel- based image composite from Sentinel-2 data.}, journal = {Data in brief}, volume = {31}, number = {}, pages = {105737}, pmid = {32490091}, issn = {2352-3409}, abstract = {Large-scale land cover classification from satellite imagery is still a challenge due to the big volume of data to be processed, to persistent cloud-cover in cloud-prone areas as well as seasonal artefacts that affect spatial homogeneity. Sentinel-2 times series from Copernicus Earth Observation program offer a great potential for fine scale land cover mapping thanks to high spatial and temporal resolutions, with a decametric resolution and five-day repeat time. However, the selection of best available scenes, their download together with the requirements in terms of storage and computing resources pose restrictions for large-scale land cover mapping. The dataset presented in this paper corresponds to global cloud-free pixel based composite created from the Sentinel-2 data archive (Level L1C) available in Google Earth Engine for the period January 2017- December 2018. The methodology used for generating the image composite is described and the metadata associated with the 10 m resolution dataset is presented. The data with a total volume of 15 TB is stored on the Big Data platform of the Joint Research Centre. 
It can be downloaded per UTM grid zone, loaded into GIS clients and displayed easily thanks to pre-computed overviews.}, } @article {pmid32486383, year = {2020}, author = {Abd-El-Atty, B and Iliyasu, AM and Alaskar, H and Abd El-Latif, AA}, title = {A Robust Quasi-Quantum Walks-Based Steganography Protocol for Secure Transmission of Images on Cloud-Based E-healthcare Platforms.}, journal = {Sensors (Basel, Switzerland)}, volume = {20}, number = {11}, pages = {}, pmid = {32486383}, issn = {1424-8220}, support = {Advanced Computational Intelligence & Intelligent Systems Engineering (ACIISE) Research Group Project Number 2019/01/9862//Prince Sattam bin Abdulaziz University/ ; }, mesh = {*Algorithms ; *Cloud Computing ; *Computer Security ; *Image Processing, Computer-Assisted ; Quantum Theory ; *Telemedicine ; }, abstract = {Traditionally, tamper-proof steganography involves using efficient protocols to encrypt the stego cover image and/or hidden message prior to embedding it into the carrier object. However, as the inevitable transition to the quantum computing paradigm beckons, its immense computing power will be exploited to violate even the best non-quantum, i.e., classical, stego protocol. On its part, quantum walks can be tailored to utilise their astounding 'quantumness' to propagate nonlinear chaotic behaviours as well as its sufficient sensitivity to alterations in primary key parameters both important properties for efficient information security. Our study explores using a classical (i.e., quantum-inspired) rendition of the controlled alternate quantum walks (i.e., CAQWs) model to fabricate a robust image steganography protocol for cloud-based E-healthcare platforms by locating content that overlays the secret (or hidden) bits. The design employed in our technique precludes the need for pre and/or post encryption of the carrier and secret images. 
Furthermore, our design simplifies the process to extract the confidential (hidden) information since only the stego image and primary states to run the CAQWs are required. We validate our proposed protocol on a dataset of medical images, which exhibited remarkable outcomes in terms of their security, good visual quality, high resistance to data loss attacks, high embedding capacity, etc., making the proposed scheme a veritable strategy for efficient medical image steganography.}, } @article {pmid32485943, year = {2020}, author = {Silva, FSD and Silva, E and Neto, EP and Lemos, M and Neto, AJV and Esposito, F}, title = {A Taxonomy of DDoS Attack Mitigation Approaches Featured by SDN Technologies in IoT Scenarios.}, journal = {Sensors (Basel, Switzerland)}, volume = {20}, number = {11}, pages = {}, pmid = {32485943}, issn = {1424-8220}, abstract = {The Internet of Things (IoT) has attracted much attention from the Information and Communication Technology (ICT) community in recent years. One of the main reasons for this is the availability of techniques provided by this paradigm, such as environmental monitoring employing user data and everyday objects. The facilities provided by the IoT infrastructure allow the development of a wide range of new business models and applications (e.g., smart homes, smart cities, or e-health). However, there are still concerns over the security measures which need to be addressed to ensure a suitable deployment. Distributed Denial of Service (DDoS) attacks are among the most severe virtual threats at present and occur prominently in this scenario, which can be mainly owed to their ease of execution. In light of this, several research studies have been conducted to find new strategies as well as improve existing techniques and solutions. The use of emerging technologies such as those based on the Software-Defined Networking (SDN) paradigm has proved to be a promising alternative as a means of mitigating DDoS attacks. 
However, the high granularity that characterizes the IoT scenarios and the wide range of techniques explored during the DDoS attacks make the task of finding and implementing new solutions quite challenging. This problem is exacerbated by the lack of benchmarks that can assist developers when designing new solutions for mitigating DDoS attacks for increasingly complex IoT scenarios. To fill this knowledge gap, in this study we carry out an in-depth investigation of the state-of-the-art and create a taxonomy that describes and characterizes existing solutions and highlights their main limitations. Our taxonomy provides a comprehensive view of the reasons for the deployment of the solutions, and the scenario in which they operate. The results of this study demonstrate the main benefits and drawbacks of each solution set when applied to specific scenarios by examining current trends and future perspectives, for example, the adoption of emerging technologies based on Cloud and Edge (or Fog) Computing.}, } @article {pmid32479601, year = {2020}, author = {Mohanraj, S and Díaz-Mejía, JJ and Pham, MD and Elrick, H and Husić, M and Rashid, S and Luo, P and Bal, P and Lu, K and Patel, S and Mahalanabis, A and Naidas, A and Christensen, E and Croucher, D and Richards, LM and Shooshtari, P and Brudno, M and Ramani, AK and Pugh, TJ}, title = {CReSCENT: CanceR Single Cell ExpressioN Toolkit.}, journal = {Nucleic acids research}, volume = {48}, number = {W1}, pages = {W372-W379}, pmid = {32479601}, issn = {1362-4962}, mesh = {Humans ; Neoplasms/*genetics/immunology ; RNA-Seq/*methods ; Single-Cell Analysis/*methods ; *Software ; T-Lymphocytes/metabolism ; }, abstract = {CReSCENT: CanceR Single Cell ExpressioN Toolkit (https://crescent.cloud), is an intuitive and scalable web portal incorporating a containerized pipeline execution engine for standardized analysis of single-cell RNA sequencing (scRNA-seq) data. 
While scRNA-seq data for tumour specimens are readily generated, subsequent analysis requires high-performance computing infrastructure and user expertise to build analysis pipelines and tailor interpretation for cancer biology. CReSCENT uses public data sets and preconfigured pipelines that are accessible to computational biology non-experts and are user-editable to allow optimization, comparison, and reanalysis for specific experiments. Users can also upload their own scRNA-seq data for analysis and results can be kept private or shared with other users.}, } @article {pmid32479411, year = {2020}, author = {Ye, Q and Zhou, J and Wu, H}, title = {Using Information Technology to Manage the COVID-19 Pandemic: Development of a Technical Framework Based on Practical Experience in China.}, journal = {JMIR medical informatics}, volume = {8}, number = {6}, pages = {e19515}, pmid = {32479411}, issn = {2291-9694}, abstract = {BACKGROUND: The coronavirus disease (COVID-19) epidemic poses an enormous challenge to the global health system, and governments have taken active preventive and control measures. The health informatics community in China has actively taken action to leverage health information technologies for epidemic monitoring, detection, early warning, prevention and control, and other tasks.

OBJECTIVE: The aim of this study was to develop a technical framework to respond to the COVID-19 epidemic from a health informatics perspective.

METHODS: In this study, we collected health information technology-related information to understand the actions taken by the health informatics community in China during the COVID-19 outbreak and developed a health information technology framework for epidemic response based on health information technology-related measures and methods.

RESULTS: Based on the framework, we review specific health information technology practices for managing the outbreak in China, describe the highlights of their application in detail, and discuss critical issues to consider when using health information technology. Technologies employed include mobile and web-based services such as Internet hospitals and Wechat, big data analyses (including digital contact tracing through QR codes or epidemic prediction), cloud computing, Internet of things, Artificial Intelligence (including the use of drones, robots, and intelligent diagnoses), 5G telemedicine, and clinical information systems to facilitate clinical management for COVID-19.

CONCLUSIONS: Practical experience in China shows that health information technologies play a pivotal role in responding to the COVID-19 epidemic.}, } @article {pmid32479340, year = {2020}, author = {Li, NS and Chen, YT and Hsu, YP and Pang, HH and Huang, CY and Shiue, YL and Wei, KC and Yang, HW}, title = {Mobile healthcare system based on the combination of a lateral flow pad and smartphone for rapid detection of uric acid in whole blood.}, journal = {Biosensors & bioelectronics}, volume = {164}, number = {}, pages = {112309}, doi = {10.1016/j.bios.2020.112309}, pmid = {32479340}, issn = {1873-4235}, mesh = {*Biosensing Techniques ; *Delivery of Health Care ; *Gout/diagnosis ; Humans ; *Smartphone ; Uric Acid/blood ; }, abstract = {Excessive production of uric acid (UA) in blood may lead to gout, hyperuricaemia and kidney disorder; thus, a fast, simple and reliable biosensor is needed to routinely determine the UA concentration in blood without pretreatment. The purpose of this study was to develop a mobile healthcare (mHealth) system using a drop of blood, which comprised a lateral flow pad (LFP), mesoporous Prussian blue nanoparticles (MPBs) as artificial nanozymes and auto-calculation software for on-site determination of UA in blood and data management. A standard curve was found to be linear in the range of 1.5-8.5 mg/dL UA, and convenience, cloud computing and personal information management were simultaneously achieved for the proposed mHealth system. Our mHealth system appropriately met the requirements of application in patients' homes, with the potential of real-time monitoring by their primary care physicians (PCPs).}, } @article {pmid32477655, year = {2020}, author = {Lee, V and Parekh, K and Matthew, G and Shi, Q and Pelletier, K and Canale, A and Luzuriaga, K and Mathew, J}, title = {JITA: A Platform for Enabling Real Time Point-of-Care Patient Recruitment.}, journal = {AMIA Joint Summits on Translational Science proceedings. 
AMIA Joint Summits on Translational Science}, volume = {2020}, number = {}, pages = {355-359}, pmid = {32477655}, issn = {2153-4063}, support = {UL1 TR000161/TR/NCATS NIH HHS/United States ; }, abstract = {Timely accrual continues to be a challenge in clinical trials. The evolution of Electronic Health Record systems and cohort selection tools like i2b2 have improved identification of potential candidate participants. However, delays in receiving relevant patient information and lack of real time patient identification cause difficulty in meeting recruitment targets. The authors have designed and developed a proof of concept platform that informs authorized study team members about potential participant matches while the patient is at a healthcare setting. This Just-In-Time Alert (JITA) application leverages Health Level 7 (HL7) messages and parses them against study eligibility criteria using Amazon Web Services (AWS) cloud technologies. When required conditions are satisfied, the rules engine triggers an alert to the study team. Our pilot tests using difficult to recruit trials currently underway at the UMass Medical School have shown significant potential by generating more than 90 patient alerts in a 90-day testing timeframe.}, } @article {pmid32473441, year = {2020}, author = {Liu, A and Wu, Q and Cheng, X}, title = {Using the Google Earth Engine to estimate a 10 m resolution monthly inventory of soil fugitive dust emissions in Beijing, China.}, journal = {The Science of the total environment}, volume = {735}, number = {}, pages = {139174}, doi = {10.1016/j.scitotenv.2020.139174}, pmid = {32473441}, issn = {1879-1026}, abstract = {Soil fugitive dust (SFD) is an important contributor to ambient particulate matter (PM), but most current SFD emission inventories are updated slowly or have low resolution. 
In areas where vegetation coverage and climatic conditions undergo significant seasonal changes, the classic wind erosion equation (WEQ) tends to underestimate SFD emissions, increasing the need for higher spatiotemporal data resolution. Continuous acquisition of precise bare soil maps is the key barrier to compiling monthly high-resolution SFD emission inventories. In this study, we proposed taking advantage of the massive Landsat and Sentinel-2 imagery data sets stored in the Google Earth Engine (GEE) cloud platform to enable the rapid production of bare soil maps with spatial resolutions of up to 10 m. The resulting improved spatiotemporal resolution of wind erosion parameters allowed us to estimate SFD emissions in Beijing as being ~5-7 times the level calculated by the WEQ. Spring and winter accounted for >85% of SFD emissions, while April was the dustiest month with SFD emissions of PM10 exceeding 11,000 t. Our results highlighted the role of SFD in air pollution during winter and spring in northern China, and suggested that GEE should be further used for image acquisition, data processing, and compilation of gridded SFD inventories. These inventories can help identify the location and intensity of SFD sources while providing supporting information for local authorities working to develop targeted mitigation measures.}, } @article {pmid32467813, year = {2020}, author = {Massaad, E and Cherfan, P}, title = {Social Media Data Analytics on Telehealth During the COVID-19 Pandemic.}, journal = {Cureus}, volume = {12}, number = {4}, pages = {e7838}, pmid = {32467813}, issn = {2168-8184}, abstract = {INTRODUCTION: Physical distancing during the coronavirus Covid-19 pandemic has brought telehealth to the forefront to keep up with patient care amidst an international crisis that is exhausting healthcare resources. Understanding and managing health-related concerns resulting from physical distancing measures are of utmost importance.

OBJECTIVES: To describe and analyze the volume, content, and geospatial distribution of tweets associated with telehealth during the Covid-19 pandemic.

METHODS: We queried Twitter public data to access tweets related to telehealth from March 30, 2020 to April 6, 2020. We analyzed tweets using natural language processing (NLP) and unsupervised learning methods. Clustering analysis was performed to classify tweets. Geographic tweet distribution was correlated with Covid-19 confirmed cases in the United States. All analyses were carried out on the Google Cloud computing service "Google Colab" using Python libraries (Python Software Foundation).

RESULTS: A total of 41,329 tweets containing the term "telehealth" were retrieved. The most common terms appearing alongside 'telehealth' were "covid", "health", "care", "services", "patients", and "pandemic". Mental health was the most common health-related topic that appeared in our search reflecting a high need for mental healthcare during the pandemic. Similarly, Medicare was the most common appearing health plan mirroring the accelerated access to telehealth and change in coverage policies. The geographic distribution of tweets related to telehealth and having a specific location within the United States (n=19,367) was significantly associated with the number of confirmed Covid-19 cases reported in each state (p<0.001).

CONCLUSION: Social media activity is an accurate reflection of disease burden during the Covid-19 pandemic. Widespread adoption of telehealth-favoring policies is necessary and mostly needed to address mental health problems that may arise in areas of high infection and death rates.}, } @article {pmid32466770, year = {2020}, author = {Tian, L and Li, Y and Edmonson, MN and Zhou, X and Newman, S and McLeod, C and Thrasher, A and Liu, Y and Tang, B and Rusch, MC and Easton, J and Ma, J and Davis, E and Trull, A and Michael, JR and Szlachta, K and Mullighan, C and Baker, SJ and Downing, JR and Ellison, DW and Zhang, J}, title = {CICERO: a versatile method for detecting complex and diverse driver fusions using cancer RNA sequencing data.}, journal = {Genome biology}, volume = {21}, number = {1}, pages = {126}, pmid = {32466770}, issn = {1474-760X}, support = {P01 CA096832/CA/NCI NIH HHS/United States ; P01CA09683//Foundation for the National Institutes of Health (US)/International ; R01CA216391//Foundation for the National Institutes of Health/International ; P30CA021765//Foundation for the National Institutes of Health/International ; }, mesh = {Algorithms ; *Gene Fusion ; Humans ; Molecular Sequence Annotation/*methods ; Neoplasms/*genetics ; Sequence Analysis, RNA ; *Software ; }, abstract = {To discover driver fusions beyond canonical exon-to-exon chimeric transcripts, we develop CICERO, a local assembly-based algorithm that integrates RNA-seq read support with extensive annotation for candidate ranking. CICERO outperforms commonly used methods, achieving a 95% detection rate for 184 independently validated driver fusions including internal tandem duplications and other non-canonical events in 170 pediatric cancer transcriptomes. Re-analysis of TCGA glioblastoma RNA-seq unveils previously unreported kinase fusions (KLHL7-BRAF) and a 13% prevalence of EGFR C-terminal truncation. 
Accessible via standard or cloud-based implementation, CICERO enhances driver fusion detection for research and precision oncology. The CICERO source code is available at https://github.com/stjude/Cicero.}, } @article {pmid32462884, year = {2020}, author = {Zarowitz, BJ}, title = {Emerging Pharmacotherapy and Health Care Needs of Patients in the Age of Artificial Intelligence and Digitalization.}, journal = {The Annals of pharmacotherapy}, volume = {54}, number = {10}, pages = {1038-1046}, doi = {10.1177/1060028020919383}, pmid = {32462884}, issn = {1542-6270}, mesh = {Aged ; Aged, 80 and over ; *Artificial Intelligence ; Decision Support Systems, Clinical ; Delivery of Health Care/*methods ; *Digital Technology ; Drug Therapy/*methods ; Female ; Humans ; Male ; Telemedicine/*methods ; }, abstract = {Advances in the application of artificial intelligence, digitization, technology, iCloud computing, and wearable devices in health care predict an exciting future for health care professionals and our patients. Projections suggest an older, generally healthier, better-informed but financially less secure patient population of wider cultural and ethnic diversity that live throughout the United States. A pragmatic yet structured approach is recommended to prepare health care professionals and patients for emerging pharmacotherapy needs. Clinician training should include genomics, cloud computing, use of large data sets, implementation science, and cultural competence. 
Patients will need support for wearable devices and reassurance regarding digital medicine.}, } @article {pmid32459811, year = {2020}, author = {Cheng, C and Zhou, H and Chai, X and Li, Y and Wang, D and Ji, Y and Niu, S and Hou, Y}, title = {Adoption of image surface parameters under moving edge computing in the construction of mountain fire warning method.}, journal = {PloS one}, volume = {15}, number = {5}, pages = {e0232433}, pmid = {32459811}, issn = {1932-6203}, mesh = {*Algorithms ; China ; Cloud Computing ; Computer Systems ; Conservation of Natural Resources/methods/statistics & numerical data ; Discriminant Analysis ; Geological Phenomena ; Humans ; Image Processing, Computer-Assisted/*methods/statistics & numerical data ; Software ; Surface Properties ; Wildfires/*prevention & control/statistics & numerical data ; }, abstract = {In order to cope with the problems of high frequency and multiple causes of mountain fires, it is very important to adopt appropriate technologies to monitor and warn mountain fires through a few surface parameters. At the same time, the existing mobile terminal equipment is insufficient in image processing and storage capacity, and the energy consumption is high in the data transmission process, which requires calculation unloading. For this circumstance, first, a hierarchical discriminant analysis algorithm based on image feature extraction is introduced, and the image acquisition software in the mobile edge computing environment in the android system is designed and installed. Based on the remote sensing data, the land surface parameters of mountain fire are obtained, and the application of image recognition optimization algorithm in the mobile edge computing (MEC) environment is realized to solve the problem of transmission delay caused by traditional mobile cloud computing (MCC). Then, according to the forest fire sensitivity index, a forest fire early warning model based on MEC is designed. 
Finally, the image recognition response time and bandwidth consumption of the algorithm are studied, and the occurrence probability of mountain fire in Muli county, Liangshan prefecture, Sichuan is predicted. The results show that, compared with the MCC architecture, the algorithm presented in this study has shorter recognition and response time to different images in WiFi network environment; compared with MCC, MEC architecture can identify close users and transmit less data, which can effectively reduce the bandwidth pressure of the network. In most areas of Muli county, Liangshan prefecture, the probability of mountain fire is relatively low, the probability of mountain fire caused by non-surface environment is about 8 times that of the surface environment, and the influence of non-surface environment in the period of high incidence of mountain fire is lower than that in the period of low incidence. In conclusion, the surface parameters of MEC can be used to effectively predict the mountain fire and provide preventive measures in time.}, } @article {pmid32457555, year = {2019}, author = {Hylton, A and Henselman-Petrusek, G and Sang, J and Short, R}, title = {Tuning the Performance of a Computational Persistent Homology Package.}, journal = {Software: practice & experience}, volume = {49}, number = {5}, pages = {885-905}, pmid = {32457555}, issn = {0038-0644}, support = {ARMD_629660/ImNASA/Intramural NASA/United States ; }, abstract = {In recent years, persistent homology has become an attractive method for data analysis. It captures topological features, such as connected components, holes, and voids from point cloud data and summarizes the way in which these features appear and disappear in a filtration sequence. In this project, we focus on improving the performance of Eirene, a computational package for persistent homology. Eirene is a 5000-line open-source software library implemented in the dynamic programming language Julia. 
We use the Julia profiling tools to identify performance bottlenecks and develop novel methods to manage them, including the parallelization of some time-consuming functions on multicore/manycore hardware. Empirical results show that performance can be greatly improved.}, } @article {pmid32455635, year = {2020}, author = {Kim, M and Yu, S and Lee, J and Park, Y and Park, Y}, title = {Design of Secure Protocol for Cloud-Assisted Electronic Health Record System Using Blockchain.}, journal = {Sensors (Basel, Switzerland)}, volume = {20}, number = {10}, pages = {}, pmid = {32455635}, issn = {1424-8220}, mesh = {*Blockchain ; *Cloud Computing ; Computer Security ; Computer Systems ; Confidentiality ; *Electronic Health Records ; Humans ; Technology ; }, abstract = {In the traditional electronic health record (EHR) management system, each medical service center manages their own health records, respectively, which are difficult to share on the different medical platforms. Recently, blockchain technology is one of the popular alternatives to enable medical service centers based on different platforms to share EHRs. However, it is hard to store whole EHR data in blockchain because of the size and the price of blockchain. To resolve this problem, cloud computing is considered as a promising solution. Cloud computing offers advantageous properties such as storage availability and scalability. Unfortunately, the EHR system with cloud computing can be vulnerable to various attacks because the sensitive data is sent over a public channel. We propose the secure protocol for cloud-assisted EHR system using blockchain. In the proposed scheme, blockchain technology is used to provide data integrity and access control using log transactions and the cloud server stores and manages the patient's EHRs to provide secure storage resources. We use an elliptic curve cryptosystems (ECC) to provide secure health data sharing with cloud computing. 
We demonstrate that the proposed EHR system can prevent various attacks by using informal security analysis and automated validation of internet security protocols and applications (AVISPA) simulation. Furthermore, we prove that the proposed EHR system provides secure mutual authentication using BAN logic analysis. We then compare the computation overhead, communication overhead, and security properties with existing schemes. Consequently, the proposed EHR system is suitable for the practical healthcare system considering security and efficiency.}, } @article {pmid32442274, year = {2020}, author = {Xu, Y and Yang-Turner, F and Volk, D and Crook, D}, title = {NanoSPC: a scalable, portable, cloud compatible viral nanopore metagenomic data processing pipeline.}, journal = {Nucleic acids research}, volume = {48}, number = {W1}, pages = {W366-W371}, pmid = {32442274}, issn = {1362-4962}, support = {/DH_/Department of Health/United Kingdom ; }, mesh = {Bacteria/genetics/isolation & purification ; Cloud Computing ; *Genome, Viral ; Metagenomics/*methods ; Nanopore Sequencing/*methods ; *Software ; Viruses/*genetics/isolation & purification ; }, abstract = {Metagenomic sequencing combined with Oxford Nanopore Technology has the potential to become a point-of-care test for infectious disease in public health and clinical settings, providing rapid diagnosis of infection, guiding individual patient management and treatment strategies, and informing infection prevention and control practices. However, publicly available, streamlined, and reproducible pipelines for analyzing Nanopore metagenomic sequencing data are still lacking. Here we introduce NanoSPC, a scalable, portable and cloud compatible pipeline for analyzing Nanopore sequencing data. NanoSPC can identify potentially pathogenic viruses and bacteria simultaneously to provide comprehensive characterization of individual samples. 
The pipeline can also detect single nucleotide variants and assemble high quality complete consensus genome sequences, permitting high-resolution inference of transmission. We implement NanoSPC using Nextflow manager within Docker images to allow reproducibility and portability of the analysis. Moreover, we deploy NanoSPC to our scalable pathogen pipeline platform, enabling elastic computing for high throughput Nanopore data on HPC cluster as well as multiple cloud platforms, such as Google Cloud, Amazon Elastic Computing Cloud, Microsoft Azure and OpenStack. Users could either access our web interface (https://nanospc.mmmoxford.uk) to run cloud-based analysis, monitor process, and visualize results, as well as download Docker images and run command line to analyse data locally.}, } @article {pmid32406827, year = {2020}, author = {Maeser, R}, title = {Analyzing CSP Trustworthiness and Predicting Cloud Service Performance.}, journal = {IEEE Open Journal of the Computer Society}, volume = {}, number = {}, pages = {}, doi = {10.1109/OJCS.2020.2994095}, pmid = {32406827}, issn = {2644-1268}, abstract = {Analytics firm Cyence estimated Amazon's four-hour cloud computing outage in 2017 "cost S\&P 500 companies at least \$150 million" and traffic monitoring firm Apica claimed "54 of the top 100 online retailers saw site performance slump by at least 20 percent". According to Ponemon, 2015 data center outages cost Fortune 1000 companies between \$1.25 and \$2.5 billion. Despite potential risks, the cloud computing industry continues to grow. For example, Internet of Things, which is projected to grow 266\% between 2013 and 2020, will drive increased demand on cloud computing as data across multiple industries is collected and sent back to cloud data centers for processing. RightScale estimates enterprises will continue to increase cloud demand with 85\% having multi-cloud strategies. This growth and dependency will influence risk exposure and potential for impact (e.g. 
availability, performance, security, financial). The research in this paper and proposed solution calculates cloud service provider (CSP) trustworthiness levels and predicts cloud service and cloud service level agreement (SLA) availability performance. Evolving industry standards (e.g. NIST, ISO/IEC) for cloud SLAs and existing work regarding CSP trustworthiness will be leveraged as regression-based predictive models are constructed to analyze CSP cloud computing services, SLA performance and CSP trustworthiness.}, } @article {pmid32406416, year = {2020}, author = {Greco, L and Percannella, G and Ritrovato, P and Tortorella, F and Vento, M}, title = {Trends in IoT based solutions for health care: Moving AI to the edge.}, journal = {Pattern recognition letters}, volume = {135}, number = {}, pages = {346-353}, pmid = {32406416}, issn = {0167-8655}, abstract = {In recent times, we assist to an ever growing diffusion of smart medical sensors and Internet of things devices that are heavily changing the way healthcare is approached worldwide. In this context, a combination of Cloud and IoT architectures is often exploited to make smart healthcare systems capable of supporting near realtime applications when processing and performing Artificial Intelligence on the huge amount of data produced by wearable sensor networks. Anyway, the response time and the availability of cloud based systems, together with security and privacy, still represent critical issues that prevents Internet of Medical Things (IoMT) devices and architectures from being a reliable and effective solution to the aim. Lately, there is a growing interest towards architectures and approaches that exploit Edge and Fog computing as an answer to compensate the weaknesses of the cloud. 
In this paper, we propose a short review about the general use of IoT solutions in health care, starting from early health monitoring solutions from wearable sensors up to a discussion about the latest trends in fog/edge computing for smart health.}, } @article {pmid32400988, year = {2020}, author = {Chen, S and Huang, J and Gao, Z}, title = {[Development of Hospital Medical Instrumentation Management System Based on Cloud Computing].}, journal = {Zhongguo yi liao qi xie za zhi = Chinese journal of medical instrumentation}, volume = {44}, number = {2}, pages = {141-144}, doi = {10.3969/j.issn.1671-7104.2020.02.010}, pmid = {32400988}, issn = {1671-7104}, mesh = {*Cloud Computing ; *Durable Medical Equipment ; *Materials Management, Hospital ; Software ; }, abstract = {In order to improve the efficiency of medical instrumentation management in hospital, reduce the management cost and save the human cost, this study analyzes the problems in the traditional hospital medical instrumentation management system, and develops a new system based on cloud computing. Through the characters of the SaaS Service Platform, the system improves the flow efficiency of the medical instrumentation in hospital, saves deployment and operating system costs, and improves the people's work efficiency.}, } @article {pmid32399163, year = {2020}, author = {Yu, J and Li, H and Liu, D}, title = {Modified Immune Evolutionary Algorithm for Medical Data Clustering and Feature Extraction under Cloud Computing Environment.}, journal = {Journal of healthcare engineering}, volume = {2020}, number = {}, pages = {1051394}, pmid = {32399163}, issn = {2040-2309}, mesh = {*Algorithms ; Big Data ; *Cloud Computing ; Cluster Analysis ; Data Mining/*methods/statistics & numerical data ; }, abstract = {Medical data have the characteristics of particularity and complexity. Big data clustering plays a significant role in the area of medicine. 
The traditional clustering algorithms are easily falling into local extreme value. It will generate clustering deviation, and the clustering effect is poor. Therefore, we propose a new medical big data clustering algorithm based on the modified immune evolutionary method under cloud computing environment to overcome the above disadvantages in this paper. Firstly, we analyze the big data structure model under cloud computing environment. Secondly, we give the detailed modified immune evolutionary method to cluster medical data including encoding, constructing fitness function, and selecting genetic operators. Finally, the experiments show that this new approach can improve the accuracy of data classification, reduce the error rate, and improve the performance of data mining and feature extraction for medical data clustering.}, } @article {pmid32397423, year = {2020}, author = {Yangui, S}, title = {A Panorama of Cloud Platforms for IoT Applications Across Industries.}, journal = {Sensors (Basel, Switzerland)}, volume = {20}, number = {9}, pages = {}, pmid = {32397423}, issn = {1424-8220}, abstract = {Internet of Things (IoT) applications can play a critical role in business and industry. Industrial IoT (IIoT) refers to the use of IoT technologies in manufacturing. Enabling IIoT applications in cloud environments requires the design of appropriate IIoT Platform as-a-Service (IIoT PaaS) to support and ease their provisioning (i.e., development, deployment and management). This paper critically reviews the IIoT PaaS architectures proposed so far in the relevant literature. It only surveys the architectures that are suitable for IIoT applications provisioning and it excludes regular IoT solutions from its scope. The evaluation is based on a set of well-defined architectural requirements. It also introduces and discusses the future challenges and the research directions. 
The critical review discusses the PaaS solutions that focus on the whole spectrum of IoT verticals and also the ones dealing with specific IoT verticals. Existing limitations are identified and hints are provided on how to tackle them. As critical research directions, the mechanisms that enable the secure provisioning, and IIoT PaaS interaction with virtualized IoT Infrastructure as-a-Service (IaaS) and fog computing layer are discussed.}, } @article {pmid32396074, year = {2020}, author = {Wu, Q and He, K and Chen, X}, title = {Personalized Federated Learning for Intelligent IoT Applications: A Cloud-Edge based Framework.}, journal = {IEEE Open Journal of the Computer Society}, volume = {}, number = {}, pages = {}, doi = {10.1109/OJCS.2020.2993259}, pmid = {32396074}, issn = {2644-1268}, abstract = {Internet of Things (IoT) have widely penetrated in different aspects of modern life and many intelligent IoT services and applications are emerging. Recently, federated learning is proposed to train a globally shared model by exploiting a massive amount of user-generated data samples on IoT devices while preventing data leakage. However, the device, statistical and model heterogeneities inherent in the complex IoT environments pose great challenges to traditional federated learning, making it unsuitable to be directly deployed. In this paper we advocate a personalized federated learning framework in a cloud-edge architecture for intelligent IoT applications. To cope with the heterogeneity issues in IoT environments, we investigate emerging personalized federated learning methods which are able to mitigate the negative effects caused by heterogeneities in different aspects. With the power of edge computing, the requirements for fast-processing capacity and low latency in intelligent IoT applications can also be achieved. 
We finally provide a case study of IoT based human activity recognition to demonstrate the effectiveness of personalized federated learning for intelligent IoT applications.}, } @article {pmid32382696, year = {2018}, author = {Ahmed, AE and Mpangase, PT and Panji, S and Baichoo, S and Souilmi, Y and Fadlelmola, FM and Alghali, M and Aron, S and Bendou, H and De Beste, E and Mbiyavanga, M and Souiai, O and Yi, L and Zermeno, J and Armstrong, D and O'Connor, BD and Mainzer, LS and Crusoe, MR and Meintjes, A and Van Heusden, P and Botha, G and Joubert, F and Jongeneel, CV and Hazelhurst, S and Mulder, N}, title = {Organizing and running bioinformatics hackathons within Africa: The H3ABioNet cloud computing experience.}, journal = {AAS open research}, volume = {1}, number = {}, pages = {9}, pmid = {32382696}, issn = {2515-9321}, support = {U24 HG006941/HG/NHGRI NIH HHS/United States ; U41 HG006941/HG/NHGRI NIH HHS/United States ; }, abstract = {The need for portable and reproducible genomics analysis pipelines is growing globally as well as in Africa, especially with the growth of collaborative projects like the Human Health and Heredity in Africa Consortium (H3Africa). The Pan-African H3Africa Bioinformatics Network (H3ABioNet) recognized the need for portable, reproducible pipelines adapted to heterogeneous computing environments, and for the nurturing of technical expertise in workflow languages and containerization technologies. Building on the network's Standard Operating Procedures (SOPs) for common genomic analyses, H3ABioNet arranged its first Cloud Computing and Reproducible Workflows Hackathon in 2016, with the purpose of translating those SOPs into analysis pipelines able to run on heterogeneous computing environments and meeting the needs of H3Africa research projects. This paper describes the preparations for this hackathon and reflects upon the lessons learned about its impact on building the technical and scientific expertise of African researchers. 
The workflows developed were made publicly available in GitHub repositories and deposited as container images on Quay.io.}, } @article {pmid32379124, year = {2020}, author = {Myers, TG and Ramkumar, PN and Ricciardi, BF and Urish, KL and Kipper, J and Ketonis, C}, title = {Artificial Intelligence and Orthopaedics: An Introduction for Clinicians.}, journal = {The Journal of bone and joint surgery. American volume}, volume = {102}, number = {9}, pages = {830-840}, pmid = {32379124}, issn = {1535-1386}, mesh = {*Artificial Intelligence ; Humans ; *Orthopedic Procedures ; *Orthopedics ; }, abstract = {➤. Artificial intelligence (AI) provides machines with the ability to perform tasks using algorithms governed by pattern recognition and self-correction on large amounts of data to narrow options in order to avoid errors. ➤. The 4 things necessary for AI in medicine include big data sets, powerful computers, cloud computing, and open source algorithmic development. ➤. The use of AI in health care continues to expand, and its impact on orthopaedic surgery can already be found in diverse areas such as image recognition, risk prediction, patient-specific payment models, and clinical decision-making. ➤. Just as the business of medicine was once considered outside the domain of the orthopaedic surgeon, emerging technologies such as AI warrant ownership, leverage, and application by the orthopaedic surgeon to improve the care that we provide to the patients we serve. ➤. AI could provide solutions to factors contributing to physician burnout and medical mistakes. 
However, challenges regarding the ethical deployment, regulation, and the clinical superiority of AI over traditional statistics and decision-making remain to be resolved.}, } @article {pmid32370129, year = {2020}, author = {Celesti, A and Ruggeri, A and Fazio, M and Galletta, A and Villari, M and Romano, A}, title = {Blockchain-Based Healthcare Workflow for Tele-Medical Laboratory in Federated Hospital IoT Clouds.}, journal = {Sensors (Basel, Switzerland)}, volume = {20}, number = {9}, pages = {}, pmid = {32370129}, issn = {1424-8220}, support = {GR-2016-02361306//Ministero della Salute/ ; }, mesh = {*Betacoronavirus ; Blockchain ; COVID-19 ; Cloud Computing ; Computer Security ; *Coronavirus Infections/prevention & control/transmission ; Humans ; *Pandemics/prevention & control ; *Pneumonia, Viral/prevention & control/transmission ; SARS-CoV-2 ; Telemedicine/*methods/*organization & administration ; }, abstract = {In a pandemic situation such as that we are living at the time of writing of this paper due to the Covid-19 virus, the need of tele-healthcare service becomes dramatically fundamental to reduce the movement of patients, thence reducing the risk of infection. Leveraging the recent Cloud computing and Internet of Things (IoT) technologies, this paper aims at proposing a tele-medical laboratory service where clinical exams are performed on patients directly in a hospital by technicians through IoT medical devices and results are automatically sent via the hospital Cloud to doctors of federated hospitals for validation and/or consultation. In particular, we discuss a distributed scenario where nurses, technicians and medical doctors belonging to different hospitals cooperate through their federated hospital Clouds to form a virtual health team able to carry out a healthcare workflow in secure fashion leveraging the intrinsic security features of the Blockchain technology. 
In particular, both public and hybrid Blockchain scenarios are discussed and assessed using the Ethereum platform.}, } @article {pmid32365815, year = {2020}, author = {Vilela, PH and Rodrigues, JJPC and Righi, RDR and Kozlov, S and Rodrigues, VF}, title = {Looking at Fog Computing for E-Health through the Lens of Deployment Challenges and Applications.}, journal = {Sensors (Basel, Switzerland)}, volume = {20}, number = {9}, pages = {}, pmid = {32365815}, issn = {1424-8220}, support = {Project UIDB/EEA/50008/2020//FCT/MCTES/ ; Grant 08-08//Government of the Russian Federation/ ; Grant No. 431726/2018-3//Conselho Nacional de Desenvolvimento Científico e Tecnológico/ ; Grants No. 309335/2017-5//Conselho Nacional de Desenvolvimento Científico e Tecnológico/ ; }, mesh = {*Cloud Computing ; Humans ; *Lenses ; Monitoring, Physiologic ; Privacy ; *Telemedicine ; }, abstract = {Fog computing is a distributed infrastructure where specific resources are managed at the network border using cloud computing principles and technologies. In contrast to traditional cloud computing, fog computing supports latency-sensitive applications with less energy consumption and a reduced amount of data traffic. A fog device is placed at the network border, allowing data collection and processing to be physically close to their end-users. This characteristic is essential for applications that can benefit from improved latency and response time. In particular, in the e-Health field, many solutions rely on real-time data to monitor environments, patients, and/or medical staff, aiming at improving processes and safety. Therefore, fog computing can play an important role in such environments, providing a low latency infrastructure. The main goal of the current research is to present fog computing strategies focused on electronic-Health (e-Health) applications. 
To the best of our knowledge, this article is the first to propose a review in the scope of applications and challenges of e-Health fog computing. We introduce some of the available e-Health solutions in the literature that focus on latency, security, privacy, energy efficiency, and resource management techniques. Additionally, we discuss communication protocols and technologies, detailing both in an architectural overview from the edge devices up to the cloud. Differently from traditional cloud computing, the fog concept demonstrates better performance in terms of time-sensitive requirements and network data traffic. Finally, based on the evaluation of the current technologies for e-Health, open research issues and challenges are identified, and further research directions are proposed.}, } @article {pmid32365040, year = {2022}, author = {Liu, GP}, title = {Coordinated Control of Networked Multiagent Systems via Distributed Cloud Computing Using Multistep State Predictors.}, journal = {IEEE transactions on cybernetics}, volume = {52}, number = {2}, pages = {810-820}, doi = {10.1109/TCYB.2020.2985043}, pmid = {32365040}, issn = {2168-2275}, abstract = {This article studies the coordinated control problem of networked multiagent systems via distributed cloud computing. A distributed cloud predictive control scheme is proposed to achieve desired coordination control performance and compensate actively for communication delays between the cloud computing nodes and between the agents. This scheme includes the design of a multistep state predictor and optimization of control coordination. The multistep state predictor provides a novel way of predicting future immeasurable states of agents in a large horizontal length. 
The optimization of control coordination minimizes the distributed cost functions which are presented to measure the coordination between the agents so that the optimal design of the coordination controllers is simple with little computational increase for large-scale-networked multiagent systems. Further analysis derives the conditions of simultaneous stability and consensus of the closed-loop-networked multiagent systems using the distributed cloud predictive control scheme. The effectiveness of the proposed scheme is illustrated by an example.}, } @article {pmid32356920, year = {2020}, author = {Luo, J and Chen, C and Li, Q}, title = {White blood cell counting at point-of-care testing: A review.}, journal = {Electrophoresis}, volume = {41}, number = {16-17}, pages = {1450-1468}, doi = {10.1002/elps.202000029}, pmid = {32356920}, issn = {1522-2683}, mesh = {Equipment Design ; Humans ; *Leukocyte Count ; *Microfluidic Analytical Techniques ; *Point-of-Care Systems ; }, abstract = {White blood cells, which are also called leukocytes, are found in the immune system that are involved in protecting the body against infections and foreign invaders. Conventional methods of leukocyte analysis provide valuable and accurate information to medical specialists. Analyzing and diagnosing of a disease requires a combination of multiple biomarkers, in some cases, however, such as personal health care, this will occupy some medical resources and causes unnecessary consumption. Traditional method (such as flow cytometer) for WBC counting is time and labor consuming. Compared to gold standard (flow-based fraction/micropore filtration) or improved filtration methods for WBC counting, this is still a lengthy and time consuming process and can lead to membrane fouling due to the rapid accumulation of biological materials. Therefore, the analysis of WBC counts requires more compact and efficient equipment. 
The microfluidic technologies, powered by different field (force, thermal, acoustic, optical, magnetic) and other methods for leukocyte counting and analysis, are much cost-efficient and can be used in in-home or in resource-limited areas to achieve Point-of-Care (POC). In this review, we highlight the mainstream devices that have been commercialized and extensively employed for patients for WBC counting, Next, we present some recent development with regards to leucocyte counting (mainly microfluidic technologies) and comment on their relative merits. We aim to focus and discuss the possibility of achieving POC and help researchers to tackle individual challenges accordingly. Finally, we offer some technologies in addition to previous detection devices, such as image recognition technology and cloud computing, which we believe have great potential to further promote real-time detection and improve medical diagnosis.}, } @article {pmid32349242, year = {2020}, author = {Kayes, ASM and Kalaria, R and Sarker, IH and Islam, MS and Watters, PA and Ng, A and Hammoudeh, M and Badsha, S and Kumara, I}, title = {A Survey of Context-Aware Access Control Mechanisms for Cloud and Fog Networks: Taxonomy and Open Research Issues.}, journal = {Sensors (Basel, Switzerland)}, volume = {20}, number = {9}, pages = {}, pmid = {32349242}, issn = {1424-8220}, abstract = {Over the last few decades, the proliferation of the Internet of Things (IoT) has produced an overwhelming flow of data and services, which has shifted the access control paradigm from a fixed desktop environment to dynamic cloud environments. Fog computing is associated with a new access control paradigm to reduce the overhead costs by moving the execution of application logic from the centre of the cloud data sources to the periphery of the IoT-oriented sensor networks. 
Indeed, accessing information and data resources from a variety of IoT sources has been plagued with inherent problems such as data heterogeneity, privacy, security and computational overheads. This paper presents an extensive survey of security, privacy and access control research, while highlighting several specific concerns in a wide range of contextual conditions (e.g., spatial, temporal and environmental contexts) which are gaining a lot of momentum in the area of industrial sensor and cloud networks. We present different taxonomies, such as contextual conditions and authorization models, based on the key issues in this area and discuss the existing context-sensitive access control approaches to tackle the aforementioned issues. With the aim of reducing administrative and computational overheads in the IoT sensor networks, we propose a new generation of Fog-Based Context-Aware Access Control (FB-CAAC) framework, combining the benefits of the cloud, IoT and context-aware computing; and ensuring proper access control and security at the edge of the end-devices. Our goal is not only to control context-sensitive access to data resources in the cloud, but also to move the execution of an application logic from the cloud-level to an intermediary-level where necessary, through adding computational nodes at the edge of the IoT sensor network. A discussion of some open research issues pertaining to context-sensitive access control to data resources is provided, including several real-world case studies. 
We conclude the paper with an in-depth analysis of the research challenges that have not been adequately addressed in the literature and highlight directions for future work that has not been well aligned with currently available research.}, } @article {pmid32348265, year = {2020}, author = {Ismail, L and Materwala, H and Karduck, AP and Adem, A}, title = {Requirements of Health Data Management Systems for Biomedical Care and Research: Scoping Review.}, journal = {Journal of medical Internet research}, volume = {22}, number = {7}, pages = {e17508}, pmid = {32348265}, issn = {1438-8871}, mesh = {Biomedical Research/*methods ; Data Management/*methods ; Delivery of Health Care/*methods ; Humans ; }, abstract = {BACKGROUND: Over the last century, disruptive incidents in the fields of clinical and biomedical research have yielded a tremendous change in health data management systems. This is due to a number of breakthroughs in the medical field and the need for big data analytics and the Internet of Things (IoT) to be incorporated in a real-time smart health information management system. In addition, the requirements of patient care have evolved over time, allowing for more accurate prognoses and diagnoses. In this paper, we discuss the temporal evolution of health data management systems and capture the requirements that led to the development of a given system over a certain period of time. Consequently, we provide insights into those systems and give suggestions and research directions on how they can be improved for a better health care system.

OBJECTIVE: This study aimed to show that there is a need for a secure and efficient health data management system that will allow physicians and patients to update decentralized medical records and to analyze the medical data for supporting more precise diagnoses, prognoses, and public insights. Limitations of existing health data management systems were analyzed.

METHODS: To study the evolution and requirements of health data management systems over the years, a search was conducted to obtain research articles and information on medical lawsuits, health regulations, and acts. These materials were obtained from the Institute of Electrical and Electronics Engineers, the Association for Computing Machinery, Elsevier, MEDLINE, PubMed, Scopus, and Web of Science databases.

RESULTS: Health data management systems have undergone a disruptive transformation over the years from paper to computer, web, cloud, IoT, big data analytics, and finally to blockchain. The requirements of a health data management system revealed from the evolving definitions of medical records and their management are (1) medical record data, (2) real-time data access, (3) patient participation, (4) data sharing, (5) data security, (6) patient identity privacy, and (7) public insights. This paper reviewed health data management systems based on these 7 requirements across studies conducted over the years. To our knowledge, this is the first analysis of the temporal evolution of health data management systems giving insights into the system requirements for better health care.

CONCLUSIONS: There is a need for a comprehensive real-time health data management system that allows physicians, patients, and external users to input their medical and lifestyle data into the system. The incorporation of big data analytics will aid in better prognosis or diagnosis of the diseases and the prediction of diseases. The prediction results will help in the development of an effective prevention plan.}, } @article {pmid32344803, year = {2020}, author = {Azghiou, K and El Mouhib, M and Koulali, MA and Benali, A}, title = {An End-to-End Reliability Framework of the Internet of Things.}, journal = {Sensors (Basel, Switzerland)}, volume = {20}, number = {9}, pages = {}, pmid = {32344803}, issn = {1424-8220}, abstract = {The Internet of Things (IoT) paradigm feeds from many scientific and engineering fields. This involves a diversity and heterogeneity of its underlying systems. When considering End-to-End IoT systems, we can identify the emergence of new classes of problems. The best-known ones are those associated to standardization for better interoperability and compatibility of those systems, and those who gave birth of new paradigms like that of Fog Computing. Predicting the reliability of an End-to-End IoT system is a problem belonging to this category. On one hand, predicting reliability can be mandatory, most times, before the deployment stage. On another hand, it may help engineers at the design and the operational stages to establish effective maintenance policies and may provide the various stakeholders and decision-makers a means to take the relevant actions. We can find in the literature works which consider only fragments of End-to-End IoT systems such as those assessing reliability for Wireless Sensors Networks (WSN) or Cloud subsystems, to cite just a few. Some other works are specific to well-defined industries, like those targeting reliability study of E-health and Smart-Grid infrastructures. 
Works that aim to assess reliability for an End-to-End IoT system are remarkably rare and particularly restrained in terms of expressiveness, flexibility, and in their implementation time complexity. In this paper, we apply the Reliability Block Diagram (RBD) paradigm to set up a framework for End-to-End IoT system reliability modeling and analysis. Our contribution is four-fold: we propose an IoT network-based layered architecture, we model in depth each layer of the proposed architecture, we suggest a flow chart to deploy the proposed framework, and we perform a numerical investigation of simplified scenarios. We affirm that the proposed framework is expressive, flexible, and scalable. The numerical study reveals mission time intervals which characterize the behavior of an IoT system from the point of view of its reliability.}, } @article {pmid32343907, year = {2020}, author = {Tsur, EE}, title = {Computer-Aided Design of Microfluidic Circuits.}, journal = {Annual review of biomedical engineering}, volume = {22}, number = {}, pages = {285-307}, doi = {10.1146/annurev-bioeng-082219-033358}, pmid = {32343907}, issn = {1545-4274}, mesh = {Algorithms ; Animals ; Benchmarking ; *Computer-Aided Design ; *Diagnosis, Computer-Assisted ; Equipment Design ; Humans ; *Lab-On-A-Chip Devices ; Machine Learning ; Microfluidic Analytical Techniques/methods ; *Microfluidics ; Software ; }, abstract = {Microfluidic devices developed over the past decade feature greater intricacy, increased performance requirements, new materials, and innovative fabrication methods. Consequentially, new algorithmic and design approaches have been developed to introduce optimization and computer-aided design to microfluidic circuits: from conceptualization to specification, synthesis, realization, and refinement. The field includes the development of new description languages, optimization methods, benchmarks, and integrated design tools. 
Here, recent advancements are reviewed in the computer-aided design of flow-, droplet-, and paper-based microfluidics. A case study of the design of resistive microfluidic networks is discussed in detail. The review concludes with perspectives on the future of computer-aided microfluidics design, including the introduction of cloud computing, machine learning, new ideation processes, and hybrid optimization.}, } @article {pmid32340971, year = {2020}, author = {Yang, G and Pang, Z and Jamal Deen, M and Dong, M and Zhang, YT and Lovell, N and Rahmani, AM}, title = {Homecare Robotic Systems for Healthcare 4.0: Visions and Enabling Technologies.}, journal = {IEEE journal of biomedical and health informatics}, volume = {24}, number = {9}, pages = {2535-2549}, doi = {10.1109/JBHI.2020.2990529}, pmid = {32340971}, issn = {2168-2208}, mesh = {*Artificial Intelligence ; Cloud Computing ; Delivery of Health Care ; Humans ; *Robotic Surgical Procedures ; }, abstract = {Powered by the technologies that have originated from manufacturing, the fourth revolution of healthcare technologies is happening (Healthcare 4.0). As an example of such revolution, new generation homecare robotic systems (HRS) based on the cyber-physical systems (CPS) with higher speed and more intelligent execution are emerging. In this article, the new visions and features of the CPS-based HRS are proposed. The latest progress in related enabling technologies is reviewed, including artificial intelligence, sensing fundamentals, materials and machines, cloud computing and communication, as well as motion capture and mapping. 
Finally, the future perspectives of the CPS-based HRS and the technical challenges faced in each technical area are discussed.}, } @article {pmid32331291, year = {2020}, author = {Podpora, M and Gardecki, A and Beniak, R and Klin, B and Vicario, JL and Kawala-Sterniuk, A}, title = {Human Interaction Smart Subsystem-Extending Speech-Based Human-Robot Interaction Systems with an Implementation of External Smart Sensors.}, journal = {Sensors (Basel, Switzerland)}, volume = {20}, number = {8}, pages = {}, pmid = {32331291}, issn = {1424-8220}, support = {RPOP.01.01.00-16-0072/16-00//European Regional Development Fund, Regional Operational Programme of the Opolskie region for the years 2014-2020: Priority Axis 01 Innovation in the Economy, Measure 1.1 Innovation in Enterprises/ ; }, abstract = {This paper presents a more detailed concept of Human-Robot Interaction systems architecture. One of the main differences between the proposed architecture and other ones is the methodology of information acquisition regarding the robot's interlocutor. In order to obtain as much information as possible before the actual interaction took place, a custom Internet-of-Things-based sensor subsystems connected to Smart Infrastructure was designed and implemented, in order to support the interlocutor identification and acquisition of initial interaction parameters. The Artificial Intelligence interaction framework of the developed robotic system (including humanoid Pepper with its sensors and actuators, additional local, remote and cloud computing services) is being extended with the use of custom external subsystems for additional knowledge acquisition: device-based human identification, visual identification and audio-based interlocutor localization subsystems. These subsystems were deeply introduced and evaluated in this paper, presenting the benefits of integrating them into the robotic interaction system. 
In this paper, a more detailed analysis of one of the external subsystems-Bluetooth Human Identification Smart Subsystem-was also included. The idea, the use case, and a prototype-integrating elements of Smart Infrastructure systems-were implemented and evaluated in a small front office of the Weegree company as a test-bed application area.}, } @article {pmid32326647, year = {2020}, author = {Chen, C and Tian, Y and Lin, L and Chen, S and Li, H and Wang, Y and Su, K}, title = {Obtaining World Coordinate Information of UAV in GNSS Denied Environments.}, journal = {Sensors (Basel, Switzerland)}, volume = {20}, number = {8}, pages = {}, pmid = {32326647}, issn = {1424-8220}, support = {2017YFB0504202//National Basic Research Program of China (973 Program)/ ; }, abstract = {GNSS information is vulnerable to external interference and causes failure when unmanned aerial vehicles (UAVs) are in a fully autonomous flight in complex environments such as high-rise parks and dense forests. This paper presents a pan-tilt-based visual servoing (PBVS) method for obtaining world coordinate information. The system is equipped with an inertial measurement unit (IMU), an air pressure sensor, a magnetometer, and a pan-tilt-zoom (PTZ) camera. In this paper, we explain the physical model and the application method of the PBVS system, which can be briefly summarized as follows. We track the operation target with a UAV carrying a camera and output the information about the UAV's position and the angle between the PTZ and the anchor point. In this way, we can obtain the current absolute position information of the UAV with its absolute altitude collected by the height sensing unit and absolute geographic coordinate information and altitude information of the tracked target. We set up an actual UAV experimental environment. To meet the calculation requirements, some sensor data will be sent to the cloud through the network. 
Through the field tests, it can be concluded that the systematic deviation of the overall solution is less than the error of GNSS sensor equipment, and it can provide navigation coordinate information for the UAV in complex environments. Compared with traditional visual navigation systems, our scheme has the advantage of obtaining absolute, continuous, accurate, and efficient navigation information at a short distance (within 15 m from the target). This system can be used in scenarios that require autonomous cruise, such as self-powered inspections of UAVs, patrols in parks, etc.}, } @article {pmid32322599, year = {2020}, author = {Tong, Y and Lu, W and Yu, Y and Shen, Y}, title = {Application of machine learning in ophthalmic imaging modalities.}, journal = {Eye and vision (London, England)}, volume = {7}, number = {}, pages = {22}, pmid = {32322599}, issn = {2326-0254}, abstract = {In clinical ophthalmology, a variety of image-related diagnostic techniques have begun to offer unprecedented insights into eye diseases based on morphological datasets with millions of data points. Artificial intelligence (AI), inspired by the human multilayered neuronal system, has shown astonishing success within some visual and auditory recognition tasks. In these tasks, AI can analyze digital data in a comprehensive, rapid and non-invasive manner. Bioinformatics has become a focus particularly in the field of medical imaging, where it is driven by enhanced computing power and cloud storage, as well as utilization of novel algorithms and generation of data in massive quantities. Machine learning (ML) is an important branch in the field of AI. The overall potential of ML to automatically pinpoint, identify and grade pathological features in ocular diseases will empower ophthalmologists to provide high-quality diagnosis and facilitate personalized health care in the near future. 
This review offers perspectives on the origin, development, and applications of ML technology, particularly regarding its applications in ophthalmic imaging modalities.}, } @article {pmid32316866, year = {2021}, author = {Qaffas, AA and Hoque, R and Almazmomi, N}, title = {The Internet of Things and Big Data Analytics for Chronic Disease Monitoring in Saudi Arabia.}, journal = {Telemedicine journal and e-health : the official journal of the American Telemedicine Association}, volume = {27}, number = {1}, pages = {74-81}, doi = {10.1089/tmj.2019.0289}, pmid = {32316866}, issn = {1556-3669}, mesh = {Aged ; Aged, 80 and over ; Data Science ; Health Services ; Humans ; *Internet of Things ; Monitoring, Physiologic ; Saudi Arabia/epidemiology ; }, abstract = {Background: Saudi Arabia is lagging behind developed countries in devising specific real projects, roadmaps, and policies for the Internet of Things (IoT) and big data adoption despite having a vision for providing the best-quality health care services to its citizens. As a result, Saudi Arabia is going to host an event for the third time, in 2020, promoting the widescale adoption of the IoT. While a nationwide study has identified the risk that many participants were previously undiagnosed for hypertension and other chronic diseases in Saudi Arabia, the application of the IoT and big data technologies could be very useful in minimizing such risks by predicting chronic disease earlier, and on a large scale. Materials and Methods: A framework that consists of four modules, (1) data collection, (2) data storage, (3) Hadoop/Spark cluster, and (4) Google Cloud, was developed in which decision tree and support vector machine (SVM) techniques were used for predicting hypertension. There were 140 participants in total and 20% of participants were used for training the model. Results: The results show that age and diabetes play a very significant part in diagnosing hypertension in older people. 
Also, it was found that the possibility of hypertension because of smoking is less than that of diabetes, and older people should have a lower intake of salty food. Moreover, it was found that SVM techniques yielded better results than C4.5 in our study. Conclusions: Although it was found that the algorithms examined in this study can be used for disease prediction, the ability to classify and predict disease is not yet sufficiently satisfactory. To achieve this, more training data and a longer duration are required. Finally, by supporting such study for developing custom-made smart wristbands, custom-made smart clothing, and custom-made smart homes that can predict and detect a wide range of chronic diseases, the Saudi government can achieve its health-related goals of Vision 2030.}, } @article {pmid32316465, year = {2020}, author = {Zhai, Z and Xiang, K and Zhao, L and Cheng, B and Qian, J and Wu, J}, title = {IoT-RECSM-Resource-Constrained Smart Service Migration Framework for IoT Edge Computing Environment.}, journal = {Sensors (Basel, Switzerland)}, volume = {20}, number = {8}, pages = {}, pmid = {32316465}, issn = {1424-8220}, support = {61862014, 61902086//National Nature Science Foundation of China/ ; 2018GXNSFBA281142//Guangxi Natural Science Foundation of China/ ; AD18281054//Guangxi Innovation Project of Young Talent of China/ ; }, abstract = {The edge-based computing paradigm (ECP) becomes one of the most innovative modes of processing distributed Internet of Things (IoT) sensor data. However, the edge nodes in ECP are usually resource-constrained. When more services are executed on an edge node, the resources required by these services may exceed the edge node's, so as to fail to maintain the normal running of the edge node. In order to solve this problem, this paper proposes a resource-constrained smart service migration framework for edge computing environment in IoT (IoT-RECSM) and a dynamic edge service migration algorithm. 
Based on this algorithm, the framework can dynamically migrate services of resource-critical edge nodes to resource-rich nodes. In the framework, four abstract models are presented to quantificationally evaluate the resource usage of edge nodes and the resource consumption of edge service in real-time. Finally, an edge smart services migration prototype system is implemented to simulate the edge service migration in IoT environment. Based on the system, an IoT case including 10 edge nodes is simulated to evaluate the proposed approach. According to the experiment results, service migration among edge nodes not only maintains the stability of service execution on edge nodes, but also reduces the sensor data traffic between edge nodes and cloud center.}, } @article {pmid32316247, year = {2020}, author = {Tan, QW and Goh, W and Mutwil, M}, title = {LSTrAP-Cloud: A User-Friendly Cloud Computing Pipeline to Infer Coexpression Networks.}, journal = {Genes}, volume = {11}, number = {4}, pages = {}, pmid = {32316247}, issn = {2073-4425}, support = {1/CX/CSRD VA/United States ; }, mesh = {Cloud Computing/*statistics & numerical data ; Gene Expression Profiling/*methods ; *Gene Regulatory Networks ; Genome, Human ; High-Throughput Nucleotide Sequencing ; Humans ; Sequence Analysis, RNA/*methods ; *Software ; *User-Computer Interface ; }, abstract = {As genomes become more and more available, gene function prediction presents itself as one of the major hurdles in our quest to extract meaningful information on the biological processes genes participate in. In order to facilitate gene function prediction, we show how our user-friendly pipeline, the Large-Scale Transcriptomic Analysis Pipeline in Cloud (LSTrAP-Cloud), can be useful in helping biologists make a shortlist of genes involved in a biological process that they might be interested in, by using a single gene of interest as bait. 
The LSTrAP-Cloud is based on Google Colaboratory, and provides user-friendly tools that process quality-control RNA sequencing data streamed from the European Nucleotide Archive. The LSTrAP-Cloud outputs a gene coexpression network that can be used to identify functionally related genes for any organism with a sequenced genome and publicly available RNA sequencing data. Here, we used the biosynthesis pathway of Nicotiana tabacum as a case study to demonstrate how enzymes, transporters, and transcription factors involved in the synthesis, transport, and regulation of nicotine can be identified using our pipeline.}, } @article {pmid32310989, year = {2020}, author = {Swathy, R and Vinayagasundaram, B and Rajesh, G and Nayyar, A and Abouhawwash, M and Abu Elsoud, M}, title = {Game theoretical approach for load balancing using SGMLB model in cloud environment.}, journal = {PloS one}, volume = {15}, number = {4}, pages = {e0231708}, pmid = {32310989}, issn = {1932-6203}, mesh = {Cloud Computing/*standards ; *Game Theory ; *Models, Theoretical ; }, abstract = {On-demand cloud computing is one of the rapidly evolving technologies that is being widely used in the industries now. With the increase in IoT devices and real-time business analytics requirements, enterprises that ought to scale up and scale down their services have started coming towards on-demand cloud computing service providers. In a cloud data center, a high volume of continuous incoming task requests to physical hosts makes an imbalance in the cloud data center load. Most existing works balance the load by optimizing the algorithm in selecting the optimal host and achieve instantaneous load balancing but with execution inefficiency for tasks when carried out in the long run. 
Considering the long-term perspective of load balancing, the research paper proposes a Stackelberg (leader-follower) game-theoretical model reinforced with the satisfaction factor for selecting the optimal physical host for deploying the tasks arriving at the data center in a balanced way. Stackelberg Game Theoretical Model for Load Balancing (SGMLB) algorithm deploys the tasks on the host in the data center by considering the utilization factor of every individual host, which helps in achieving high resource utilization on an average of 60%. Experimental results show that the Stackelberg equilibrium incorporated with a satisfaction index has been very useful in balancing the load across the cluster by choosing the optimal hosts. The results show better execution efficiency in terms of the reduced number of task failures by 47%, decreased 'makespan' value by 17%, increased throughput by 6%, and a decreased front-end error rate as compared to the traditional random allocation algorithms and flow-shop scheduling algorithm.}, } @article {pmid32306927, year = {2020}, author = {Yukselen, O and Turkyilmaz, O and Ozturk, AR and Garber, M and Kucukural, A}, title = {DolphinNext: a distributed data processing platform for high throughput genomics.}, journal = {BMC genomics}, volume = {21}, number = {1}, pages = {310}, pmid = {32306927}, issn = {1471-2164}, support = {UL1 TR001453/TR/NCATS NIH HHS/United States ; #UL1 TR001453-01/TR/NCATS NIH HHS/United States ; #U01 HG007910-01/HG/NHGRI NIH HHS/United States ; }, mesh = {Algorithms ; *Chromatin Immunoprecipitation Sequencing ; Databases, Factual ; Genomics/*methods ; Programming Languages ; *RNA-Seq ; Reproducibility of Results ; *Software ; User-Computer Interface ; Workflow ; }, abstract = {BACKGROUND: The emergence of high throughput technologies that produce vast amounts of genomic data, such as next-generation sequencing (NGS) is transforming biological research. 
The dramatic increase in the volume of data, the variety and continuous change of data processing tools, algorithms and databases make analysis the main bottleneck for scientific discovery. The processing of high throughput datasets typically involves many different computational programs, each of which performs a specific step in a pipeline. Given the wide range of applications and organizational infrastructures, there is a great need for highly parallel, flexible, portable, and reproducible data processing frameworks. Several platforms currently exist for the design and execution of complex pipelines. Unfortunately, current platforms lack the necessary combination of parallelism, portability, flexibility and/or reproducibility that are required by the current research environment. To address these shortcomings, workflow frameworks that provide a platform to develop and share portable pipelines have recently arisen. We complement these new platforms by providing a graphical user interface to create, maintain, and execute complex pipelines. Such a platform will simplify robust and reproducible workflow creation for non-technical users as well as provide a robust platform to maintain pipelines for large organizations.

RESULTS: To simplify development, maintenance, and execution of complex pipelines we created DolphinNext. DolphinNext facilitates building and deployment of complex pipelines using a modular approach implemented in a graphical interface that relies on the powerful Nextflow workflow framework by providing 1. A drag and drop user interface that visualizes pipelines and allows users to create pipelines without familiarity in underlying programming languages. 2. Modules to execute and monitor pipelines in distributed computing environments such as high-performance clusters and/or cloud 3. Reproducible pipelines with version tracking and stand-alone versions that can be run independently. 4. Modular process design with process revisioning support to increase reusability and pipeline development efficiency. 5. Pipeline sharing with GitHub and automated testing 6. Extensive reports with R-markdown and shiny support for interactive data visualization and analysis.

CONCLUSION: DolphinNext is a flexible, intuitive, web-based data processing and analysis platform that enables creating, deploying, sharing, and executing complex Nextflow pipelines with extensive revisioning and interactive reporting to enhance reproducible results.}, } @article {pmid32300668, year = {2020}, author = {Caiza, G and Saeteros, M and Oñate, W and Garcia, MV}, title = {Fog computing at industrial level, architecture, latency, energy, and security: A review.}, journal = {Heliyon}, volume = {6}, number = {4}, pages = {e03706}, doi = {10.1016/j.heliyon.2020.e03706}, pmid = {32300668}, issn = {2405-8440}, abstract = {The industrial applications in the cloud do not meet the requirements of low latency and reliability since variables must be continuously monitored. For this reason, industrial internet of things (IIoT) is a challenge for the current infrastructure because it generates a large amount of data making cloud computing reach the edge and become fog computing (FC). FC can be considered as a new component of Industry 4.0, which aims to solve the problem of big data, reduce energy consumption in industrial sensor networks, and improve the security, processing, and storage of real-time data. It is a promising growing paradigm that offers new opportunities and challenges, besides the ones inherited from cloud computing, which requires a new heterogeneous architecture to improve the network capacity for delivering edge services, that is, providing computing resources closer to the end user. 
The purpose of this research is to show a systematic review of the most recent studies about the architecture, security, latency, and energy consumption that FC presents at industrial level and thus provide an overview of the current characteristics and challenges of this new technology.}, } @article {pmid32296450, year = {2020}, author = {Wang, L and Lu, Z and delaBastide, M and Van Buren, P and Wang, X and Ghiban, C and Regulski, M and Drenkow, J and Xu, X and Ortiz-Ramirez, C and Marco, CF and Goodwin, S and Dobin, A and Birnbaum, KD and Jackson, DP and Martienssen, RA and McCombie, WR and Micklos, DA and Schatz, MC and Ware, DH and Gingeras, TR}, title = {Management, Analyses, and Distribution of the MaizeCODE Data on the Cloud.}, journal = {Frontiers in plant science}, volume = {11}, number = {}, pages = {289}, pmid = {32296450}, issn = {1664-462X}, support = {R50 CA243890/CA/NCI NIH HHS/United States ; }, abstract = {MaizeCODE is a project aimed at identifying and analyzing functional elements in the maize genome. In its initial phase, MaizeCODE assayed up to five tissues from four maize strains (B73, NC350, W22, TIL11) by RNA-Seq, Chip-Seq, RAMPAGE, and small RNA sequencing. To facilitate reproducible science and provide both human and machine access to the MaizeCODE data, we enhanced SciApps, a cloud-based portal, for analysis and distribution of both raw data and analysis results. Based on the SciApps workflow platform, we generated new components to support the complete cycle of MaizeCODE data management. These include publicly accessible scientific workflows for the reproducible and shareable analysis of various functional data, a RESTful API for batch processing and distribution of data and metadata, a searchable data page that lists each MaizeCODE experiment as a reproducible workflow, and integrated JBrowse genome browser tracks linked with workflows and metadata. 
The SciApps portal is a flexible platform that allows the integration of new analysis tools, workflows, and genomic data from multiple projects. Through metadata and a ready-to-compute cloud-based platform, the portal experience improves access to the MaizeCODE data and facilitates its analysis.}, } @article {pmid32288231, year = {2019}, author = {Feng, M and Shaw, SL and Fang, Z and Cheng, H}, title = {Relative space-based GIS data model to analyze the group dynamics of moving objects.}, journal = {ISPRS journal of photogrammetry and remote sensing : official publication of the International Society for Photogrammetry and Remote Sensing (ISPRS)}, volume = {153}, number = {}, pages = {74-95}, pmid = {32288231}, issn = {0924-2716}, abstract = {The relative motion of moving objects is an essential research topic in geographical information science (GIScience), which supports the innovation of geodatabases, spatial indexing, and geospatial services. This analysis is very popular in the domains of urban governance, transportation engineering, logistics and geospatial information services for individuals or industrials. Importantly, data models of moving objects are one of the most crucial approaches to support the analysis for dynamic relative motion between moving objects, even in the age of big data and cloud computing. Traditional geographic information systems (GIS) usually organize moving objects as point objects in absolute coordinated space. The derivation of relative motions among moving objects is not efficient because of the additional geo-computation of transformation between absolute space and relative space. Therefore, current GISs require an innovative approach to directly store, analyze and interpret the relative relationships of moving objects to support their efficient analysis. 
This paper proposes a relative space-based GIS data model of moving objects (RSMO) to construct, operate and analyze moving objects' relationships and introduces two algorithms (relationship querying and relative relationship dynamic pattern matching) to derive and analyze the dynamic relationships of moving objects. Three scenarios (epidemic spreading, tracker finding, and motion-trend derivation of nearby crowds) are implemented to demonstrate the feasibility of the proposed model. The experimental results indicate the execution times of the proposed model are approximately 5-50% of those of the absolute GIS method for the same function of these three scenarios. Based on these experimental results, the proposed model shows better computational performance than the absolute methods of a well-known commercial GIS software when analyzing the relative relationships of moving objects. The proposed approach fills the gap of traditional GIS and shows promise for relative space-based geo-computation, analysis and service.}, } @article {pmid32279878, year = {2020}, author = {Sethi, A and Ting, J and Allen, M and Clark, W and Weber, D}, title = {Advances in motion and electromyography based wearable technology for upper extremity function rehabilitation: A review.}, journal = {Journal of hand therapy : official journal of the American Society of Hand Therapists}, volume = {33}, number = {2}, pages = {180-187}, doi = {10.1016/j.jht.2019.12.021}, pmid = {32279878}, issn = {1545-004X}, mesh = {Electromyography/*instrumentation ; Humans ; Monitoring, Physiologic/*instrumentation ; *Motion ; Physical Therapy Modalities/*instrumentation ; *Upper Extremity ; *Wearable Electronic Devices ; }, abstract = {STUDY DESIGN: Scoping review.

INTRODUCTION: With the recent advances in technologies, interactive wearable technologies including inertial motion sensors and e-textiles are emerging in the field of rehabilitation to monitor and provide feedback and therapy remotely.

PURPOSE OF THE STUDY: This review article focuses on inertial measurement unit motion sensor and e-textiles-based technologies and proposes approaches to augment these interactive wearable technologies.

METHODS: We conducted a comprehensive search of relevant electronic databases (eg, PubMed, the Cumulative Index to Nursing and Allied Health Literature, Embase, PsycINFO, The Cochrane Central Register of Controlled Trial, and the Physiotherapy Evidence Database). The scoping review included all study designs.

RESULTS: Currently, there are a numerous research groups and companies investigating inertial motion sensors and e-textiles-based interactive wearable technologies. However, translation of these technologies to the clinic would need further research to increase ease of use and improve clinical validity of the outcomes of these technologies.

DISCUSSION: The current review discusses the limitations of the interactive wearable technologies such as, limited clinical utility, bulky equipment, difficulty in setting up equipment inertial motion sensors and e-textiles.

CONCLUSION: There is tremendous potential for interactive wearable technologies in rehabilitation. With the evolution of cloud computing, interactive wearable systems can remotely provide intervention and monitor patient progress using models of telerehabilitation. This will revolutionize the delivery of rehabilitation and make rehabilitation more accessible and affordable to millions of individuals.}, } @article {pmid32278250, year = {2020}, author = {Wen, T and Liu, H and Lin, L and Wang, B and Hou, J and Huang, C and Pan, T and Du, Y}, title = {Multiswarm Artificial Bee Colony algorithm based on spark cloud computing platform for medical image registration.}, journal = {Computer methods and programs in biomedicine}, volume = {192}, number = {}, pages = {105432}, doi = {10.1016/j.cmpb.2020.105432}, pmid = {32278250}, issn = {1872-7565}, mesh = {*Algorithms ; *Cloud Computing ; *Diagnostic Imaging ; *Image Processing, Computer-Assisted/statistics & numerical data ; }, abstract = {BACKGROUND: Over the years, medical image registration has been widely used in various fields. However, different application characteristics, such as scale, computational complexity, and optimization goals, can cause problems. Therefore, developing an optimization algorithm based on clustering calculation is crucial.

METHOD: To solve the aforementioned problem, a multiswarm artificial bee colony (MS-ABC) multi-objective optimization algorithm based on clustering calculation is proposed. This algorithm can accelerate the resolution of complex problems on the Spark platform. Experiments show that the algorithm can optimize certain conventional complex problems and perform medical image registration tests.

RESULT: Results show that the MS-ABC algorithm demonstrates excellent performance in medical image registration tests. The optimization results of the MS-ABC algorithm for conventional problems are similar to those of existing algorithms; however, its performance is more time efficient for complex problems, especially when additional goals are needed.

CONCLUSION: The MS-ABC algorithm is applied to the Spark platform to accelerate the resolution of complex application problems. It can solve the problem of traditional algorithms regarding long calculation time, especially in the case of highly complex and large amounts of data, which can substantially improve data-processing efficiency.}, } @article {pmid32275956, year = {2020}, author = {Inupakutika, D and Kaghyan, S and Akopian, D and Chalela, P and Ramirez, AG}, title = {Facilitating the development of cross-platform mHealth applications for chronic supportive care and a case study.}, journal = {Journal of biomedical informatics}, volume = {105}, number = {}, pages = {103420}, doi = {10.1016/j.jbi.2020.103420}, pmid = {32275956}, issn = {1532-0480}, support = {P30 CA054174/CA/NCI NIH HHS/United States ; U54 CA153511/CA/NCI NIH HHS/United States ; }, mesh = {Humans ; Long-Term Care ; *Mobile Applications ; Research ; *Telemedicine ; }, abstract = {Mobile health (mHealth) apps have received increasing attention, due to their abilities to support patients who suffer from various conditions. mHealth apps may be especially helpful for patients with chronic diseases, by providing pertinent information, tracking symptoms, and inspiring adherence to medication regimens. To achieve these objectives, researchers need to prototype mHealth apps with dedicated software architectures. In this paper, a cloud-based mHealth application development concept is presented for chronic patient supportive care apps. The concept integrates existing software platforms and services for simplified app development that can be reused for other target applications. This developmental method also facilitates app portability, through the use of common components found across multiple mobile platforms, and scalability, through the loose coupling of services. 
The results are demonstrated by the development of native Android and cross-platform web apps, in a case study that presents an mHealth solution for endocrine hormone therapy (EHT). A performance analysis methodology, an app usability evaluation, based on focus group responses, and alpha and pre-beta testing results are provided.}, } @article {pmid32275656, year = {2020}, author = {Xie, L and Yang, S and Squirrell, D and Vaghefi, E}, title = {Towards implementation of AI in New Zealand national diabetic screening program: Cloud-based, robust, and bespoke.}, journal = {PloS one}, volume = {15}, number = {4}, pages = {e0225015}, pmid = {32275656}, issn = {1932-6203}, mesh = {*Cloud Computing ; Diabetes Mellitus/diagnosis/epidemiology ; Diabetic Retinopathy/*diagnosis/epidemiology ; Fundus Oculi ; Humans ; Mass Screening ; *Neural Networks, Computer ; New Zealand/epidemiology ; Retina/pathology ; }, abstract = {Convolutional Neural Networks (CNNs) have become a prominent method of AI implementation in medical classification tasks. Grading Diabetic Retinopathy (DR) has been at the forefront of the development of AI for ophthalmology. However, major obstacles remain in the generalization of these CNNs onto real-world DR screening programs. We believe these difficulties are due to use of 1) small training datasets (<5,000 images), 2) private and 'curated' repositories, 3) locally implemented CNN implementation methods, while 4) relying on measured Area Under the Curve (AUC) as the sole measure of CNN performance. To address these issues, the public EyePACS Kaggle Diabetic Retinopathy dataset was uploaded onto Microsoft Azure™ cloud platform. Two CNNs were trained; 1 a "Quality Assurance", and 2. a "Classifier". The Diabetic Retinopathy classifier CNN (DRCNN) performance was then tested both on 'un-curated' as well as the 'curated' test set created by the "Quality Assessment" CNN model. Finally, the sensitivity of the DRCNNs was boosted using two post-training techniques. 
Our DRCNN proved to be robust, as its performance was similar on 'curated' and 'un-curated' test sets. The implementation of 'cascading thresholds' and 'max margin' techniques led to significant improvements in the DRCNN's sensitivity, while also enhancing the specificity of other grades.}, } @article {pmid32273922, year = {2020}, author = {Miyahara, K and Hoshina, K and Akai, T and Isaji, T and Yamamoto, K and Takayama, T}, title = {Development of a Web Application That Evaluates Suture Performance in Off-the-Job Training.}, journal = {Annals of vascular diseases}, volume = {13}, number = {1}, pages = {52-55}, pmid = {32273922}, issn = {1881-641X}, abstract = {Objective: To create a web application that can evaluate suture performance and assess its quality. Material and Methods: We developed the web application using a few cloud computing systems, servers, database, and computing languages. We used 20 anastomosed graft samples for optimizing the application. The images of the anastomosed grafts were captured two-dimensionally. Five vascular surgeons utilized the application to compute the objective score and rank the score of the anastomoses subjectively. Results: Steps for using the application include uploading a two-dimensional image of sutures, tracing the stitch line manually, and pushing the button to have the score displayed. After using this system for more than 1,000 times without server issues or failures, we confirmed its stability and easy accessibility. The system calculated the score within several seconds. The score of the three factors (bite, pitch, and skewness of angle) ranged from 0.25 to 0.76. The error range of the application was acceptable. The interclass correlation coefficient (ICC (2,1)) of the three factors was 0.92. 
Conclusion: The quality of the application was acceptable considering the low range of interoperator variations in the scores.}, } @article {pmid32272675, year = {2020}, author = {G Lopes, AP and Gondim, PRL}, title = {Mutual Authentication Protocol for D2D Communications in a Cloud-Based E-Health System.}, journal = {Sensors (Basel, Switzerland)}, volume = {20}, number = {7}, pages = {}, pmid = {32272675}, issn = {1424-8220}, support = {Scholarship - Ana Paula G. Lopes//Coordenação de Aperfeiçoamento de Pessoal de Nível Superior/ ; }, mesh = {*Cloud Computing ; Computer Security ; Confidentiality ; Humans ; *Internet of Things ; Telemedicine/*methods ; Wearable Electronic Devices ; Wireless Technology ; }, abstract = {The development of the Internet of Things (IoT) predicts several new applications, some of which are designed to be incorporated into e-health systems, and some technologies, like cloud computing and device-to-device communication (D2D), are promising for use in the support of resource-constrained devices employed in Mobile-health (m-health) and Telecare Medicine Information Systems (TMIS). In a scenario with billions of devices predicted for the IoT, it is essential to avoid performance and security problems, among others. Security is fundamental for the achievement of optimal performance regarding the sensibility of e-health shared data and, especially, the anonymity of patients and other entities, while it is also essential to consider the scarcity of bandwidth in wireless networks. 
This paper proposes a new mutual authentication protocol for m-health systems, which supports D2D communication, ensuring security and surpassing the performance and security of other authentication procedures reported in the literature.}, } @article {pmid32271788, year = {2020}, author = {Wu, B and Wang, C and Yao, H}, title = {Security analysis and secure channel-free certificateless searchable public key authenticated encryption for a cloud-based Internet of things.}, journal = {PloS one}, volume = {15}, number = {4}, pages = {e0230722}, pmid = {32271788}, issn = {1932-6203}, mesh = {Algorithms ; Cloud Computing/*standards ; Computer Security/*standards ; Confidentiality ; Data Management/methods/organization & administration/standards ; Efficiency, Organizational ; Electronic Health Records/organization & administration/standards ; Health Information Exchange/standards ; Humans ; *Information Storage and Retrieval/methods/standards ; *Internet of Things/organization & administration/standards ; Outsourced Services/organization & administration/standards ; *Public Sector/organization & administration/standards ; Wireless Technology/organization & administration/standards ; }, abstract = {With the rapid development of informatization, an increasing number of industries and organizations outsource their data to cloud servers, to avoid the cost of local data management and to share data. For example, industrial Internet of things systems and mobile healthcare systems rely on cloud computing's powerful data storage and processing capabilities to address the storage, provision, and maintenance of massive amounts of industrial and medical data. One of the major challenges facing cloud-based storage environments is how to ensure the confidentiality and security of outsourced sensitive data. To mitigate these issues, He et al. and Ma et al. have recently independently proposed two certificateless public key searchable encryption schemes. 
In this paper, we analyze the security of these two schemes and show that the reduction proof of He et al.'s CLPAEKS scheme is incorrect, and that Ma et al.'s CLPEKS scheme is not secure against keyword guessing attacks. We then propose a channel-free certificateless searchable public key authenticated encryption (dCLPAEKS) scheme and prove that it is secure against inside keyword guessing attacks under the enhanced security model. Compared with other certificateless public key searchable encryption schemes, this scheme has higher security and comparable efficiency.}, } @article {pmid32267693, year = {2020}, author = {Seritan, S and Thompson, K and Martínez, TJ}, title = {TeraChem Cloud: A High-Performance Computing Service for Scalable Distributed GPU-Accelerated Electronic Structure Calculations.}, journal = {Journal of chemical information and modeling}, volume = {60}, number = {4}, pages = {2126-2137}, doi = {10.1021/acs.jcim.9b01152}, pmid = {32267693}, issn = {1549-960X}, mesh = {Algorithms ; *Cloud Computing ; Computers ; *Computing Methodologies ; Electronics ; }, abstract = {The encapsulation and commoditization of electronic structure arise naturally as interoperability, and the use of nontraditional compute resources (e.g., new hardware accelerators, cloud computing) remains important for the computational chemistry community. We present TeraChem Cloud, a high-performance computing service (HPCS) that offers on-demand electronic structure calculations on both traditional HPC clusters and cloud-based hardware. The framework is designed using off-the-shelf web technologies and containerization to be extremely scalable and portable. Within the HPCS model, users can quickly develop new methods and algorithms in an interactive environment on their laptop while allowing TeraChem Cloud to distribute ab initio calculations across all available resources. 
This approach greatly increases the accessibility of hardware accelerators such as graphics processing units (GPUs) and flexibility for the development of new methods as additional electronic structure packages are integrated into the framework as alternative backends. Cost-performance analysis indicates that traditional nodes are the most cost-effective long-term solution, but commercial cloud providers offer cutting-edge hardware with competitive rates for short-term large-scale calculations. We demonstrate the power of the TeraChem Cloud framework by carrying out several showcase calculations, including the generation of 300,000 density functional theory energy and gradient evaluations on medium-sized organic molecules and reproducing 300 fs of nonadiabatic dynamics on the B800-B850 antenna complex in LH2, with the latter demonstration using over 50 Tesla V100 GPUs in a commercial cloud environment in 8 h for approximately $1250.}, } @article {pmid32256551, year = {2020}, author = {Attiya, I and Abd Elaziz, M and Xiong, S}, title = {Job Scheduling in Cloud Computing Using a Modified Harris Hawks Optimization and Simulated Annealing Algorithm.}, journal = {Computational intelligence and neuroscience}, volume = {2020}, number = {}, pages = {3504642}, pmid = {32256551}, issn = {1687-5273}, mesh = {*Algorithms ; *Cloud Computing ; Humans ; *Personnel Staffing and Scheduling ; }, abstract = {In recent years, cloud computing technology has attracted extensive attention from both academia and industry. The popularity of cloud computing was originated from its ability to deliver global IT services such as core infrastructure, platforms, and applications to cloud customers over the web. Furthermore, it promises on-demand services with new forms of the pricing package. However, cloud job scheduling is still NP-complete and became more complicated due to some factors such as resource dynamicity and on-demand consumer application requirements. 
To fill this gap, this paper presents a modified Harris hawks optimization (HHO) algorithm based on the simulated annealing (SA) for scheduling jobs in the cloud environment. In the proposed HHOSA approach, SA is employed as a local search algorithm to improve the rate of convergence and quality of solution generated by the standard HHO algorithm. The performance of the HHOSA method is compared with that of state-of-the-art job scheduling algorithms, by having them all implemented on the CloudSim toolkit. Both standard and synthetic workloads are employed to analyze the performance of the proposed HHOSA algorithm. The obtained results demonstrate that HHOSA can achieve significant reductions in makespan of the job scheduling problem as compared to the standard HHO and other existing scheduling algorithms. Moreover, it converges faster when the search space becomes larger which makes it appropriate for large-scale scheduling problems.}, } @article {pmid32246291, year = {2020}, author = {Martí-Bonmatí, L and Alberich-Bayarri, Á and Ladenstein, R and Blanquer, I and Segrelles, JD and Cerdá-Alberich, L and Gkontra, P and Hero, B and García-Aznar, JM and Keim, D and Jentner, W and Seymour, K and Jiménez-Pastor, A and González-Valverde, I and Martínez de Las Heras, B and Essiaf, S and Walker, D and Rochette, M and Bubak, M and Mestres, J and Viceconti, M and Martí-Besa, G and Cañete, A and Richmond, P and Wertheim, KY and Gubala, T and Kasztelnik, M and Meizner, J and Nowakowski, P and Gilpérez, S and Suárez, A and Aznar, M and Restante, G and Neri, E}, title = {PRIMAGE project: predictive in silico multiscale analytics to support childhood cancer personalised evaluation empowered by imaging biomarkers.}, journal = {European radiology experimental}, volume = {4}, number = {1}, pages = {22}, pmid = {32246291}, issn = {2509-9280}, support = {GA-826494//Horizon 2020 Framework Programme/International ; }, mesh = {*Artificial Intelligence ; Biomarkers/*analysis ; Brain 
Neoplasms/*diagnostic imaging/*therapy ; Child ; Cloud Computing ; Decision Support Techniques ; Disease Progression ; Europe ; Female ; Glioma/*diagnostic imaging/*therapy ; Humans ; Male ; Neuroblastoma/*diagnostic imaging/*therapy ; Phenotype ; Prognosis ; Tumor Burden ; }, abstract = {PRIMAGE is one of the largest and more ambitious research projects dealing with medical imaging, artificial intelligence and cancer treatment in children. It is a 4-year European Commission-financed project that has 16 European partners in the consortium, including the European Society for Paediatric Oncology, two imaging biobanks, and three prominent European paediatric oncology units. The project is constructed as an observational in silico study involving high-quality anonymised datasets (imaging, clinical, molecular, and genetics) for the training and validation of machine learning and multiscale algorithms. The open cloud-based platform will offer precise clinical assistance for phenotyping (diagnosis), treatment allocation (prediction), and patient endpoints (prognosis), based on the use of imaging biomarkers, tumour growth simulation, advanced visualisation of confidence scores, and machine-learning approaches. The decision support prototype will be constructed and validated on two paediatric cancers: neuroblastoma and diffuse intrinsic pontine glioma. External validation will be performed on data recruited from independent collaborative centres. 
Final results will be available for the scientific community at the end of the project, and ready for translation to other malignant solid tumours.}, } @article {pmid32244458, year = {2020}, author = {Al-Aqrabi, H and Johnson, AP and Hill, R and Lane, P and Alsboui, T}, title = {Hardware-Intrinsic Multi-Layer Security: A New Frontier for 5G Enabled IIoT.}, journal = {Sensors (Basel, Switzerland)}, volume = {20}, number = {7}, pages = {}, pmid = {32244458}, issn = {1424-8220}, abstract = {The introduction of 5G communication capabilities presents additional challenges for the development of products and services that can fully exploit the opportunities offered by high bandwidth, low latency networking. This is particularly relevant to an emerging interest in the Industrial Internet of Things (IIoT), which is a foundation stone of recent technological revolutions such as Digital Manufacturing. A crucial aspect of this is to securely authenticate complex transactions between IIoT devices, whilst marshalling adversarial requests for system authorisation, without the need for a centralised authentication mechanism which cannot scale to the size needed. In this article we combine Physically Unclonable Function (PUF) hardware (using Field Programmable Gate Arrays-FPGAs), together with a multi-layer approach to cloud computing from the National Institute of Standards and Technology (NIST). Through this, we demonstrate an approach to facilitate the development of improved multi-layer authentication mechanisms. We extend prior work to utilise hardware security primitives for adversarial trojan detection, which is inspired by a biological approach to parameter analysis. This approach is an effective demonstration of attack prevention, both from internal and external adversaries. The security is further hardened through observation of the device parameters of connected IIoT equipment. 
We demonstrate that the proposed architecture can service a significantly high load of device authentication requests using a multi-layer architecture in an arbitrarily acceptable time of less than 1 second.}, } @article {pmid32241208, year = {2021}, author = {Mittal, N and Tayal, S}, title = {Advance computer analysis of magnetic resonance imaging (MRI) for early brain tumor detection.}, journal = {The International journal of neuroscience}, volume = {131}, number = {6}, pages = {555-570}, doi = {10.1080/00207454.2020.1750390}, pmid = {32241208}, issn = {1563-5279}, mesh = {Brain Neoplasms/*diagnostic imaging ; *Early Detection of Cancer ; Humans ; Image Interpretation, Computer-Assisted/*methods/standards ; Image Processing, Computer-Assisted/*methods/standards ; Magnetic Resonance Imaging/*methods/standards ; Neuroimaging/*methods/standards ; }, abstract = {PURPOSE: The brain tumor grows inside the skull and interposes with regular brain functioning. The tumor growth may possibly result in cancer at a later stage. The early detection of brain tumor is crucial for successful treatment of fatal disease. The tumor presence is normally detected by Computed Tomography (CT) or Magnetic Resonance Imaging (MRI) images. The MRI/CT images are highly complex and involve huge data. This requires highly tedious and time-consuming process for detection of small tumors for the neurologists. Thus, there is a need to develop an effective and less time-consuming imaging technique for early detection of brain tumors.

MATERIALS AND METHODS: This paper mainly focuses on early detection and localization of the brain tumor region using segmentation of patient's MRI images. The Matlab software experiments are performed on a set of fifteen tumorous MRI images. In the proposed work, four image segmentation modalities, namely watershed transform, k-means clustering, thresholding and Fuzzy C Means Clustering techniques with median filtering, have been implemented.

RESULTS: The results are verified by quantitative comparison in terms of image quality evaluation parameters: entropy, standard deviation, and Naturalness Image Quality Evaluator. A remarkable rise in the entropy and standard deviation values has been noticed.

CONCLUSIONS: The watershed transform segmentation with median filtering yields the best quality brain tumor images. The noteworthy improvement in visibility of the MRI images may highly increase the possibilities of early detection and successful treatment of brain tumor disease and thereby assists the clinicians to decide the precise therapies.}, } @article {pmid32235548, year = {2020}, author = {Wang, R and Liu, Y and Zhang, P and Li, X and Kang, X}, title = {Edge and Cloud Collaborative Entity Recommendation Method towards the IoT Search.}, journal = {Sensors (Basel, Switzerland)}, volume = {20}, number = {7}, pages = {}, pmid = {32235548}, issn = {1424-8220}, abstract = {There are massive entities with strong denaturation of state in the physical world, and users have urgent needs for real-time and intelligent acquisition of entity information, thus recommendation technologies that can actively provide instant and precise entity state information come into being. Existing IoT data recommendation methods ignore the characteristics of IoT data and user search behavior; thus the recommendation performances are relatively limited. Considering the time-varying characteristics of the IoT entity state and the characteristics of user search behavior, an edge-cloud collaborative entity recommendation method is proposed via combining the advantages of edge computing and cloud computing. First, an entity recommendation system architecture based on the collaboration between edge and cloud is designed. Then, an entity identification method suitable for edge is presented, which takes into account the feature information of entities and carries out effective entity identification based on the deep clustering model, so as to improve the real-time and accuracy of entity state information search. 
Furthermore, an interest group division method applied in cloud is devised, which fully considers user's potential search needs and divides user interest groups based on clustering model for enhancing the quality of recommendation system. Simulation results demonstrate that the proposed recommendation method can effectively improve the real-time and accuracy performance of entity recommendation in comparison with traditional methods.}, } @article {pmid32234701, year = {2020}, author = {Kalantarian, H and Jedoui, K and Dunlap, K and Schwartz, J and Washington, P and Husic, A and Tariq, Q and Ning, M and Kline, A and Wall, DP}, title = {The Performance of Emotion Classifiers for Children With Parent-Reported Autism: Quantitative Feasibility Study.}, journal = {JMIR mental health}, volume = {7}, number = {4}, pages = {e13174}, pmid = {32234701}, issn = {2368-7959}, support = {R01 EB025025/EB/NIBIB NIH HHS/United States ; R01 LM013083/LM/NLM NIH HHS/United States ; R21 HD091500/HD/NICHD NIH HHS/United States ; T15 LM007033/LM/NLM NIH HHS/United States ; }, abstract = {BACKGROUND: Autism spectrum disorder (ASD) is a developmental disorder characterized by deficits in social communication and interaction, and restricted and repetitive behaviors and interests. The incidence of ASD has increased in recent years; it is now estimated that approximately 1 in 40 children in the United States are affected. Due in part to increasing prevalence, access to treatment has become constrained. Hope lies in mobile solutions that provide therapy through artificial intelligence (AI) approaches, including facial and emotion detection AI models developed by mainstream cloud providers, available directly to consumers. However, these solutions may not be sufficiently trained for use in pediatric populations.

OBJECTIVE: Emotion classifiers available off-the-shelf to the general public through Microsoft, Amazon, Google, and Sighthound are well-suited to the pediatric population, and could be used for developing mobile therapies targeting aspects of social communication and interaction, perhaps accelerating innovation in this space. This study aimed to test these classifiers directly with image data from children with parent-reported ASD recruited through crowdsourcing.

METHODS: We used a mobile game called Guess What? that challenges a child to act out a series of prompts displayed on the screen of the smartphone held on the forehead of his or her care provider. The game is intended to be a fun and engaging way for the child and parent to interact socially, for example, the parent attempting to guess what emotion the child is acting out (eg, surprised, scared, or disgusted). During a 90-second game session, as many as 50 prompts are shown while the child acts, and the video records the actions and expressions of the child. Due in part to the fun nature of the game, it is a viable way to remotely engage pediatric populations, including the autism population through crowdsourcing. We recruited 21 children with ASD to play the game and gathered 2602 emotive frames following their game sessions. These data were used to evaluate the accuracy and performance of four state-of-the-art facial emotion classifiers to develop an understanding of the feasibility of these platforms for pediatric research.

RESULTS: All classifiers performed poorly for every evaluated emotion except happy. None of the classifiers correctly labeled over 60.18% (1566/2602) of the evaluated frames. Moreover, none of the classifiers correctly identified more than 11% (6/51) of the angry frames and 14% (10/69) of the disgust frames.

CONCLUSIONS: The findings suggest that commercial emotion classifiers may be insufficiently trained for use in digital approaches to autism treatment and treatment tracking. Secure, privacy-preserving methods to increase labeled training data are needed to boost the models' performance before they can be used in AI-enabled approaches to social therapy of the kind that is common in autism treatments.}, } @article {pmid32230843, year = {2020}, author = {Mutlag, AA and Khanapi Abd Ghani, M and Mohammed, MA and Maashi, MS and Mohd, O and Mostafa, SA and Abdulkareem, KH and Marques, G and de la Torre Díez, I}, title = {MAFC: Multi-Agent Fog Computing Model for Healthcare Critical Tasks Management.}, journal = {Sensors (Basel, Switzerland)}, volume = {20}, number = {7}, pages = {}, pmid = {32230843}, issn = {1424-8220}, mesh = {Algorithms ; *Biosensing Techniques ; Cloud Computing ; *Computer Simulation ; Delivery of Health Care/*trends ; Humans ; }, abstract = {In healthcare applications, numerous sensors and devices produce massive amounts of data which are the focus of critical tasks. Their management at the edge of the network can be done by Fog computing implementation. However, Fog Nodes suffer from a lack of resources that could limit the time needed for final outcome/analytics. Fog Nodes could perform just a small number of tasks. A difficult decision concerns which tasks will be performed locally by Fog Nodes. Each node should select such tasks carefully based on the current contextual information, for example, tasks' priority, resource load, and resource availability. We suggest in this paper a Multi-Agent Fog Computing model for healthcare critical tasks management. The main role of the multi-agent system is mapping between three decision tables to optimize scheduling the critical tasks by assigning tasks with their priority, load in the network, and network resource availability. 
The first step is to decide whether a critical task can be processed locally; otherwise, the second step involves the sophisticated selection of the most suitable neighbor Fog Node to allocate it. If no Fog Node is capable of processing the task throughout the network, it is then sent to the Cloud facing the highest latency. We test the proposed scheme thoroughly, demonstrating its applicability and optimality at the edge of the network using iFogSim simulator and UTeM clinic data.}, } @article {pmid32227074, year = {2021}, author = {Oh, M and Park, S and Kim, S and Chae, H}, title = {Machine learning-based analysis of multi-omics data on the cloud for investigating gene regulations.}, journal = {Briefings in bioinformatics}, volume = {22}, number = {1}, pages = {66-76}, doi = {10.1093/bib/bbaa032}, pmid = {32227074}, issn = {1477-4054}, mesh = {Animals ; *Cloud Computing ; Computational Biology/*methods ; Gene Expression Regulation ; Humans ; Machine Learning ; }, abstract = {Gene expressions are subtly regulated by quantifiable measures of genetic molecules such as interaction with other genes, methylation, mutations, transcription factor and histone modifications. Integrative analysis of multi-omics data can help scientists understand the condition or patient-specific gene regulation mechanisms. However, analysis of multi-omics data is challenging since it requires not only the analysis of multiple omics data sets but also mining complex relations among different genetic molecules by using state-of-the-art machine learning methods. In addition, analysis of multi-omics data needs quite large computing infrastructure. Moreover, interpretation of the analysis results requires collaboration among many scientists, often requiring reperforming analysis from different perspectives. Many of the aforementioned technical issues can be nicely handled when machine learning tools are deployed on the cloud. 
In this survey article, we first survey machine learning methods that can be used for gene regulation study, and we categorize them according to five different goals: gene regulatory subnetwork discovery, disease subtype analysis, survival analysis, clinical prediction and visualization. We also summarize the methods in terms of multi-omics input types. Then, we explain why the cloud is potentially a good solution for the analysis of multi-omics data, followed by a survey of two state-of-the-art cloud systems, Galaxy and BioVLAB. Finally, we discuss important issues when the cloud is used for the analysis of multi-omics data for the gene regulation study.}, } @article {pmid32225987, year = {2020}, author = {Jeong, J and Han, H and Park, Y}, title = {Geometric accuracy analysis of the Geostationary Ocean Color Imager (GOCI) Level 1B (L1B) product.}, journal = {Optics express}, volume = {28}, number = {5}, pages = {7634-7653}, doi = {10.1364/OE.370717}, pmid = {32225987}, issn = {1094-4087}, abstract = {The Geostationary Ocean Color Imager (GOCI) has been used for many remote sensing applications to observe and monitor the ocean color of East Asia around the Korean Peninsula. However, to date, its geometric accuracy has not been thoroughly investigated; the only studies conducted so far have focused on verifying its radiometric quality. This study investigates the geometric accuracy of the Level 1B (L1B) product created from the GOCI geometric correction. The paper contains a brief description of the geometric correction process and an analysis of the positioning accuracy of GOCI L1B. Independent check points to assess accuracy were extracted from L1B and compared to their corresponding features in Google Maps, whose positioning accuracy has been thoroughly verified. Our analysis showed that, on average, the positioning accuracy of the GOCI L1B is ∼500-600 m, although there are differences in accuracy throughout the coverage area. 
It was confirmed that the GOCI L1B's overall accuracy fully meets geometrical image quality requirements (about 1 km). No specific bias pattern was identified, and there was little difference in accuracy throughout the acquisition time. The accuracy of the GOCI when landmark determination failed, e.g. due to a cloud, was further analyzed, and analysis showed that geometric quality was maintained even in the case of failure, although a slightly higher number of errors was observed. The experimental results support the hypothesis that the GOCI's geometric correction works well and provides sufficiently accurate positional information on ocean properties to be used for remote sensing applications.}, } @article {pmid32224922, year = {2020}, author = {Zhang, MZ and Wang, LM and Xiong, SM}, title = {Using Machine Learning Methods to Provision Virtual Sensors in Sensor-Cloud.}, journal = {Sensors (Basel, Switzerland)}, volume = {20}, number = {7}, pages = {}, pmid = {32224922}, issn = {1424-8220}, support = {U1736216//National Natural Science Foundation of China/ ; }, abstract = {The advent of sensor-cloud technology alleviates the limitations of traditional wireless sensor networks (WSNs) in terms of energy, storage, and computing, which has tremendous potential in various agricultural internet of things (IoT) applications. In the sensor-cloud environment, virtual sensor provisioning is an essential task. It chooses physical sensors to create virtual sensors in response to the users' requests. Considering the capricious meteorological environment of the outdoors, this paper presents a measurements similarity-based virtual-sensor provisioning scheme by taking advantage of machine learning in data analysis. First, to distinguish the changing trends, we classified all the physical sensors into several categories using historical data. Then, the k-means clustering algorithm was exploited for each class to cluster the physical sensors with high similarity. 
Finally, one representative physical sensor from each cluster was selected to create the corresponding virtual sensors. The experimental results show the reformation of our scheme with respect to energy efficiency, network lifetime, and data accuracy compared with the benchmark schemes.}, } @article {pmid32224841, year = {2020}, author = {Ko, G and Kim, PG and Cho, Y and Jeong, S and Kim, JY and Kim, KH and Lee, HY and Han, J and Yu, N and Ham, S and Jang, I and Kang, B and Shin, S and Kim, L and Lee, SW and Nam, D and Kim, JF and Kim, N and Kim, SY and Lee, S and Roh, TY and Lee, B}, title = {Bioinformatics services for analyzing massive genomic datasets.}, journal = {Genomics & informatics}, volume = {18}, number = {1}, pages = {e8}, pmid = {32224841}, issn = {1598-866X}, support = {2014M3C9A3064552//National Research Foundation of Korea/ ; 2014M3C9A3065221//National Research Foundation of Korea/ ; 2014M3C9A3064548//National Research Foundation of Korea/ ; 2014M3C9A3068554//National Research Foundation of Korea/ ; 2014M3C9A3068822//National Research Foundation of Korea/ ; 2019M3C9A5069653//National Research Foundation of Korea/ ; }, abstract = {The explosive growth of next-generation sequencing data has resulted in ultra-large-scale datasets and ensuing computational problems. In Korea, the amount of genomic data has been increasing rapidly in the recent years. Leveraging these big data requires researchers to use large-scale computational resources and analysis pipelines. A promising solution for addressing this computational challenge is cloud computing, where CPUs, memory, storage, and programs are accessible in the form of virtual machines. Here, we present a cloud computing-based system, Bio-Express, that provides user-friendly, cost-effective analysis of massive genomic datasets. Bio-Express is loaded with predefined multi-omics data analysis pipelines, which are divided into genome, transcriptome, epigenome, and metagenome pipelines. 
Users can employ predefined pipelines or create a new pipeline for analyzing their own omics data. We also developed several web-based services for facilitating downstream analysis of genome data. Bio-Express web service is freely available at https://www.bioexpress.re.kr/.}, } @article {pmid32204390, year = {2020}, author = {Pérez de Prado, R and García-Galán, S and Muñoz-Expósito, JE and Marchewka, A and Ruiz-Reyes, N}, title = {Smart Containers Schedulers for Microservices Provision in Cloud-Fog-IoT Networks. Challenges and Opportunities.}, journal = {Sensors (Basel, Switzerland)}, volume = {20}, number = {6}, pages = {}, pmid = {32204390}, issn = {1424-8220}, support = {P18-RT-4046//Consejería de Economía, Innovación, Ciencia y Empleo, Junta de Andalucía/ ; }, abstract = {Docker containers are the lightweight-virtualization technology prevailing today for the provision of microservices. This work raises and discusses two main challenges in Docker containers' scheduling in cloud-fog-internet of things (IoT) networks. First, the convenience to integrate intelligent containers' schedulers based on soft-computing in the dominant open-source containers' management platforms: Docker Swarm, Google Kubernetes and Apache Mesos. Secondly, the need for specific intelligent containers' schedulers for the different interfaces in cloud-fog-IoT networks: cloud-to-fog, fog-to-IoT and cloud-to-fog. The goal of this work is to support the optimal allocation of microservices provided by the main cloud service providers today and used by millions of users worldwide in applications such as smart health, content delivery networks, etc. Particularly, the improvement is studied in terms of quality of service (QoS) parameters such as latency, load balance, energy consumption and runtime, based on the analysis of previous works and implementations. 
Moreover, the scientific-technical impact of smart containers' scheduling in the market is also discussed, showing the possible repercussion of the raised opportunities in the research line.}, } @article {pmid32178300, year = {2020}, author = {Fan, X and Zheng, H and Jiang, R and Zhang, J}, title = {Optimal Design of Hierarchical Cloud-Fog&Edge Computing Networks with Caching.}, journal = {Sensors (Basel, Switzerland)}, volume = {20}, number = {6}, pages = {}, pmid = {32178300}, issn = {1424-8220}, support = {2018YJS197//Fundamental Research Funds for the Central Universities/ ; 2019YJS035//Fundamental Research Funds for the Central Universities/ ; 61071077//National Natural Science Foundation of China/ ; }, abstract = {This paper investigates the optimal design of a hierarchical cloud-fog&edge computing (FEC) network, which consists of three tiers, i.e., the cloud tier, the fog&edge tier, and the device tier. The device in the device tier processes its task via three computing modes, i.e., cache-assisted computing mode, cloud-assisted computing mode, and joint device-fog&edge computing mode. Specifically, the task corresponds to being completed via the content caching in the FEC tier, the computation offloading to the cloud tier, and the joint computing in the fog&edge and device tier, respectively. For such a system, an energy minimization problem is formulated by jointly optimizing the computing mode selection, the local computing ratio, the computation frequency, and the transmit power, while guaranteeing multiple system constraints, including the task completion deadline time, the achievable computation capability, and the achievable transmit power threshold. Since the problem is a mixed integer nonlinear programming problem, which is hard to solve with known standard methods, it is decomposed into three subproblems, and the optimal solution to each subproblem is derived. 
Then, an efficient optimal caching, cloud, and joint computing (CCJ) algorithm to solve the primary problem is proposed. Simulation results show that the system performance achieved by our proposed optimal design outperforms that achieved by the benchmark schemes. Moreover, the smaller the achievable transmit power threshold of the device, the more energy is saved. Besides, with the increment of the data size of the task, the lesser is the local computing ratio.}, } @article {pmid32175864, year = {2021}, author = {Ma, R and Mei, H and Guan, H and Huang, W and Zhang, F and Xin, C and Dai, W and Wen, X and Chen, W}, title = {LADV: Deep Learning Assisted Authoring of Dashboard Visualizations From Images and Sketches.}, journal = {IEEE transactions on visualization and computer graphics}, volume = {27}, number = {9}, pages = {3717-3732}, doi = {10.1109/TVCG.2020.2980227}, pmid = {32175864}, issn = {1941-0506}, abstract = {Dashboard visualizations are widely used in data-intensive applications such as business intelligence, operation monitoring, and urban planning. However, existing visualization authoring tools are inefficient in the rapid prototyping of dashboards because visualization expertise and user intention need to be integrated. We propose a novel approach to rapid conceptualization that can construct dashboard templates from exemplars to mitigate the burden of designing, implementing, and evaluating dashboard visualizations. The kernel of our approach is a novel deep learning-based model that can identify and locate charts of various categories and extract colors from an input image or sketch. We design and implement a web-based authoring tool for learning, composing, and customizing dashboard visualizations in a cloud computing environment. 
Examples, user studies, and user feedback from real scenarios in Alibaba Cloud verify the usability and efficiency of the proposed approach.}, } @article {pmid32164160, year = {2020}, author = {Xiao, D and Li, M and Zheng, H}, title = {Smart Privacy Protection for Big Video Data Storage Based on Hierarchical Edge Computing.}, journal = {Sensors (Basel, Switzerland)}, volume = {20}, number = {5}, pages = {}, pmid = {32164160}, issn = {1424-8220}, support = {61572089, 61633005//National Natural Science Foundation of China/ ; cstc2017jcyjBX0008)//Chongqing Research Program of Basic Research and Frontier Technology/ ; yjg183018//Chongqing Postgraduate Education Reform Project/ ; }, abstract = {Recently, the rapid development of the Internet of Things (IoT) has led to an increasing exponential growth of non-scalar data (e.g., images, videos). Local services are far from satisfying storage requirements, and the cloud computing fails to effectively support heterogeneous distributed IoT environments, such as wireless sensor network. To effectively provide smart privacy protection for video data storage, we take full advantage of three patterns (multi-access edge computing, cloudlets and fog computing) of edge computing to design the hierarchical edge computing architecture, and propose a low-complexity and high-secure scheme based on it. The video is divided into three parts and stored in completely different facilities. Specifically, the most significant bits of key frames are directly stored in local sensor devices while the least significant bits of key frames are encrypted and sent to the semi-trusted cloudlets. The non-key frame is compressed with the two-layer parallel compressive sensing and encrypted by the 2D logistic-skew tent map and then transmitted to the cloud. 
Simulation experiments and theoretical analysis demonstrate that our proposed scheme can not only provide smart privacy protection for big video data storage based on the hierarchical edge computing, but also avoid increasing additional computation burden and storage pressure.}, } @article {pmid32155222, year = {2020}, author = {Yin, S and Wu, W and Zhao, X and Gong, C and Li, X and Zhang, L}, title = {Understanding spatiotemporal patterns of global forest NPP using a data-driven method based on GEE.}, journal = {PloS one}, volume = {15}, number = {3}, pages = {e0230098}, pmid = {32155222}, issn = {1932-6203}, mesh = {Climate Change ; Environmental Monitoring/*methods ; *Forests ; *Internationality ; Internet ; Rain ; *Software ; *Spatio-Temporal Analysis ; Statistics as Topic ; }, abstract = {Spatiotemporal patterns of global forest net primary productivity (NPP) are pivotal for us to understand the interaction between the climate and the terrestrial carbon cycle. In this study, we use Google Earth Engine (GEE), which is a powerful cloud platform, to study the dynamics of the global forest NPP with remote sensing and climate datasets. In contrast with traditional analyses that divide forest areas according to geographical location or climate types to retrieve general conclusions, we categorize forest regions based on their NPP levels. Nine categories of forests are obtained with the self-organizing map (SOM) method, and eight relative factors are considered in the analysis. We found that although forests can achieve higher NPP with taller, denser and more broad-leaved trees, the influence of the climate is stronger on the NPP; for the high-NPP categories, precipitation shows a weak or negative correlation with vegetation greenness, while lacking water may correspond to decrease in productivity for low-NPP categories. 
The low-NPP categories responded mainly to the La Niña event with an increase in the NPP, while the NPP of the high-NPP categories increased at the onset of the El Niño event and decreased soon afterwards when the warm phase of the El Niño-Southern Oscillation (ENSO) wore off. The influence of the ENSO changes correspondingly with different NPP levels, which infers that the pattern of climate oscillation and forest growth conditions have some degree of synchronization. These findings may facilitate the understanding of global forest NPP variation from a different perspective.}, } @article {pmid32143389, year = {2020}, author = {Silvestre-Blanes, J and Sempere-Payá, V and Albero-Albero, T}, title = {Smart Sensor Architectures for Multimedia Sensing in IoMT.}, journal = {Sensors (Basel, Switzerland)}, volume = {20}, number = {5}, pages = {}, pmid = {32143389}, issn = {1424-8220}, support = {PGC2018-094151-B-I00//Ministerio de Ciencia, Innovación y Universidades/ ; }, abstract = {Today, a wide range of developments and paradigms require the use of embedded systems characterized by restrictions on their computing capacity, consumption, cost, and network connection. The evolution of the Internet of Things (IoT) towards Industrial IoT (IIoT) or the Internet of Multimedia Things (IoMT), its impact within the 4.0 industry, the evolution of cloud computing towards edge or fog computing, also called near-sensor computing, or the increase in the use of embedded vision, are current examples of this trend. One of the most common methods of reducing energy consumption is the use of processor frequency scaling, based on a particular policy. The algorithms to define this policy are intended to obtain good responses to the workloads that occur in smartphones. There has been no study that allows a correct definition of these algorithms for workloads such as those expected in the above scenarios. 
This paper presents a method to determine the operating parameters of the dynamic governor algorithm called Interactive, which offers significant improvements in power consumption, without reducing the performance of the application. These improvements depend on the load that the system has to support, so the results are evaluated against three different loads, from higher to lower, showing improvements ranging from 62% to 26%.}, } @article {pmid32143073, year = {2020}, author = {Lin, HC and Kuo, YC and Liu, MY}, title = {A health informatics transformation model based on intelligent cloud computing - exemplified by type 2 diabetes mellitus with related cardiovascular diseases.}, journal = {Computer methods and programs in biomedicine}, volume = {191}, number = {}, pages = {105409}, doi = {10.1016/j.cmpb.2020.105409}, pmid = {32143073}, issn = {1872-7565}, mesh = {Algorithms ; *Artificial Intelligence ; *Cardiovascular Diseases ; *Cloud Computing ; *Diabetes Mellitus, Type 2 ; Humans ; Machine Learning ; Medical Informatics/*organization & administration ; }, abstract = {BACKGROUND AND OBJECTIVE: Many studies regarding health analysis request structured datasets but the legacy resources provide scattered data. This study aims to establish a health informatics transformation model (HITM) based upon intelligent cloud computing with the self-developed analytics modules by open source technique. The model was exemplified by the open data of type 2 diabetes mellitus (DM2) with related cardiovascular diseases.

METHODS: The Apache-SPARK framework was employed to generate the infrastructure of the HITM, which enables the machine learning (ML) algorithms including random forest, multi-layer perceptron classifier, support vector machine, and naïve Bayes classifier as well as the regression analysis for intelligent cloud computing. The modeling applied the MIMIC-III open database as an example to design the health informatics data warehouse, which embeds the PL/SQL-based modules to extract the analytical data for the training processes. A coupling analysis flow can drive the ML modules to train the sample data and validate the results.

RESULTS: The four modes of cloud computation were compared to evaluate the feasibility of the cloud platform in accordance with its system performance for more than 11,500 datasets. Then, the modeling adaptability was validated by simulating the featured datasets of obesity and cardiovascular-related diseases for patients with DM2 and its complications. The results showed that the run-time efficiency of the platform performed in around one minute and the prediction accuracy of the featured datasets reached 90%.

CONCLUSIONS: This study helped contribute the modeling for efficient transformation of health informatics. The HITM can be customized for the actual clinical database, which provides big data for training, with the proper ML modules for a predictable process in the cloud platform. The feedback of intelligent computing can be referred to risk assessment in health promotion.}, } @article {pmid32138286, year = {2020}, author = {Li, X and Zhang, X and Qiu, C and Duan, Y and Liu, S and Chen, D and Zhang, L and Zhu, C}, title = {Rapid Loss of Tidal Flats in the Yangtze River Delta since 1974.}, journal = {International journal of environmental research and public health}, volume = {17}, number = {5}, pages = {}, pmid = {32138286}, issn = {1660-4601}, mesh = {China ; *Ecosystem ; Environmental Monitoring ; Estuaries ; *Rivers ; Wetlands ; }, abstract = {As the home to national nature reserves and a Ramsar wetland, the tidal flats of the Yangtze River Delta are of great significance for ecological security, at both the local and global scales. However, a comprehensive understanding of the spatiotemporal conditions of the tidal flats in the Yangtze River Delta remains lacking. Here, we propose using remote sensing to obtain a detailed spatiotemporal profile of the tidal flats, using all available Landsat images from 1974 to 2018 with the help of the Google Earth Engine cloud platform. In addition, reclamation data were manually extracted from time series Landsat images for the same period. We found that approximately 40.0% (34.9-43.1%) of the tidal flats in the study area have been lost since 1980, the year in which the tidal flat area was maximal. The change in the tidal flat areas was consistent with the change in the riverine sediment supply. We also found that the cumulative reclamation areas totaled 816.6 km[2] and 431.9 km[2] in the Yangtze estuary zone and along the Jiangsu coast, respectively, between 1974 and 2018. 
Because of reclamation, some areas (e.g., the Hengsha eastern shoal and Pudong bank), which used to be quite rich, have lost most of their tidal flats. Currently, almost 70% of the remaining tidal flats are located in the shrinking branch (North Branch) and the two National Nature Reserves (Chongming Dongtan and Jiuduansha) in the Yangtze estuary zone. Consequently, the large-scale loss of tidal flats observed was primarily associated with reduced sediment supply and land reclamation at the time scale of the study. Because increasing demand for land and rising sea levels are expected in the future, immediate steps should be taken to prevent the further deterioration of this valuable ecosystem.}, } @article {pmid32134915, year = {2020}, author = {Schwengers, O and Hoek, A and Fritzenwanker, M and Falgenhauer, L and Hain, T and Chakraborty, T and Goesmann, A}, title = {ASA3P: An automatic and scalable pipeline for the assembly, annotation and higher-level analysis of closely related bacterial isolates.}, journal = {PLoS computational biology}, volume = {16}, number = {3}, pages = {e1007134}, pmid = {32134915}, issn = {1553-7358}, mesh = {Algorithms ; Bacteria/genetics ; Base Sequence/genetics ; Chromosome Mapping/methods ; Cloud Computing ; Computational Biology/*methods ; Genome, Bacterial/genetics ; Sequence Analysis, DNA/*methods/statistics & numerical data ; Software ; Whole Genome Sequencing/methods ; }, abstract = {Whole genome sequencing of bacteria has become daily routine in many fields. Advances in DNA sequencing technologies and continuously dropping costs have resulted in a tremendous increase in the amounts of available sequence data. However, comprehensive in-depth analysis of the resulting data remains an arduous and time-consuming task. In order to keep pace with these promising but challenging developments and to transform raw data into valuable information, standardized analyses and scalable software tools are needed. 
Here, we introduce ASA3P, a fully automatic, locally executable and scalable assembly, annotation and analysis pipeline for bacterial genomes. The pipeline automatically executes necessary data processing steps, i.e. quality clipping and assembly of raw sequencing reads, scaffolding of contigs and annotation of the resulting genome sequences. Furthermore, ASA3P conducts comprehensive genome characterizations and analyses, e.g. taxonomic classification, detection of antibiotic resistance genes and identification of virulence factors. All results are presented via an HTML5 user interface providing aggregated information, interactive visualizations and access to intermediate results in standard bioinformatics file formats. We distribute ASA3P in two versions: a locally executable Docker container for small-to-medium-scale projects and an OpenStack based cloud computing version able to automatically create and manage self-scaling compute clusters. Thus, automatic and standardized analysis of hundreds of bacterial genomes becomes feasible within hours. The software and further information is available at: asap.computational.bio.}, } @article {pmid32131494, year = {2020}, author = {Guzzi, F and De Bortoli, L and Molina, RS and Marsi, S and Carrato, S and Ramponi, G}, title = {Distillation of an End-to-End Oracle for Face Verification and Recognition Sensors.}, journal = {Sensors (Basel, Switzerland)}, volume = {20}, number = {5}, pages = {}, pmid = {32131494}, issn = {1424-8220}, mesh = {Algorithms ; Biometric Identification/methods ; Biometry/methods ; Confidentiality ; Databases, Factual ; Face/*physiology ; Facial Recognition/*physiology ; Humans ; Machine Learning ; Neural Networks, Computer ; }, abstract = {Face recognition functions are today exploited through biometric sensors in many applications, from extended security systems to inclusion devices; deep neural network methods are reaching in this field stunning performances. 
The main limitation of the deep learning approach is an inconvenient relation between the accuracy of the results and the needed computing power. When a personal device is employed, in particular, many algorithms require a cloud computing approach to achieve the expected performances; other algorithms adopt models that are simple by design. A third viable option consists of model (oracle) distillation. This is the most intriguing among the compression techniques since it permits to devise of the minimal structure that will enforce the same I/O relation as the original model. In this paper, a distillation technique is applied to a complex model, enabling the introduction of fast state-of-the-art recognition capabilities on a low-end hardware face recognition sensor module. Two distilled models are presented in this contribution: the former can be directly used in place of the original oracle, while the latter incarnates better the end-to-end approach, removing the need for a separate alignment procedure. The presented biometric systems are examined on the two problems of face verification and face recognition in an open set by using well-agreed training/testing methodologies and datasets.}, } @article {pmid32130115, year = {2020}, author = {Gu, D and Yang, X and Deng, S and Liang, C and Wang, X and Wu, J and Guo, J}, title = {Tracking Knowledge Evolution in Cloud Health Care Research: Knowledge Map and Common Word Analysis.}, journal = {Journal of medical Internet research}, volume = {22}, number = {2}, pages = {e15142}, pmid = {32130115}, issn = {1438-8871}, mesh = {Artificial Intelligence/*standards ; Biomedical Research/*methods ; Cloud Computing/*standards ; Humans ; Word Processing/*methods ; }, abstract = {BACKGROUND: With the continuous development of the internet and the explosive growth in data, big data technology has emerged. With its ongoing development and application, cloud computing technology provides better data storage and analysis. 
The development of cloud health care provides a more convenient and effective solution for health. Studying the evolution of knowledge and research hotspots in the field of cloud health care is increasingly important for medical informatics. Scholars in the medical informatics community need to understand the extent of the evolution of and possible trends in cloud health care research to inform their future research.

OBJECTIVE: Drawing on the cloud health care literature, this study aimed to describe the development and evolution of research themes in cloud health care through a knowledge map and common word analysis.

METHODS: A total of 2878 articles about cloud health care were retrieved from the Web of Science database. We used cybermetrics to analyze and visualize the keywords in these articles. We created a knowledge map to show the evolution of cloud health care research. We used co-word analysis to identify the hotspots and their evolution in cloud health care research.

RESULTS: The evolution and development of cloud health care services are described. In 2007-2009 (Phase I), most scholars used cloud computing in the medical field mainly to reduce costs, and grid computing and cloud computing were the primary technologies. In 2010-2012 (Phase II), the security of cloud systems became of interest to scholars. In 2013-2015 (Phase III), medical informatization enabled big data for health services. In 2016-2017 (Phase IV), machine learning and mobile technologies were introduced to the medical field.

CONCLUSIONS: Cloud health care research has been rapidly developing worldwide, and technologies used in cloud health research are simultaneously diverging and becoming smarter. Cloud-based mobile health, cloud-based smart health, and the security of cloud health data and systems are three possible trends in the future development of the cloud health care field.}, } @article {pmid32123635, year = {2020}, author = {Hadley, TD and Pettit, RW and Malik, T and Khoei, AA and Salihu, HM}, title = {Artificial Intelligence in Global Health -A Framework and Strategy for Adoption and Sustainability.}, journal = {International journal of MCH and AIDS}, volume = {9}, number = {1}, pages = {121-127}, pmid = {32123635}, issn = {2161-8674}, abstract = {Artificial Intelligence (AI) applications in medicine have grown considerably in recent years. AI in the forms of Machine Learning, Natural Language Processing, Expert Systems, Planning and Logistics methods, and Image Processing networks provide great analytical aptitude. While AI methods were first conceptualized for radiology, investigations today are established across all medical specialties. The necessity for proper infrastructure, skilled labor, and access to large, well-organized data sets has kept the majority of medical AI applications in higher-income countries. However, critical technological improvements, such as cloud computing and the near-ubiquity of smartphones, have paved the way for use of medical AI applications in resource-poor areas. Global health initiatives (GHI) have already begun to explore ways to leverage medical AI technologies to detect and mitigate public health inequities. For example, AI tools can help optimize vaccine delivery and community healthcare worker routes, thus enabling limited resources to have a maximal impact. 
Other promising AI tools have demonstrated an ability to: predict burn healing time from smartphone photos; track regions of socioeconomic disparity combined with environmental trends to predict communicable disease outbreaks; and accurately predict pregnancy complications such as birth asphyxia in low resource settings with limited patient clinical data. In this commentary, we discuss the current state of AI-driven GHI and explore relevant lessons from past technology-centered GHI. Additionally, we propose a conceptual framework to guide the development of sustainable strategies for AI-driven GHI, and we outline areas for future research.}, } @article {pmid32121185, year = {2020}, author = {Tariq, MI and Ahmed, S and Memon, NA and Tayyaba, S and Ashraf, MW and Nazir, M and Hussain, A and Balas, VE and Balas, MM}, title = {Prioritization of Information Security Controls through Fuzzy AHP for Cloud Computing Networks and Wireless Sensor Networks.}, journal = {Sensors (Basel, Switzerland)}, volume = {20}, number = {5}, pages = {}, pmid = {32121185}, issn = {1424-8220}, abstract = {With the advent of cloud computing and wireless sensor networks, the number of cyberattacks has rapidly increased. Therefore, the proportionate security of networks has become a challenge for organizations. Information security advisors of organizations face difficult and complex decisions in the evaluation and selection of information security controls that permit the defense of their resources and assets. Information security controls must be selected based on an appropriate level of security. However, their selection needs intensive investigation regarding vulnerabilities, risks, and threats prevailing in the organization as well as consideration of the implementation, mitigation, and budgetary constraints of the organization. 
The goal of this paper was to improve the information security control analysis method by proposing a formalized approach, i.e., fuzzy Analytical Hierarchy Process (AHP). This approach was used to prioritize and select the most relevant set of information security controls to satisfy the information security requirements of an organization. We argue that the prioritization of the information security controls using fuzzy AHP leads to an efficient and cost-effective assessment and evaluation of information security controls for an organization in order to select the most appropriate ones. The proposed formalized approach and prioritization processes are based on International Organization for Standardization and the International Electrotechnical Commission (ISO/IEC) 27001:2013. But in practice, organizations may apply this approach to any information security baseline manual.}, } @article {pmid32120874, year = {2020}, author = {Liu, Z and Zhang, J and Li, Y and Ji, Y}, title = {Hierarchical MEC Servers Deployment and User-MEC Server Association in C-RANs over WDM Ring Networks.}, journal = {Sensors (Basel, Switzerland)}, volume = {20}, number = {5}, pages = {}, pmid = {32120874}, issn = {1424-8220}, support = {No. 2018YFB1800802//the National Key R&D Program of China/ ; No. 61771073//the National Nature Science Foundation of China Projects/ ; No. 4192039//the Beijing Natural Science Foundation/ ; IPOC2019ZT05//the fund of State Key Laboratory of Information Photonics and Optical Communications, China,/ ; CX2019310//BUPT Excellent Ph.D. Students Foundation/ ; }, abstract = {With the increasing number of Internet of Things (IoT) devices, a huge amount of latency-sensitive and computation-intensive IoT applications have been injected into the network. 
Deploying mobile edge computing (MEC) servers in cloud radio access network (C-RAN) is a promising candidate, which brings a number of critical IoT applications to the edge network, to reduce the heavy traffic load and the end-to-end latency. The MEC server's deployment mechanism is highly related to the user allocation. Therefore, in this paper, we study hierarchical deployment of MEC servers and user allocation problem. We first formulate the problem as a mixed integer nonlinear programming (MINLP) model to minimize the deployment cost and average latency. In terms of the MINLP model, we then propose an enumeration algorithm and approximate algorithm based on the improved entropy weight and TOPSIS methods. Numerical results show that the proposed algorithms can reduce the total cost, and the approximate algorithm has lower total cost comparing the heaviest-location first and the latency-based algorithms.}, } @article {pmid32119067, year = {2020}, author = {Dumont, ELP and Tycko, B and Do, C}, title = {CloudASM: an ultra-efficient cloud-based pipeline for mapping allele-specific DNA methylation.}, journal = {Bioinformatics (Oxford, England)}, volume = {36}, number = {11}, pages = {3558-3560}, pmid = {32119067}, issn = {1367-4811}, support = {R21 AI133140/AI/NIAID NIH HHS/United States ; }, mesh = {Alleles ; Cloud Computing ; *DNA Methylation ; *Software ; }, abstract = {SUMMARY: Methods for quantifying the imbalance in CpG methylation between alleles genome-wide have been described but their algorithmic time complexity is quadratic and their practical use requires painstaking attention to infrastructure choice, implementation and execution. To solve this problem, we developed CloudASM, a scalable, ultra-efficient, turn-key, portable pipeline on Google Cloud Platform (GCP) that uses a novel pipeline manager and GCP's serverless enterprise data warehouse.

CloudASM is freely available in the GitHub repository https://github.com/TyckoLab/CloudASM and a sample dataset and its results are also freely available at https://console.cloud.google.com/storage/browser/cloudasm.

CONTACT: emmanuel.dumont@hmh-cdi.org.}, } @article {pmid32114911, year = {2020}, author = {Alsulami, OZ and Alahmadi, AA and Saeed, SOM and Mohamed, SH and El-Gorashi, TEH and Alresheedi, MT and Elmirghani, JMH}, title = {Optimum resource allocation in optical wireless systems with energy-efficient fog and cloud architectures.}, journal = {Philosophical transactions. Series A, Mathematical, physical, and engineering sciences}, volume = {378}, number = {2169}, pages = {20190188}, pmid = {32114911}, issn = {1471-2962}, abstract = {Optical wireless communication (OWC) is a promising technology that can provide high data rates while supporting multiple users. The optical wireless (OW) physical layer has been researched extensively, however, less work was devoted to multiple access and how the OW front end is connected to the network. In this paper, an OWC system which employs a wavelength division multiple access (WDMA) scheme is studied, for the purpose of supporting multiple users. In addition, a cloud/fog architecture is proposed for the first time for OWC to provide processing capabilities. The cloud/fog-integrated architecture uses visible indoor light to create high data rate connections with potential mobile nodes. These OW nodes are further clustered and used as fog mini servers to provide processing services through the OW channel for other users. Additional fog-processing units are located in the room, the building, the campus and at the metro level. Further processing capabilities are provided by remote cloud sites. Two mixed-integer linear programming (MILP) models were proposed to numerically study networking and processing in OW systems. The first MILP model was developed and used to optimize resource allocation in the indoor OWC systems, in particular, the allocation of access points (APs) and wavelengths to users, while the second MILP model was developed to optimize the placement of processing tasks in the different fog and cloud nodes available. 
The optimization of tasks placement in the cloud/fog-integrated architecture was analysed using the MILP models. Multiple scenarios were considered where the mobile node locations were varied in the room and the amount of processing and data rate requested by each OW node was varied. The results help to identify the optimum colour and AP to use for communication for a given mobile node location and OWC system configuration, the optimum location to place processing and the impact of the network architecture. This article is part of the theme issue 'Optical wireless communication'.}, } @article {pmid32112271, year = {2020}, author = {Fozoonmayeh, D and Le, HV and Wittfoth, E and Geng, C and Ha, N and Wang, J and Vasilenko, M and Ahn, Y and Woodbridge, DM}, title = {A Scalable Smartwatch-Based Medication Intake Detection System Using Distributed Machine Learning.}, journal = {Journal of medical systems}, volume = {44}, number = {4}, pages = {76}, pmid = {32112271}, issn = {1573-689X}, support = {NA//Anita Borg Institute for Women and Technology/ ; None//Jesuit Foundation, University of San Francisco/ ; }, mesh = {Accelerometry ; Bayes Theorem ; Cloud Computing ; Humans ; *Machine Learning ; *Medication Adherence ; *Mobile Applications ; Smartphone ; *Wearable Electronic Devices ; }, abstract = {Poor Medication adherence causes significant economic impact resulting in hospital readmission, hospital visits and other healthcare costs. The authors developed a smartwatch application and a cloud based data pipeline for developing a user-friendly medication intake monitoring system that can contribute to improving medication adherence. The developed Android smartwatch application collects activity sensor data using accelerometer and gyroscope. The cloud-based data pipeline includes distributed data storage, distributed database management system and distributed computing frameworks in order to build a machine learning model which identifies activity types using sensor data. 
With the proposed sensor data extraction, preprocessing and machine learning algorithms, this study successfully achieved a high F1 score of 0.977 with 13.313 seconds of training time and 0.139 seconds for testing.}, } @article {pmid32109951, year = {2020}, author = {Campbell, AD and Wang, Y}, title = {Salt marsh monitoring along the mid-Atlantic coast by Google Earth Engine enabled time series.}, journal = {PloS one}, volume = {15}, number = {2}, pages = {e0229605}, pmid = {32109951}, issn = {1932-6203}, mesh = {Atlantic Ocean ; *Biomass ; Ecological Parameter Monitoring ; *Ecosystem ; *Wetlands ; }, abstract = {Salt marshes provide a bulwark against sea-level rise (SLR), an interface between aquatic and terrestrial habitats, important nursery grounds for many species, a buffer against extreme storm impacts, and vast blue carbon repositories. However, salt marshes are at risk of loss from a variety of stressors such as SLR, nutrient enrichment, sediment deficits, herbivory, and anthropogenic disturbances. Determining the dynamics of salt marsh change with remote sensing requires high temporal resolution due to the spectral variability caused by disturbance, tides, and seasonality. Time series analysis of salt marshes can broaden our understanding of these changing environments. This study analyzed aboveground green biomass (AGB) in seven mid-Atlantic Hydrological Unit Code 8 (HUC-8) watersheds. The study revealed that the Eastern Lower Delmarva watershed had the highest average loss and the largest net reduction in salt marsh AGB from 1999-2018. The study developed a method that used Google Earth Engine (GEE) enabled time series of the Landsat archive for regional analysis of salt marsh change and identified at-risk watersheds and salt marshes providing insight into the resilience and management of these ecosystems. The time series were filtered by cloud cover and the Tidal Marsh Inundation Index (TMII). 
The combination of GEE enabled Landsat time series, and TMII filtering demonstrated a promising method for historic assessment and continued monitoring of salt marsh dynamics.}, } @article {pmid32109684, year = {2020}, author = {Rodrigues, VF and Paim, EP and Kunst, R and Antunes, RS and Costa, CAD and Righi, RDR}, title = {Exploring publish/subscribe, multilevel cloud elasticity, and data compression in telemedicine.}, journal = {Computer methods and programs in biomedicine}, volume = {191}, number = {}, pages = {105403}, doi = {10.1016/j.cmpb.2020.105403}, pmid = {32109684}, issn = {1872-7565}, mesh = {Algorithms ; Cloud Computing/*standards ; *Data Compression ; Humans ; *Publishing ; Quality Improvement ; *Telemedicine ; }, abstract = {BACKGROUND AND OBJECTIVE: Multiple medical specialties rely on image data, typically following the Digital Imaging and Communications in Medicine (DICOM) ISO 12052 standard, to support diagnosis through telemedicine. Remote analysis by different physicians requires the same image to be transmitted simultaneously to different destinations in real-time. This scenario poses a need for a large number of resources to store and transmit DICOM images in real-time, which has been explored using some cloud-based solutions. However, these solutions lack strategies to improve the performance through the cloud elasticity feature. In this context, this article proposes a cloud-based publish/subscribe (PubSub) model, called PS2DICOM, which employs multilevel resource elasticity to improve the performance of DICOM data transmissions.

METHODS: A prototype is implemented to evaluate PS2DICOM. A PubSub communication model is adopted, considering the coexistence of two classes of users: (i) image data producers (publishers); and (ii) image data consumers (subscribers). PS2DICOM employs a cloud infrastructure to guarantee service availability and performance through resource elasticity in two levels of the cloud: (i) brokers and (ii) data storage. In addition, images are compressed prior to the transmission to reduce the demand for network resources using one of three different algorithms: (i) DEFLATE, (ii) LZMA, and (iii) BZIP2. PS2DICOM employs dynamic data compression levels at the client side to improve network performance according to the current available network throughput.

RESULTS: Results indicate that PS2DICOM can improve transmission quality, storage capabilities, querying, and retrieving of DICOM images. The general efficiency gain is approximately 35% in data sending and receiving operations. This gain results from the two levels of elasticity, allowing resources to be scaled up or down automatically in a transparent manner.

CONCLUSIONS: The contributions of PS2DICOM are twofold: (i) multilevel cloud elasticity to adapt the computing resources on demand; (ii) adaptive data compression to meet the network quality and optimize data transmission. Results suggest that the use of compression in medical image data using PS2DICOM can improve the transmission efficiency, allowing the team of specialists to communicate in real-time, even when they are geographically distant.}, } @article {pmid32093476, year = {2020}, author = {Li, M and Tian, T and Zeng, Y and Zhu, S and Lu, J and Yang, J and Li, C and Yin, Y and Li, G}, title = {Individual Cloud-Based Fingerprint Operation Platform for Latent Fingerprint Identification Using Perovskite Nanocrystals as Eikonogen.}, journal = {ACS applied materials \& interfaces}, volume = {12}, number = {11}, pages = {13494-13502}, doi = {10.1021/acsami.9b22251}, pmid = {32093476}, issn = {1944-8252}, abstract = {Fingerprint formed through lifted papillary ridges is considered the best reference for personal identification. However, the currently available latent fingerprint (LFP) images often suffer from poor resolution, have a low degree of information, and require multifarious steps for identification. Herein, an individual Cloud-based fingerprint operation platform has been designed and fabricated to achieve high-definition LFPs analysis by using CsPbBr3 perovskite nanocrystals (NCs) as eikonogen. Moreover, since CsPbBr3 NCs have a special response to some fingerprint-associated amino acids, the proposed platform can be further used to detect metabolites on LFPs. Consequently, in virtue of Cloud computing and artificial intelligence (AI), this study has demonstrated a champion platform to realize the whole LFP identification analysis. 
In a double-blind simulative crime game, the enhanced LFP images can be easily obtained and used to lock the suspect accurately within one second on a smartphone, which can help investigators track the criminal clue and handle cases efficiently.}, } @article {pmid32079352, year = {2020}, author = {Nkenyereye, L and Nkenyereye, L and Tama, BA and Reddy, AG and Song, J}, title = {Software-Defined Vehicular Cloud Networks: Architecture, Applications and Virtual Machine Migration.}, journal = {Sensors (Basel, Switzerland)}, volume = {20}, number = {4}, pages = {}, pmid = {32079352}, issn = {1424-8220}, abstract = {Cloud computing supports many unprecedented cloud-based vehicular applications. To improve connectivity and bandwidth through programmable networking architectures, Software-Defined (SD) Vehicular Network (SDVN) is introduced. SDVN architecture enables vehicles to be equipped with SDN OpenFlow switch on which the routing rules are updated from a SDN OpenFlow controller. From SDVN, new vehicular architectures are introduced, for instance SD Vehicular Cloud (SDVC). In SDVC, vehicles are SDN devices that host virtualization technology for enabling deployment of cloud-based vehicular applications. In addition, the migration of Virtual Machines (VM) over SDVC challenges the performance of cloud-based vehicular applications due the highly mobility of vehicles. However, the current literature that discusses VM migration in SDVC is very limited. In this paper, we first analyze the evolution of computation and networking technologies of SDVC with a focus on its architecture within the cloud-based vehicular environment. Then, we discuss the potential cloud-based vehicular applications assisted by the SDVC along with its ability to manage several VM migration scenarios. 
Lastly, we provide a detailed comparison of existing frameworks in SDVC that integrate the VM migration approach and different emulators or simulators network used to evaluate VM frameworks' use cases.}, } @article {pmid32079301, year = {2020}, author = {Ali, M and Sadeghi, MR and Liu, X}, title = {Lightweight Fine-Grained Access Control for Wireless Body Area Networks.}, journal = {Sensors (Basel, Switzerland)}, volume = {20}, number = {4}, pages = {}, pmid = {32079301}, issn = {1424-8220}, support = {U1804263//National Natural Science Foundation of China/ ; 61702105//National Natural Science Foundation of China/ ; 2017B030301004-12//Opening Project of Guangdong Provincial Key Laboratory of Data Security and Privacy Protection/ ; }, mesh = {*Computer Security ; Humans ; Patients ; Privacy ; *Remote Sensing Technology ; *Wearable Electronic Devices ; *Wireless Technology ; }, abstract = {Wireless Body Area Network (WBAN) is a highly promising technology enabling health providers to remotely monitor vital parameters of patients via tiny wearable and implantable sensors. In a WBAN, medical data is collected by several tiny sensors and usually transmitted to a server-side (e.g., a cloud service provider) for long-term storage and online/offline processing. However, as the health data includes several sensitive information, providing confidentiality and fine-grained access control is necessary to preserve the privacy of patients. In this paper, we design an attribute-based encryption (ABE) scheme with lightweight encryption and decryption mechanisms. Our scheme enables tiny sensors to encrypt the collected data under an access control policy by performing very few computational operations. Also, the computational overhead on the users in the decryption phase is lightweight, and most of the operations are performed by the cloud server. 
In comparison with some excellent ABE schemes, our encryption mechanism is more than 100 times faster, and the communication overhead in our scheme decreases significantly. We provide the security definition for the new primitive and prove its security in the standard model and under the hardness assumption of the decisional bilinear Diffie-Hellman (DBDH) problem.}, } @article {pmid32070983, year = {2020}, author = {Han, L and Zheng, T and Zhu, Y and Xu, L and Fang, L}, title = {Live Semantic 3D Perception for Immersive Augmented Reality.}, journal = {IEEE transactions on visualization and computer graphics}, volume = {26}, number = {5}, pages = {2012-2022}, doi = {10.1109/TVCG.2020.2973477}, pmid = {32070983}, issn = {1941-0506}, mesh = {*Augmented Reality ; Computer Graphics ; Humans ; Imaging, Three-Dimensional/*methods ; *Neural Networks, Computer ; *Semantics ; Virtual Reality ; }, abstract = {Semantic understanding of 3D environments is critical for both the unmanned system and the human involved virtual/augmented reality (VR/AR) immersive experience. Spatially-sparse convolution, taking advantage of the intrinsic sparsity of 3D point cloud data, makes high resolution 3D convolutional neural networks tractable with state-of-the-art results on 3D semantic segmentation problems. However, the exhaustive computations limits the practical usage of semantic 3D perception for VR/AR applications in portable devices. In this paper, we identify that the efficiency bottleneck lies in the unorganized memory access of the sparse convolution steps, i.e., the points are stored independently based on a predefined dictionary, which is inefficient due to the limited memory bandwidth of parallel computing devices (GPU). With the insight that points are continuous as 2D surfaces in 3D space, a chunk-based sparse convolution scheme is proposed to reuse the neighboring points within each spatially organized chunk. 
An efficient multi-layer adaptive fusion module is further proposed for employing the spatial consistency cue of 3D data to further reduce the computational burden. Quantitative experiments on public datasets demonstrate that our approach works 11× faster than previous approaches with competitive accuracy. By implementing both semantic and geometric 3D reconstruction simultaneously on a portable tablet device, we demo a foundation platform for immersive AR applications.}, } @article {pmid32069298, year = {2020}, author = {Alexander, K and Hanif, M and Lee, C and Kim, E and Helal, S}, title = {Cost-aware orchestration of applications over heterogeneous clouds.}, journal = {PloS one}, volume = {15}, number = {2}, pages = {e0228086}, pmid = {32069298}, issn = {1932-6203}, mesh = {*Cloud Computing ; Models, Theoretical ; Time Factors ; }, abstract = {The orchestration of applications and their components over heterogeneous clouds is recognized as being critical in solving the problem of vendor lock-in with regards to distributed and cloud computing. There have been recent strides made in the area of cloud application orchestration with emergence of the TOSCA standard being a definitive one. Although orchestration by itself provides a considerable amount of benefit to consumers of cloud computing services, it remains impractical without a compelling reason to ensure its utilization by cloud computing consumers. If there is no measurable benefit in using orchestration, then it is likely that clients may opt out of using it altogether. In this paper, we present an approach to cloud orchestration that aims to combine an orchestration model with a cost and policy model in order to allow for cost-aware application orchestration across heterogeneous clouds. 
Our approach takes into consideration the operating cost of the application on each provider, while performing a forward projection of the operating cost over a period of time to ensure that cost constraints remain unviolated. This allows us to leverage the existing state of the art with regards to orchestration and model-driven approaches as well as tie it to the operations of cloud clients in order to improve utility. Through this study, we were able to show that our approach was capable of providing not only scaling features but also orchestration features of application components distributed across heterogeneous cloud platforms.}, } @article {pmid32069044, year = {2020}, author = {Matsuzawa, NN and Arai, H and Sasago, M and Fujii, E and Goldberg, A and Mustard, TJ and Kwak, HS and Giesen, DJ and Ranalli, F and Halls, MD}, title = {Massive Theoretical Screen of Hole Conducting Organic Materials in the Heteroacene Family by Using a Cloud-Computing Environment.}, journal = {The journal of physical chemistry. A}, volume = {124}, number = {10}, pages = {1981-1992}, doi = {10.1021/acs.jpca.9b10998}, pmid = {32069044}, issn = {1520-5215}, abstract = {Materials exhibiting higher mobilities than conventional organic semiconducting materials such as fullerenes and fused thiophenes are in high demand for applications in printed electronics. To discover new molecules in the heteroacene family that might show improved charge mobility, a massive theoretical screen of hole conducting properties of molecules was performed by using a cloud-computing environment. Over 7 000 000 structures of fused furans, thiophenes and selenophenes were generated and 250 000 structures were randomly selected to perform density functional theory (DFT) calculations of hole reorganization energies. The lowest hole reorganization energy calculated was 0.0548 eV for a fused thioacene having 8 aromatics rings. 
Hole mobilities of compounds with the lowest 130 reorganization energy were further processed by applying combined DFT and molecular dynamics (MD) methods. The highest mobility calculated was 1.02 and 9.65 cm[2]/(V s) based on percolation and disorder theory, respectively, for compounds containing selenium atoms with 8 aromatic rings. These values are about 20 times higher than those for dinaphthothienothiophene (DNTT).}, } @article {pmid32046133, year = {2020}, author = {Marah, BD and Jing, Z and Ma, T and Alsabri, R and Anaadumba, R and Al-Dhelaan, A and Al-Dhelaan, M}, title = {Smartphone Architecture for Edge-Centric IoT Analytics.}, journal = {Sensors (Basel, Switzerland)}, volume = {20}, number = {3}, pages = {}, pmid = {32046133}, issn = {1424-8220}, support = {U1736105//National Natural Science Foundation of China/ ; }, abstract = {The current baseline architectures in the field of the Internet of Things (IoT) strongly recommends the use of edge computing in the design of the solution applications instead of the traditional approach which solely uses the cloud/core for analysis and data storage. This research, therefore, focuses on formulating an edge-centric IoT architecture for smartphones which are very popular electronic devices that are capable of executing complex computational tasks at the network edge. A novel smartphone IoT architecture (SMIoT) is introduced that supports data capture and preprocessing, model (i.e., machine learning models) deployment, model evaluation and model updating tasks. Moreover, a novel model evaluation and updating scheme is provided which ensures model validation in real-time. This ensures a sustainable and reliable model at the network edge that automatically adjusts to changes in the IoT data subspace. 
Finally, the proposed architecture is tested and evaluated using an IoT use case.}, } @article {pmid32042829, year = {2019}, author = {Chattopadhyay, A and Lu, TP}, title = {Gene-gene interaction: the curse of dimensionality.}, journal = {Annals of translational medicine}, volume = {7}, number = {24}, pages = {813}, pmid = {32042829}, issn = {2305-5839}, abstract = {Identified genetic variants from genome wide association studies frequently show only modest effects on the disease risk, leading to the "missing heritability" problem. An avenue, to account for a part of this "missingness" is to evaluate gene-gene interactions (epistasis) thereby elucidating their effect on complex diseases. This can potentially help with identifying gene functions, pathways, and drug targets. However, the exhaustive evaluation of all possible genetic interactions among millions of single nucleotide polymorphisms (SNPs) raises several issues, otherwise known as the "curse of dimensionality". The dimensionality involved in the epistatic analysis of such exponentially growing SNPs diminishes the usefulness of traditional, parametric statistical methods. With the immense popularity of multifactor dimensionality reduction (MDR), a non-parametric method, proposed in 2001, that classifies multi-dimensional genotypes into one- dimensional binary approaches, led to the emergence of a fast-growing collection of methods that were based on the MDR approach. Moreover, machine-learning (ML) methods such as random forests and neural networks (NNs), deep-learning (DL) approaches, and hybrid approaches have also been applied profusely, in the recent years, to tackle this dimensionality issue associated with whole genome gene-gene interaction studies. However, exhaustive searching in MDR based approaches or variable selection in ML methods, still pose the risk of missing out on relevant SNPs. Furthermore, interpretability issues are a major hindrance for DL methods. 
To minimize this loss of information, Python based tools such as PySpark can potentially take advantage of distributed computing resources in the cloud, to bring back smaller subsets of data for further local analysis. Parallel computing can be a powerful resource that stands to fight this "curse". PySpark supports all standard Python libraries and C extensions thus making it convenient to write codes to deliver dramatic improvements in processing speed for extraordinarily large sets of data.}, } @article {pmid32038716, year = {2019}, author = {Peri, S and Roberts, S and Kreko, IR and McHan, LB and Naron, A and Ram, A and Murphy, RL and Lyons, E and Gregory, BD and Devisetty, UK and Nelson, ADL}, title = {Read Mapping and Transcript Assembly: A Scalable and High-Throughput Workflow for the Processing and Analysis of Ribonucleic Acid Sequencing Data.}, journal = {Frontiers in genetics}, volume = {10}, number = {}, pages = {1361}, pmid = {32038716}, issn = {1664-8021}, abstract = {Next-generation RNA-sequencing is an incredibly powerful means of generating a snapshot of the transcriptomic state within a cell, tissue, or whole organism. As the questions addressed by RNA-sequencing (RNA-seq) become both more complex and greater in number, there is a need to simplify RNA-seq processing workflows, make them more efficient and interoperable, and capable of handling both large and small datasets. This is especially important for researchers who need to process hundreds to tens of thousands of RNA-seq datasets. To address these needs, we have developed a scalable, user-friendly, and easily deployable analysis suite called RMTA (Read Mapping, Transcript Assembly). RMTA can easily process thousands of RNA-seq datasets with features that include automated read quality analysis, filters for lowly expressed transcripts, and read counting for differential expression analysis. 
RMTA is containerized using Docker for easy deployment within any compute environment [cloud, local, or high-performance computing (HPC)] and is available as two apps in CyVerse's Discovery Environment, one for normal use and one specifically designed for introducing undergraduates and high school to RNA-seq analysis. For extremely large datasets (tens of thousands of FASTq files) we developed a high-throughput, scalable, and parallelized version of RMTA optimized for launching on the Open Science Grid (OSG) from within the Discovery Environment. OSG-RMTA allows users to utilize the Discovery Environment for data management, parallelization, and submitting jobs to OSG, and finally, employ the OSG for distributed, high throughput computing. Alternatively, OSG-RMTA can be run directly on the OSG through the command line. RMTA is designed to be useful for data scientists, of any skill level, interested in rapidly and reproducibly analyzing their large RNA-seq data sets.}, } @article {pmid32033345, year = {2020}, author = {Ramírez-Faz, J and Fernández-Ahumada, LM and Fernández-Ahumada, E and López-Luque, R}, title = {Monitoring of Temperature in Retail Refrigerated Cabinets Applying IoT Over Open-Source Hardware and Software.}, journal = {Sensors (Basel, Switzerland)}, volume = {20}, number = {3}, pages = {}, pmid = {32033345}, issn = {1424-8220}, abstract = {The control of refrigeration in the food chain is fundamental at all stages, with special emphasis on the retail stage. The implementation of information and communication technologies (IoT, open-source hardware and software, cloud computing, etc.) is representing a revolution in the operational paradigm of food control. This paper presents a low-cost IoT solution, based on free hardware and software, for monitoring the temperature in refrigerated retail cabinets. Specifically, the use of the ESP-8266-Wi-Fi microcontroller with DS18B20 temperature sensors is proposed. 
The ThingSpeak IoT platform is used to store and process data in the cloud. The solution presented is robust, affordable, and flexible, allowing to extend the scope of supervising other relevant parameters in the operating process (light control, energy efficiency, consumer presence, etc.).}, } @article {pmid32024221, year = {2020}, author = {Xu, J and Yang, S and Lu, W and Xu, L and Yang, D}, title = {Incentivizing for Truth Discovery in Edge-assisted Large-scale Mobile Crowdsensing.}, journal = {Sensors (Basel, Switzerland)}, volume = {20}, number = {3}, pages = {}, pmid = {32024221}, issn = {1424-8220}, support = {61872193//National Natural Science Foundation of China/ ; 1717315//National Science Foundation/ ; }, mesh = {Algorithms ; *Cell Phone ; Cloud Computing/trends ; Computer Security/trends ; Data Collection/trends ; Humans ; Records ; }, abstract = {The recent development of human-carried mobile devices has promoted the great development of mobile crowdsensing systems. Most existing mobile crowdsensing systems depend on the crowdsensing service of the deep cloud. With the increasing scale and complexity, there is a tendency to enhance mobile crowdsensing with the edge computing paradigm to reduce latency and computational complexity, and improve the expandability and security. In this paper, we propose an integrated solution to stimulate the strategic users to contribute more for truth discovery in the edge-assisted mobile crowdsensing. We design an incentive mechanism consisting of truth discovery stage and budget feasible reverse auction stage. In truth discovery stage, we estimate the truth for each task in both deep cloud and edge cloud. In budget feasible reverse auction stage, we design a greedy algorithm to select the winners to maximize the quality function under the budget constraint. 
Through extensive simulations, we demonstrate that the proposed mechanism is computationally efficient, individually rational, truthful, budget feasible and constant approximate. Moreover, the proposed mechanism shows great superiority in terms of estimation precision and expandability.}, } @article {pmid31992273, year = {2020}, author = {Hettige, S and Dasanayaka, E and Ediriweera, DS}, title = {Usage of cloud storage facilities by medical students in a low-middle income country, Sri Lanka: a cross sectional study.}, journal = {BMC medical informatics and decision making}, volume = {20}, number = {1}, pages = {10}, pmid = {31992273}, issn = {1472-6947}, mesh = {Cloud Computing/*statistics & numerical data ; Cross-Sectional Studies ; Female ; Humans ; Male ; Sri Lanka ; Students, Medical/*psychology ; Surveys and Questionnaires ; }, abstract = {BACKGROUND: Cloud storage facilities (CSF) has become popular among the internet users. There is limited data on CSF usage among university students in low middle-income countries including Sri Lanka. In this study we present the CSF usage among medical students at the Faculty of Medicine, University of Kelaniya.

METHODS: We undertook a cross sectional study at the Faculty of Medicine, University of Kelaniya, Sri Lanka. Stratified random sampling was used to recruit students representing all the batches. A self-administered questionnaire was given.

RESULTS: Of 261 (90.9%) respondents, 181 (69.3%) were females. CSF awareness was 56.5% (95%CI: 50.3-62.6%) and CSF usage was 50.8% (95%CI: 44.4-57.2%). Awareness was higher in males (P = 0.003) and was low in senior students. Of CSF aware students, 85% knew about Google Drive and 70.6% used it. 73.6% and 42.1% knew about Dropbox and OneDrive, and 50.0% and 22.0% used them, respectively. There was no association between CSF awareness and pre-university entrance or undergraduate examination performance. Inadequate knowledge, time, accessibility, security and privacy concerns limited CSF usage. 69.8% indicated that they would like to undergo training on CSF as an effective tool for education.

CONCLUSION: CSF awareness and usage among the students were 56.5 and 50.8%. Google drive is the most popular CSF. Lack of knowledge, accessibility, concerns on security and privacy limited CSF usage among students. Majority were interested to undergo training on CSF and undergraduate Information Communication Technology (ICT) curricula should introduce CSF as effective educational tools.}, } @article {pmid31989908, year = {2019}, author = {Alghazo, JM}, title = {Intelligent Security and Privacy of Electronic Health Records Using Biometric Images.}, journal = {Current medical imaging reviews}, volume = {15}, number = {4}, pages = {386-394}, doi = {10.2174/1573405615666181228121535}, pmid = {31989908}, mesh = {Biometry ; *Computer Security ; *Electronic Health Records ; Humans ; *Information Management ; *Privacy ; }, abstract = {BACKGROUND: In the presence of Cloud Environment and the migration of Electronic Health Systems and records to the Cloud, patient privacy has become an emergent problem for healthcare institutions. Government bylaws, electronic health documentation, and innovative internet health services generate numerous security issues for healthcare conformity and information security groups. To deal with these issues, healthcare institutes must protect essential IT infrastructure from unauthorized use by insiders and hackers. The Cloud Computing archetype allows for EHealth methods that improve the features and functionality of systems on the cloud. On the other hand, sending patients' medical information and records to the Cloud entails a number of risks in the protection and privacy of the health records during the communication process.

AIM: In this paper, a solution is proposed for the security of Electronic Health Records (EHRs) in cloud environment during the process of sending the data to the cloud. In addition, the proposed method uses biometric images that allow for unified patient identification across cloud-based EHRs and across medical institutions.

METHOD: To protect the privacy of patients' information and streamline the migration process, a watermarking-based method is proposed for health care providers to ensure that patients' data are only accessible to authorized personnel. Patients' information, such as name, id, symptoms, diseases, and previous history, is secured in biometric images of patients as an encrypted watermark.

RESULTS: Quality and impeccability analysis and robustness were performed to test the proposed method. The PSNR values show that the proposed method produced excellent results.

CONCLUSION: The robustness and impressibility of the proposed method were tested by subjecting the watermarked images to different simulated attacks. The watermarks were largely impermeable to varied and repeated attacks.}, } @article {pmid31979184, year = {2020}, author = {Shao, M and Zhou, Z and Bin, G and Bai, Y and Wu, S}, title = {A Wearable Electrocardiogram Telemonitoring System for Atrial Fibrillation Detection.}, journal = {Sensors (Basel, Switzerland)}, volume = {20}, number = {3}, pages = {}, pmid = {31979184}, issn = {1424-8220}, support = {71661167001//National Natural Science Foundation of China/ ; 71781260096//National Natural Science Foundation of China/ ; 61871005//National Natural Science Foundation of China/ ; 4184081//Beijing Municipal Natural Science Foundation/ ; KM201911417011//Beijing Municipal Education Commission/ ; }, mesh = {Algorithms ; Atrial Fibrillation/*diagnosis ; Cloud Computing ; Electrocardiography/*instrumentation/*methods ; Electrocardiography, Ambulatory/*instrumentation/*methods ; Humans ; Machine Learning ; Signal Processing, Computer-Assisted/instrumentation ; Smartphone ; Wearable Electronic Devices ; Wireless Technology/instrumentation ; }, abstract = {In this paper we proposed a wearable electrocardiogram (ECG) telemonitoring system for atrial fibrillation (AF) detection based on a smartphone and cloud computing. A wearable ECG patch was designed to collect ECG signals and send the signals to an Android smartphone via Bluetooth. An Android APP was developed to display the ECG waveforms in real time and transmit every 30 s ECG data to a remote cloud server. A machine learning (CatBoost)-based ECG classification method was proposed to detect AF in the cloud server. In case of detected AF, the cloud server pushed the ECG data and classification results to the web browser of a doctor. Finally, the Android APP displayed the doctor's diagnosis for the ECG signals. 
Experimental results showed the proposed CatBoost classifier trained with 17 selected features achieved an overall F1 score of 0.92 on the test set (n = 7,270). The proposed wearable ECG monitoring system may potentially be useful for long-term ECG telemonitoring for AF detection.}, } @article {pmid31979168, year = {2020}, author = {Vanus, J and Fiedorova, K and Kubicek, J and Gorjani, OM and Augustynek, M}, title = {Wavelet-Based Filtration Procedure for Denoising the Predicted CO2 Waveforms in Smart Home within the Internet of Things.}, journal = {Sensors (Basel, Switzerland)}, volume = {20}, number = {3}, pages = {}, pmid = {31979168}, issn = {1424-8220}, support = {CZ.02.1.01/0.0/0.0/16_019/0000867//European Regional Development Fund/ ; }, abstract = {The operating cost minimization of smart homes can be achieved with the optimization of the management of the building's technical functions by determination of the current occupancy status of the individual monitored spaces of a smart home. To respect the privacy of the smart home residents, indirect methods (without using cameras and microphones) are possible for occupancy recognition of space in smart homes. This article describes a newly proposed indirect method to increase the accuracy of the occupancy recognition of monitored spaces of smart homes. The proposed procedure uses the prediction of the course of CO2 concentration from operationally measured quantities (temperature indoor and relative humidity indoor) using artificial neural networks with a multilayer perceptron algorithm. The mathematical wavelet transformation method is used for additive noise canceling from the predicted course of the CO2 concentration signal with an objective increase accuracy of the prediction. 
The calculated accuracy of CO2 concentration waveform prediction in the additive noise-canceling application was higher than 98% in selected experiments.}, } @article {pmid31979135, year = {2020}, author = {Wei, H and Luo, H and Sun, Y}, title = {Mobility-Aware Service Caching in Mobile Edge Computing for Internet of Things.}, journal = {Sensors (Basel, Switzerland)}, volume = {20}, number = {3}, pages = {}, pmid = {31979135}, issn = {1424-8220}, support = {61772085, 61532012, 61672109//National Natural Science Foundation of China/ ; 2018YFB2100300//National Key R&D Program of China/ ; }, abstract = {The mobile edge computing architecture successfully solves the problem of high latency in cloud computing. However, current research focuses on computation offloading and lacks research on service caching issues. To solve the service caching problem, especially for scenarios with high mobility in the Sensor Networks environment, we study the mobility-aware service caching mechanism. Our goal is to maximize the number of users who are served by the local edge-cloud, and we need to make predictions about the user's target location to avoid invalid service requests. First, we propose an idealized geometric model to predict the target area of a user's movement. Since it is difficult to obtain all the data needed by the model in practical applications, we use frequent patterns to mine local moving track information. Then, by using the results of the trajectory data mining and the proposed geometric model, we make predictions about the user's target location. Based on the prediction result and existing service cache, the service request is forwarded to the appropriate base station through the service allocation algorithm. Finally, to be able to train and predict the most popular services online, we propose a service cache selection algorithm based on back-propagation (BP) neural network. 
The simulation experiments show that our service cache algorithm reduces the service response time by about 13.21% on average compared to other algorithms, and increases the local service proportion by about 15.19% on average compared to the algorithm without mobility prediction.}, } @article {pmid31971940, year = {2020}, author = {Blatti, C and Emad, A and Berry, MJ and Gatzke, L and Epstein, M and Lanier, D and Rizal, P and Ge, J and Liao, X and Sobh, O and Lambert, M and Post, CS and Xiao, J and Groves, P and Epstein, AT and Chen, X and Srinivasan, S and Lehnert, E and Kalari, KR and Wang, L and Weinshilboum, RM and Song, JS and Jongeneel, CV and Han, J and Ravaioli, U and Sobh, N and Bushell, CB and Sinha, S}, title = {Knowledge-guided analysis of "omics" data using the KnowEnG cloud platform.}, journal = {PLoS biology}, volume = {18}, number = {1}, pages = {e3000583}, pmid = {31971940}, issn = {1545-7885}, support = {U54 GM114838/GM/NIGMS NIH HHS/United States ; }, mesh = {*Algorithms ; *Cloud Computing ; Cluster Analysis ; Computational Biology/methods ; Data Analysis ; Data Mining/*methods ; Datasets as Topic ; Genomics/*methods ; High-Throughput Nucleotide Sequencing/methods ; Humans ; Knowledge ; Machine Learning ; Metabolomics/methods ; *Software ; }, abstract = {We present Knowledge Engine for Genomics (KnowEnG), a free-to-use computational system for analysis of genomics data sets, designed to accelerate biomedical discovery. It includes tools for popular bioinformatics tasks such as gene prioritization, sample clustering, gene set analysis, and expression signature analysis. The system specializes in "knowledge-guided" data mining and machine learning algorithms, in which user-provided data are analyzed in light of prior information about genes, aggregated from numerous knowledge bases and encoded in a massive "Knowledge Network." 
KnowEnG adheres to "FAIR" principles (findable, accessible, interoperable, and reuseable): its tools are easily portable to diverse computing environments, run on the cloud for scalable and cost-effective execution, and are interoperable with other computing platforms. The analysis tools are made available through multiple access modes, including a web portal with specialized visualization modules. We demonstrate the KnowEnG system's potential value in democratization of advanced tools for the modern genomics era through several case studies that use its tools to recreate and expand upon the published analysis of cancer data sets.}, } @article {pmid31968669, year = {2020}, author = {Moleda, M and Momot, A and Mrozek, D}, title = {Predictive Maintenance of Boiler Feed Water Pumps Using SCADA Data.}, journal = {Sensors (Basel, Switzerland)}, volume = {20}, number = {2}, pages = {}, pmid = {31968669}, issn = {1424-8220}, support = {0053/DW/2018//Politechnika Śląska/ ; BK/204/RAU2/2019//Politechnika Śląska/ ; 02/020/RGPL/0184//Politechnika Śląska/ ; }, abstract = {IoT enabled predictive maintenance allows companies in the energy sector to identify potential problems in the production devices far before the failure occurs. In this paper, we propose a method for early detection of faults in boiler feed pumps using existing measurements currently captured by control devices. In the experimental part, we work on real measurement data and events from a coal fired power plant. The main research objective is to implement a model that detects deviations from the normal operation state based on regression and to check which events or failures can be detected by it. The presented technique allows the creation of a predictive system working on the basis of the available data with a minimal requirement of expert knowledge, in particular the knowledge related to the categorization of failures and the exact time of their occurrence, which is sometimes difficult to identify. 
The paper shows that with modern technologies, such as the Internet of Things, big data, and cloud computing, it is possible to integrate automation systems, designed in the past only to control the production process, with IT systems that make all processes more efficient through the use of advanced analytic tools.}, } @article {pmid31963336, year = {2020}, author = {Ming, Y and Yu, X}, title = {Efficient Privacy-Preserving Data Sharing for Fog-Assisted Vehicular Sensor Networks.}, journal = {Sensors (Basel, Switzerland)}, volume = {20}, number = {2}, pages = {}, pmid = {31963336}, issn = {1424-8220}, support = {2018JM6081//Natural Science Foundation of Shaanxi Province/ ; 300102249204//Fundamental Research Funds for the Central Universities, CHD/ ; }, abstract = {Vehicular sensor networks (VSNs) have emerged as a paradigm for improving traffic safety in urban cities. However, there are still several issues with VSNs. Vehicles equipped with sensing devices usually upload large amounts of data reports to a remote cloud center for processing and analyzing, causing heavy computation and communication costs. Additionally, to choose an optimal route, it is required for vehicles to query the remote cloud center to obtain road conditions of the potential moving route, leading to an increased communication delay and leakage of location privacy. To solve these problems, this paper proposes an efficient privacy-preserving data sharing (EP 2 DS) scheme for fog-assisted vehicular sensor networks. Specifically, the proposed scheme utilizes fog computing to provide local data sharing with low latency; furthermore, it exploits a super-increasing sequence to format the sensing data of different road segments into one report, thus saving on the resources of communication and computation. In addition, using the modified oblivious transfer technology, the proposed scheme can query the road conditions of the potential moving route without disclosing the query location. 
Finally, an analysis of security suggests that the proposed scheme can satisfy all the requirements for security and privacy, with the evaluation results indicating that the proposed scheme leads to low costs in computation and communication.}, } @article {pmid31963169, year = {2020}, author = {Kaya, MC and Saeedi Nikoo, M and Schwartz, ML and Oguztuzun, H}, title = {Internet of Measurement Things Architecture: Proof of Concept with Scope of Accreditation.}, journal = {Sensors (Basel, Switzerland)}, volume = {20}, number = {2}, pages = {}, pmid = {31963169}, issn = {1424-8220}, abstract = {Many industries, such as manufacturing, aviation, and power generation, employ sensitive measurement devices to be calibrated by certified experts. The diversity and sophistication of measurement devices and their calibration needs require networked and automated solutions. Internet of Measurement Things (IoMT) is an architectural framework that is based on the Industrial Internet of Things for the calibration industry. This architecture involves a layered model with a cloud-centric middle layer. In this article, the realization of this conceptual architecture is described. The applicability of the IoMT architecture in the calibration industry is shown through an editor application for Scope of Accreditation. The cloud side of the implementation is deployed to Microsoft Azure. The editor itself is created as a cloud service, and IoT Hub is used to collect data from calibration laboratories. 
By adapting the IoMT architecture to a commonly used cloud platform, considerable progress is achieved to encompass Metrology data and serve the majority of the stakeholders.}, } @article {pmid31953822, year = {2020}, author = {Pittard, WS and Villaveces, CK and Li, S}, title = {A Bioinformatics Primer to Data Science, with Examples for Metabolomics.}, journal = {Methods in molecular biology (Clifton, N.J.)}, volume = {2104}, number = {}, pages = {245-263}, doi = {10.1007/978-1-0716-0239-3_14}, pmid = {31953822}, issn = {1940-6029}, support = {U2C ES030163/ES/NIEHS NIH HHS/United States ; U2C ES026560/ES/NIEHS NIH HHS/United States ; P50 ES026071/ES/NIEHS NIH HHS/United States ; P30 ES019776/ES/NIEHS NIH HHS/United States ; UH2 AI132345/AI/NIAID NIH HHS/United States ; U01 CA235493/CA/NCI NIH HHS/United States ; }, mesh = {Cloud Computing ; Computational Biology/*methods/standards ; Data Management ; *Data Science/methods/standards ; Database Management Systems ; Databases, Factual ; Humans ; *Metabolomics/standards/statistics & numerical data ; *Software ; }, abstract = {With the increasing importance of big data in biomedicine, skills in data science are a foundation for the individual career development and for the progress of science. This chapter is a practical guide to working with high-throughput biomedical data. It covers how to understand and set up the computing environment, to start a research project with proper and effective data management, and to perform common bioinformatics tasks such as data wrangling, quality control, statistical analysis, and visualization, with examples on metabolomics data. Concepts and tools related to coding and scripting are discussed. Version control, knitr and Jupyter notebooks are important to project management, collaboration, and research reproducibility. 
Overall, this chapter describes a core set of skills to work in bioinformatics, and can serve as a reference text at the level of a graduate course and interfacing with data science.}, } @article {pmid31947334, year = {2019}, author = {Nguyen, DC and Nguyen, KD and Pathirana, PN}, title = {A Mobile Cloud based IoMT Framework for Automated Health Assessment and Management.}, journal = {Annual International Conference of the IEEE Engineering in Medicine and Biology Society. IEEE Engineering in Medicine and Biology Society. Annual International Conference}, volume = {2019}, number = {}, pages = {6517-6520}, doi = {10.1109/EMBC.2019.8856631}, pmid = {31947334}, issn = {2694-0604}, mesh = {*Cloud Computing ; Computers ; *Confidentiality ; Delivery of Health Care ; Internet ; Monitoring, Physiologic ; }, abstract = {In recent years, there has been growing interest in the use of mobile cloud and Internet of Medical Things (IoMT) in automated diagnosis and health monitoring. These applications play a significant role in providing smart medical services in modern healthcare systems. In this paper, we deploy a mobile cloud-based IoMT scheme to monitor the progression of a neurological disorder using a test of motor coordination. The computing and storage capabilities of cloud server is employed to facilitate the estimation of the severity levels given by an established quantitative assessment. An Android application is used for data acquisition and communication with the cloud. Further, we integrate the proposed system with a data sharing framework in a blockchain network as an innovative solution that allows reliable data exchange among healthcare users. 
The experimental results show the feasibility of implementing the proposed system in a wide range of healthcare applications.}, } @article {pmid31946857, year = {2019}, author = {Ellis, CA and Gu, P and Sendi, MSE and Huddleston, D and Sharma, A and Mahmoudi, B}, title = {A Cloud-based Framework for Implementing Portable Machine Learning Pipelines for Neural Data Analysis.}, journal = {Annual International Conference of the IEEE Engineering in Medicine and Biology Society. IEEE Engineering in Medicine and Biology Society. Annual International Conference}, volume = {2019}, number = {}, pages = {4466-4469}, pmid = {31946857}, issn = {2694-0604}, support = {K23 NS105944/NS/NINDS NIH HHS/United States ; }, mesh = {*Cloud Computing ; *Computational Biology ; *Data Analysis ; Machine Learning ; *Software ; }, abstract = {Cloud-based computing has created new avenues for innovative research. In recent years, numerous cloud-based, data analysis projects within the biomedical domain have been implemented. As this field is likely to grow, there is a need for a unified platform for the developing and testing of advanced analytic and modeling tools that enables those tools to be easily reused for biomedical data analysis by a broad set of users with diverse technical skills. A cloud-based platform of this nature could greatly assist future research endeavors. In this paper, we take the first step towards building such a platform. We define an approach by which containerized analytic pipelines can be distributed for use on cloud-based or on-premise computing platforms. We demonstrate our approach by implementing a portable biomarker identification pipeline using a logistic regression model with elastic net regularization (LR-ENR) and running it on Google Cloud. 
We used this pipeline for the diagnosis of Parkinson's disease based on a combination of clinical, demographic, and MRI-based features and for the identification of the most predictive biomarkers.}, } @article {pmid31945096, year = {2020}, author = {Vitkin, E and Gillis, A and Polikovsky, M and Bender, B and Golberg, A and Yakhini, Z}, title = {Distributed flux balance analysis simulations of serial biomass fermentation by two organisms.}, journal = {PloS one}, volume = {15}, number = {1}, pages = {e0227363}, pmid = {31945096}, issn = {1932-6203}, mesh = {*Computer Simulation ; *Computer-Aided Design ; Escherichia coli/*growth & development ; Ethanol/*metabolism ; *Fermentation ; Hydrolysis ; *Models, Biological ; Rhodophyta/metabolism ; Saccharomyces cerevisiae/*growth & development ; Ulva/metabolism ; Zea mays/metabolism ; }, abstract = {Intelligent biorefinery design that addresses both the composition of the biomass feedstock as well as fermentation microorganisms could benefit from dedicated tools for computational simulation and computer-assisted optimization. Here we present the BioLego Vn2.0 framework, based on Microsoft Azure Cloud, which supports large-scale simulations of biomass serial fermentation processes by two different organisms. BioLego enables the simultaneous analysis of multiple fermentation scenarios and the comparison of fermentation potential of multiple feedstock compositions. Thanks to the effective use of cloud computing it further allows resource intensive analysis and exploration of media and organism modifications. We use BioLego to obtain biological and validation results, including (1) exploratory search for the optimal utilization of corn biomasses-corn cobs, corn fiber and corn stover-in fermentation biorefineries; (2) analysis of the possible effects of changes in the composition of K. alvarezi biomass on the ethanol production yield in an anaerobic two-step process (S. cerevisiae followed by E. 
coli); (3) analysis of the impact, on the estimated ethanol production yield, of knocking out single organism reactions either in one or in both organisms in an anaerobic two-step fermentation process of Ulva sp. into ethanol (S. cerevisiae followed by E. coli); and (4) comparison of several experimentally measured ethanol fermentation rates with the predictions of BioLego.}, } @article {pmid31939789, year = {2020}, author = {Reyna, MA and Josef, CS and Jeter, R and Shashikumar, SP and Westover, MB and Nemati, S and Clifford, GD and Sharma, A}, title = {Early Prediction of Sepsis From Clinical Data: The PhysioNet/Computing in Cardiology Challenge 2019.}, journal = {Critical care medicine}, volume = {48}, number = {2}, pages = {210-217}, pmid = {31939789}, issn = {1530-0293}, support = {T32 GM095442/GM/NIGMS NIH HHS/United States ; K01 ES025445/ES/NIEHS NIH HHS/United States ; U24 CA215109/CA/NCI NIH HHS/United States ; UL1 TR002378/TR/NCATS NIH HHS/United States ; R01 GM104987/GM/NIGMS NIH HHS/United States ; }, mesh = {*Algorithms ; *Early Diagnosis ; Electronic Health Records ; Female ; Humans ; *Intensive Care Units ; Male ; Sepsis/*diagnosis/physiopathology ; Severity of Illness Index ; Time Factors ; United States ; }, abstract = {OBJECTIVES: Sepsis is a major public health concern with significant morbidity, mortality, and healthcare expenses. Early detection and antibiotic treatment of sepsis improve outcomes. However, although professional critical care societies have proposed new clinical criteria that aid sepsis recognition, the fundamental need for early detection and treatment remains unmet. In response, researchers have proposed algorithms for early sepsis detection, but directly comparing such methods has not been possible because of different patient cohorts, clinical variables and sepsis criteria, prediction tasks, evaluation metrics, and other differences. 
To address these issues, the PhysioNet/Computing in Cardiology Challenge 2019 facilitated the development of automated, open-source algorithms for the early detection of sepsis from clinical data.

DESIGN: Participants submitted containerized algorithms to a cloud-based testing environment, where we graded entries for their binary classification performance using a novel clinical utility-based evaluation metric. We designed this scoring function specifically for the Challenge to reward algorithms for early predictions and penalize them for late or missed predictions and for false alarms.

SETTING: ICUs in three separate hospital systems. We shared data from two systems publicly and sequestered data from all three systems for scoring.

PATIENTS: We sourced over 60,000 ICU patients with up to 40 clinical variables for each hour of a patient's ICU stay. We applied Sepsis-3 clinical criteria for sepsis onset.

INTERVENTIONS: None.

MEASUREMENTS AND MAIN RESULTS: A total of 104 groups from academia and industry participated, contributing 853 submissions. Furthermore, 90 abstracts based on Challenge entries were accepted for presentation at Computing in Cardiology.

CONCLUSIONS: Diverse computational approaches predict the onset of sepsis several hours before clinical recognition, but generalizability to different hospital systems remains a challenge.}, } @article {pmid31936006, year = {2020}, author = {Jones, M and DeRuyter, F and Morris, J}, title = {The Digital Health Revolution and People with Disabilities: Perspective from the United States.}, journal = {International journal of environmental research and public health}, volume = {17}, number = {2}, pages = {}, pmid = {31936006}, issn = {1660-4601}, mesh = {*Artificial Intelligence ; Chronic Disease/*rehabilitation ; Delivery of Health Care/*methods ; Disabled Persons/*rehabilitation ; Humans ; Telemedicine/*methods ; United States ; }, abstract = {This article serves as the introduction to this special issue on Mobile Health and Mobile Rehabilitation for People with Disabilities. Social, technological and policy trends are reviewed. Needs, opportunities and challenges for the emerging fields of mobile health (mHealth, aka eHealth) and mobile rehabilitation (mRehab) are discussed. Healthcare in the United States (U.S.) is at a critical juncture characterized by: (1) a growing need for healthcare and rehabilitation services; (2) maturing technological capabilities to support more effective and efficient health services; (3) evolving public policies designed, by turns, to contain cost and support new models of care; and (4) a growing need to ensure acceptance and usability of new health technologies by people with disabilities and chronic conditions, clinicians and health delivery systems. Discussion of demographic and population health data, healthcare service delivery and a public policy primarily focuses on the U.S. However, trends identified (aging populations, growing prevalence of chronic conditions and disability, labor shortages in healthcare) apply to most countries with advanced economies and others. 
Furthermore, technologies that enable mRehab (wearable sensors, in-home environmental monitors, cloud computing, artificial intelligence) transcend national boundaries. Remote and mobile healthcare delivery is needed and inevitable. Proactive engagement is critical to ensure acceptance and effectiveness for all stakeholders.}, } @article {pmid31934500, year = {2020}, author = {Kuzniar, A and Maassen, J and Verhoeven, S and Santuari, L and Shneider, C and Kloosterman, WP and de Ridder, J}, title = {sv-callers: a highly portable parallel workflow for structural variant detection in whole-genome sequence data.}, journal = {PeerJ}, volume = {8}, number = {}, pages = {e8214}, pmid = {31934500}, issn = {2167-8359}, abstract = {Structural variants (SVs) are an important class of genetic variation implicated in a wide array of genetic diseases including cancer. Despite the advances in whole genome sequencing, comprehensive and accurate detection of SVs in short-read data still poses some practical and computational challenges. We present sv-callers, a highly portable workflow that enables parallel execution of multiple SV detection tools, as well as provide users with example analyses of detected SV callsets in a Jupyter Notebook. This workflow supports easy deployment of software dependencies, configuration and addition of new analysis tools. Moreover, porting it to different computing systems requires minimal effort. 
Finally, we demonstrate the utility of the workflow by performing both somatic and germline SV analyses on different high-performance computing systems.}, } @article {pmid31929952, year = {2020}, author = {Masood, A and Yang, P and Sheng, B and Li, H and Li, P and Qin, J and Lanfranchi, V and Kim, J and Feng, DD}, title = {Cloud-Based Automated Clinical Decision Support System for Detection and Diagnosis of Lung Cancer in Chest CT.}, journal = {IEEE journal of translational engineering in health and medicine}, volume = {8}, number = {}, pages = {4300113}, pmid = {31929952}, issn = {2168-2372}, abstract = {Lung cancer is a major cause for cancer-related deaths. The detection of pulmonary cancer in the early stages can highly increase survival rate. Manual delineation of lung nodules by radiologists is a tedious task. We developed a novel computer-aided decision support system for lung nodule detection based on a 3D Deep Convolutional Neural Network (3DDCNN) for assisting the radiologists. Our decision support system provides a second opinion to the radiologists in lung cancer diagnostic decision making. In order to leverage 3-dimensional information from Computed Tomography (CT) scans, we applied median intensity projection and multi-Region Proposal Network (mRPN) for automatic selection of potential region-of-interests. Our Computer Aided Diagnosis (CAD) system has been trained and validated using LUNA16, ANODE09, and LIDC-IDR datasets; the experiments demonstrate the superior performance of our system, attaining sensitivity, specificity, AUROC, accuracy, of 98.4%, 92%, 96% and 98.51% with 2.1 FPs per scan. We integrated cloud computing, trained and validated our Cloud-Based 3DDCNN on the datasets provided by Shanghai Sixth People's Hospital, as well as LUNA16, ANODE09, and LIDC-IDR. Our system outperformed the state-of-the-art systems and obtained an impressive 98.7% sensitivity at 1.97 FPs per scan. 
This shows the potentials of deep learning, in combination with cloud computing, for accurate and efficient lung nodule detection via CT imaging, which could help doctors and radiologists in treating lung cancer patients.}, } @article {pmid31923284, year = {2020}, author = {de Sousa, C and Fatoyinbo, L and Neigh, C and Boucka, F and Angoue, V and Larsen, T}, title = {Cloud-computing and machine learning in support of country-level land cover and ecosystem extent mapping in Liberia and Gabon.}, journal = {PloS one}, volume = {15}, number = {1}, pages = {e0227438}, pmid = {31923284}, issn = {1932-6203}, mesh = {*Cloud Computing ; Datasets as Topic ; Ecosystem ; Gabon ; Liberia ; *Machine Learning ; *Maps as Topic ; }, abstract = {Liberia and Gabon joined the Gaborone Declaration for Sustainability in Africa (GDSA), established in 2012, with the goal of incorporating the value of nature into national decision making by estimating the multiple services obtained from ecosystems using the natural capital accounting framework. In this study, we produced 30-m resolution 10 classes land cover maps for the 2015 epoch for Liberia and Gabon using the Google Earth Engine (GEE) cloud platform to support the ongoing natural capital accounting efforts in these nations. We propose an integrated method of pixel-based classification using Landsat 8 data, the Random Forest (RF) classifier and ancillary data to produce high quality land cover products to fit a broad range of applications, including natural capital accounting. Our approach focuses on a pre-classification filtering (Masking Phase) based on spectral signature and ancillary data to reduce the number of pixels prone to be misclassified; therefore, increasing the quality of the final product. The proposed approach yields an overall accuracy of 83% and 81% for Liberia and Gabon, respectively, outperforming prior land cover products for these countries in both thematic content and accuracy. 
Our approach, while relatively simple and highly replicable, was able to produce high quality land cover products to fill an observational gap in up to date land cover data at national scale for Liberia and Gabon.}, } @article {pmid31918454, year = {2020}, author = {Anderson, K and Fawcett, D and Cugulliere, A and Benford, S and Jones, D and Leng, R}, title = {Vegetation expansion in the subnival Hindu Kush Himalaya.}, journal = {Global change biology}, volume = {26}, number = {3}, pages = {1608-1625}, pmid = {31918454}, issn = {1365-2486}, support = {721995//European Union/International ; //Natural Environment Great Western 4+ Research PhD Studentship/International ; }, mesh = {*Climate Change ; Ecology ; *Ecosystem ; Environmental Monitoring ; Plants ; Snow ; }, abstract = {The mountain systems of the Hindu Kush Himalaya (HKH) are changing rapidly due to climatic change, but an overlooked component is the subnival ecosystem (between the treeline and snow line), characterized by short-stature plants and seasonal snow. Basic information about subnival vegetation distribution and rates of ecosystem change are not known, yet such information is needed to understand relationships between subnival ecology and water/carbon cycles. We show that HKH subnival ecosystems cover five to 15 times the area of permanent glaciers and snow, highlighting their eco-hydrological importance. Using satellite data from the Landsat 5, 7 and 8 missions, we measured change in the spatial extent of subnival vegetation from 1993 to 2018. The Landsat surface reflectance-derived Normalized Difference Vegetation Index product was thresholded at 0.1 to indicate the presence/absence of vegetation. Using this product, the strength and direction of time-series trends in the green pixel fraction were measured within three regions of interest. We controlled for cloud cover, snow cover and evaluated the impact of sensor radiometric differences between Landsat 7 and Landsat 8. 
Using Google Earth Engine to expedite data processing tasks, we show that there has been a weakly positive increase in the extent of subnival vegetation since 1993. Strongest and most significant trends were found in the height region of 5,000-5,500 m a.s.l. across the HKH extent: R[2] = .302, Kendall's τ = 0.424, p < .05, but this varied regionally, with height, and according to the sensors included in the time series. Positive trends at lower elevations occurred on steeper slopes whilst at higher elevations, flatter areas exhibited stronger trends. We validated our findings using online photographs. Subnival ecological changes have likely impacted HKH carbon and water cycles with impacts on millions of people living downstream, but the strength and direction of impacts of vegetation expansion remain unknown.}, } @article {pmid31915982, year = {2020}, author = {Cheng, X and Chen, F and Xie, D and Sun, H and Huang, C}, title = {Design of a Secure Medical Data Sharing Scheme Based on Blockchain.}, journal = {Journal of medical systems}, volume = {44}, number = {2}, pages = {52}, pmid = {31915982}, issn = {1573-689X}, support = {61972438//National Natural Science Foundation of China/ ; }, mesh = {Blockchain/*standards ; Computer Security/*standards ; Confidentiality ; Health Information Exchange/*standards ; Humans ; Information Storage and Retrieval/methods ; }, abstract = {With the rapid development of technologies such as artificial intelligence, blockchain, cloud computing, and big data, Medical Cyber Physical Systems (MCPS) are increasingly demanding data security, while cloud storage solves the storage problem of complex medical data. However, it is difficult to realize data security sharing. The decentralization feature of blockchain is helpful to solve the problem that the secure authentication process is highly dependent on the trusted third party and implement data security transmission. 
In this paper, the blockchain technology is used to describe the security requirements in authentication process, and a network model of MCPS based on blockchain is proposed. Through analysis of medical data storage architecture, it can ensure that data can't be tampered and untrackable. In the security authentication phase, bilinear mapping and intractable problems can be used to solve the security threat in the authentication process of medical data providers and users. It can avoid the credibility problem of the trusted third party, and also can realize the two-way authentication between the hospital and blockchain node. Then, BAN logic is used to analyze security protocols, and formal analysis and comparison of security protocols are also made. The results show that the MCPS based on blockchain not only realizes medical treatment data sharing, but also meet the various security requirements in the security authentication phase. In addition, the storage and computing overhead costs is ideal. 
Therefore, the proposed scheme is more suitable for secure sharing of medical big data.}, } @article {pmid31906139, year = {2019}, author = {Rady, A and Fischer, J and Reeves, S and Logan, B and Watson, NJ}, title = {The Effect of Light Intensity, Sensor Height, and Spectral Pre-Processing Methods when using NIR Spectroscopy to Identify Different Allergen-Containing Powdered Foods.}, journal = {Sensors (Basel, Switzerland)}, volume = {20}, number = {1}, pages = {}, pmid = {31906139}, issn = {1424-8220}, support = {EP/M02315X/1//EPSRC/ ; EP/R045127/1//Engineering and Physical Sciences Research Council/ ; }, mesh = {Allergens/*analysis ; Biosensing Techniques/*instrumentation ; Flour/analysis ; *Food ; *Light ; Neural Networks, Computer ; Powders ; Principal Component Analysis ; Spectroscopy, Near-Infrared/*methods ; }, abstract = {Food allergens present a significant health risk to the human population, so their presence must be monitored and controlled within food production environments. This is especially important for powdered food, which can contain nearly all known food allergens. Manufacturing is experiencing the fourth industrial revolution (Industry 4.0), which is the use of digital technologies, such as sensors, Internet of Things (IoT), artificial intelligence, and cloud computing, to improve the productivity, efficiency, and safety of manufacturing processes. This work studied the potential of small low-cost sensors and machine learning to identify different powdered foods which naturally contain allergens. The research utilised a near-infrared (NIR) sensor and measurements were performed on over 50 different powdered food materials. This work focussed on several measurement and data processing parameters, which must be determined when using these sensors. These included sensor light intensity, height between sensor and food sample, and the most suitable spectra pre-processing method. 
It was found that the K-nearest neighbour and linear discriminant analysis machine learning methods had the highest classification prediction accuracy for identifying samples containing allergens of all methods studied. The height between the sensor and the sample had a greater effect than the sensor light intensity and the classification models performed much better when the sensor was positioned closer to the sample with the highest light intensity. The spectra pre-processing methods, which had the largest positive impact on the classification prediction accuracy, were the standard normal variate (SNV) and multiplicative scattering correction (MSC) methods. It was found that with the optimal combination of sensor height, light intensity, and spectra pre-processing, a classification prediction accuracy of 100% could be achieved, making the technique suitable for use within production environments.}, } @article {pmid31905910, year = {2019}, author = {Ren, Y and Zhu, F and Sharma, PK and Wang, T and Wang, J and Alfarraj, O and Tolba, A}, title = {Data Query Mechanism Based on Hash Computing Power of Blockchain in Internet of Things.}, journal = {Sensors (Basel, Switzerland)}, volume = {20}, number = {1}, pages = {}, pmid = {31905910}, issn = {1424-8220}, support = {61772280, 61772454, 61811530332, 61811540410//NSFC/ ; }, abstract = {In the IoT (Internet of Things) environment, smart homes, smart grids, and telematics constantly generate data with complex attributes. These data have low heterogeneity and poor interoperability, which brings difficulties to data management and value mining. The promising combination of blockchain and the Internet of things as BCoT (blockchain of things) can solve these problems. This paper introduces an innovative method DCOMB (dual combination Bloom filter) to firstly convert the computational power of bitcoin mining into the computational power of query. 
Furthermore, this article uses the DCOMB method to build blockchain-based IoT data query model. DCOMB can implement queries only through mining hash calculation. This model combines the data stream of the IoT with the timestamp of the blockchain, improving the interoperability of data and the versatility of the IoT database system. The experiment results show that the random reading performance of DCOMB query is higher than that of COMB (combination Bloom filter), and the error rate of DCOMB is lower. Meanwhile, both DCOMB and COMB query performance are better than MySQL (My Structured Query Language).}, } @article {pmid31888474, year = {2019}, author = {Yang, A and Kishore, A and Phipps, B and Ho, JWK}, title = {Cloud accelerated alignment and assembly of full-length single-cell RNA-seq data using Falco.}, journal = {BMC genomics}, volume = {20}, number = {Suppl 10}, pages = {927}, pmid = {31888474}, issn = {1471-2164}, mesh = {*Cloud Computing ; Exons/genetics ; Humans ; Introns/genetics ; *RNA-Seq ; Sequence Alignment/*methods ; *Single-Cell Analysis ; }, abstract = {BACKGROUND: Read alignment and transcript assembly are the core of RNA-seq analysis for transcript isoform discovery. Nonetheless, current tools are not designed to be scalable for analysis of full-length bulk or single cell RNA-seq (scRNA-seq) data. The previous version of our cloud-based tool Falco only focuses on RNA-seq read counting, but does not allow for more flexible steps such as alignment and read assembly.

RESULTS: The Falco framework can harness the parallel and distributed computing environment in modern cloud platforms to accelerate read alignment and transcript assembly of full-length bulk RNA-seq and scRNA-seq data. There are two new modes in Falco: alignment-only and transcript assembly. In the alignment-only mode, Falco can speed up the alignment process by 2.5-16.4x based on two public scRNA-seq datasets when compared to alignment on a highly optimised standalone computer. Furthermore, it also provides a 10x average speed-up compared to alignment using published cloud-enabled tool for read alignment, Rail-RNA. In the transcript assembly mode, Falco can speed up the transcript assembly process by 1.7-16.5x compared to performing transcript assembly on a highly optimised computer.

CONCLUSION: Falco is a significantly updated open source big data processing framework that enables scalable and accelerated alignment and assembly of full-length scRNA-seq data on the cloud. The source code can be found at https://github.com/VCCRI/Falco.}, } @article {pmid31888203, year = {2019}, author = {Fu, HP and Chang, TS and Yeh, HP and Chen, YX}, title = {Analysis of Factors Influencing Hospitals' Implementation of a Green E-Procurement System Using a Cloud Model.}, journal = {International journal of environmental research and public health}, volume = {16}, number = {24}, pages = {}, pmid = {31888203}, issn = {1660-4601}, mesh = {Cloud Computing/*economics/*statistics & numerical data ; Financial Management, Hospital/*organization & administration/*statistics & numerical data ; Humans ; Materials Management, Hospital/*organization & administration/*statistics & numerical data ; Surveys and Questionnaires ; Taiwan ; }, abstract = {Currently, the green procurement activities of private hospitals in Taiwan follow the self-built green electronic-procurement (e-procurement) system. This requires professional personnel to take the time to regularly update the green specification and software and hardware of the e-procurement system, and the information system maintenance cost is high. In the case of a green e-procurement system crash, the efficiency of green procurement activities for hospitals is affected. If the green e-procurement can be moved to a convenient and trusty cloud computing model, this will enhance the efficiency of procurement activities and reduce the information maintenance cost for private hospitals. However, implementing a cloud model is an issue of technology innovation application and the technology-organization-environment (TOE) framework has been widely applied as the theoretical framework in technology innovation application. In addition, finding the weight of factors is a multi-criteria decision-making (MCDM) issue. 
Therefore, the present study first collected factors influencing implementation of the cloud mode together with the TOE as the theoretical framework, by reviewing the literature. Therefore, an expert questionnaire was designed and distributed to top managers of 20 private hospitals in southern Taiwan. The fuzzy analysis hierarchical process (FAHP), which is a MCDM tool, finds the weights of the factors influencing private hospitals in southern Taiwan when they implement a cloud green e-procurement system. The research results can enable private hospitals to successfully implement a green e-procurement system through a cloud model by optimizing resource allocation according to the weight of each factor. In addition, the results of this research can help cloud service providers of green e-procurement understand users' needs and develop relevant cloud solutions and marketing strategies.}, } @article {pmid31878140, year = {2019}, author = {Luo, Y and Li, W and Qiu, S}, title = {Anomaly Detection Based Latency-Aware Energy Consumption Optimization For IoT Data-Flow Services.}, journal = {Sensors (Basel, Switzerland)}, volume = {20}, number = {1}, pages = {}, pmid = {31878140}, issn = {1424-8220}, support = {61303043,61802030,61772087//National Natural Science Foundation of China/ ; XJK015QJG001//Hunan Provincial Education Science '12th Five-Year' planning subject/ ; K1705091//Changsha Science project/ ; }, abstract = {The continuous data-flow application in the IoT integrates the functions of fog, edge, and cloud computing. Its typical paradigm is the E-Health system. Like other IoT applications, the energy consumption optimization of IoT devices in continuous data-flow applications is a challenging problem. Since the anomalous nodes in the network will cause the increase of energy consumption, it is necessary to make continuous data flows bypass these nodes as much as possible. 
At present, the existing research work related to the performance of continuous data-flow is often optimized from system architecture design and deployment. In this paper, a mathematical programming method is proposed for the first time to optimize the runtime performance of continuous data flow applications. A lightweight anomaly detection method is proposed to evaluate the reliability of nodes. Then the node reliability is input into the optimization algorithm to estimate the task latency. The latency-aware energy consumption optimization for continuous data-flow is modeled as a mixed integer nonlinear programming problem. A block coordinate descend-based max-flow algorithm is proposed to solve this problem. Based on the real-life datasets, the numerical simulation is carried out. The simulation results show that the proposed strategy has better performance than the benchmark strategy.}, } @article {pmid31877812, year = {2019}, author = {Zyrianoff, I and Heideker, A and Silva, D and Kleinschmidt, J and Soininen, JP and Salmon Cinotti, T and Kamienski, C}, title = {Architecting and Deploying IoT Smart Applications: A Performance-Oriented Approach.}, journal = {Sensors (Basel, Switzerland)}, volume = {20}, number = {1}, pages = {}, pmid = {31877812}, issn = {1424-8220}, support = {XXXXXX//Ministério da Ciência, Tecnologia, Inovações e Comunicações/ ; YYYYYYYYY//European Commission/ ; }, abstract = {Layered internet of things (IoT) architectures have been proposed over the last years as they facilitate understanding the roles of different networking, hardware, and software components of smart applications. These are inherently distributed, spanning from devices installed in the field up to a cloud datacenter and further to a user smartphone, passing by intermediary stages at different levels of fog computing infrastructure. However, IoT architectures provide almost no hints on where components should be deployed. 
IoT Software Platforms derived from the layered architectures are expected to adapt to scenarios with different characteristics, requirements, and constraints from stakeholders and applications. In such a complex environment, a one-size-fits-all approach does not adapt well to varying demands and may hinder the adoption of IoT Smart Applications. In this paper, we propose a 5-layer IoT Architecture and a 5-stage IoT Computing Continuum, as well as provide insights on the mapping of software components of the former into physical locations of the latter. Also, we conduct a performance analysis study with six configurations where components are deployed into different stages. Our results show that different deployment configurations of layered components into staged locations generate bottlenecks that affect system performance and scalability. Based on that, policies for static deployment and dynamic migration of layered components into staged locations can be identified.}, } @article {pmid31871003, year = {2021}, author = {Feng, L and Zhou, L and Gupta, A and Zhong, J and Zhu, Z and Tan, KC and Qin, K}, title = {Solving Generalized Vehicle Routing Problem With Occasional Drivers via Evolutionary Multitasking.}, journal = {IEEE transactions on cybernetics}, volume = {51}, number = {6}, pages = {3171-3184}, doi = {10.1109/TCYB.2019.2955599}, pmid = {31871003}, issn = {2168-2275}, abstract = {With the emergence of crowdshipping and sharing economy, vehicle routing problem with occasional drivers (VRPOD) has been recently proposed to involve occasional drivers with private vehicles for the delivery of goods. In this article, we present a generalized variant of VRPOD, namely, the vehicle routing problem with heterogeneous capacity, time window, and occasional driver (VRPHTO), by taking the capacity heterogeneity and time window of vehicles into consideration. 
Furthermore, to meet the requirement in today's cloud computing service, wherein multiple optimization tasks may need to be solved at the same time, we propose a novel evolutionary multitasking algorithm (EMA) to optimize multiple VRPHTOs simultaneously with a single population. Finally, 56 new VRPHTO instances are generated based on the existing common vehicle routing benchmarks. Comprehensive empirical studies are conducted to illustrate the benefits of the new VRPHTOs and to verify the efficacy of the proposed EMA for multitasking against a state-of-art single task evolutionary solver. The obtained results showed that the employment of occasional drivers could significantly reduce the routing cost, and the proposed EMA is not only able to solve multiple VRPHTOs simultaneously but also can achieve enhanced optimization performance via the knowledge transfer between tasks along the evolutionary search process.}, } @article {pmid31866677, year = {2019}, author = {Peeler, EJ and Ernst, I}, title = {A new approach to the management of emerging diseases of aquatic animals.}, journal = {Revue scientifique et technique (International Office of Epizootics)}, volume = {38}, number = {2}, pages = {537-551}, doi = {10.20506/rst.38.2.3003}, pmid = {31866677}, issn = {0253-1933}, mesh = {*Animal Diseases/prevention & control/transmission ; Animals ; Animals, Wild ; *Aquaculture ; Communicable Diseases, Emerging/therapy/transmission/*veterinary ; Global Health ; }, abstract = {Since 1970, aquaculture has grown at a rate of between 5% and 10% per annum. It has achieved this by expanding into new areas, farming new (often non-native) species and intensifying production. These features of aquaculture, combined with large-scale movements of animals, have driven disease emergence, with negative consequences for both production and biodiversity. 
Efforts to improve the management of emerging diseases of aquatic animals must include actions to reduce the rate of disease emergence, enhance disease detection and reporting, and improve responses to prevent disease spread. The rate of disease emergence can be reduced by understanding the underpinning mechanisms and developing measures to mitigate them. The three principal mechanisms of disease emergence, namely, host switching, decreased host immunocompetence and increased pathogen virulence, have many drivers. The most important of these drivers are those that expose susceptible hosts to novel pathogens (e.g. the introduction of non-native hosts, translocation of pathogens, and increased interaction between wild and farmed populations), followed by host switching. Exposure to wild populations can be reduced through infrastructure and management measures to reduce escapes or exclude wild animals (e.g. barrier nets, filtration and closed-confinement technology). A high standard of health management ensures immunocompetence and resistance to putative new pathogens and strains, and thus reduces the rate of emergence. Appropriate site selection and husbandry can reduce the likelihood of pathogens developing increased virulence by preventing their continuous cycling in geographically or temporally linked populations. The under-reporting of emerging aquatic animal diseases constrains appropriate investigation and timely response. At the producer level, employing information and communications technology (e.g. smartphone applications and Cloud computing) to collect and manage data, coupled with a farmer-centric approach to surveillance, could improve reporting. In addition, reporting behaviours must be understood and disincentives mitigated. At the international level, improving the reporting of emerging diseases to the World Organisation for Animal Health allows Member Countries to implement appropriate measures to reduce transboundary spread. 
Reporting would be incentivised if the global response included the provision of support to low-income countries to, in the short term, control a reported emerging disease, and, in the longer term, develop aquatic animal health services. Early detection and reporting of emerging diseases are only of benefit if Competent Authorities' responses prevent disease spread. Effective responses to emerging diseases are challenging because basic information and tools are often lacking. Consequently, responses are likely to be sub-optimal unless contingency plans have been developed and tested, and decision-making arrangements have been well established.}, } @article {pmid31854537, year = {2019}, author = {Yan, H and Lu, H and Zhang, X}, title = {[Research on Risk Control of Hysteroscopy and Laparoscopy Based on Cloud Computing].}, journal = {Zhongguo yi liao qi xie za zhi = Chinese journal of medical instrumentation}, volume = {43}, number = {6}, pages = {459-461}, doi = {10.3969/j.issn.1671-7104.2019.06.018}, pmid = {31854537}, issn = {1671-7104}, mesh = {Cloud Computing ; Female ; Humans ; *Hysteroscopy ; *Laparoscopy ; }, abstract = {This paper discusses the use of Medatc System for the inspection and failure statistics of hysteroscopy and laparoscopy equipment. We add up our hospital one year of hysteroscopy and laparoscopy repair failure about 200 cases, more than 20 cases of patrol inspection. The equipment is inspected by professional quality control tools. 
The purpose is to summarize experience, improve maintenance efficiency, reduce the risk of using instruments, and serve clinical departments well.}, } @article {pmid31847431, year = {2019}, author = {Wazid, M and Das, AK and Shetty, S and J P C Rodrigues, J and Park, Y}, title = {LDAKM-EIoT: Lightweight Device Authentication and Key Management Mechanism for Edge-Based IoT Deployment.}, journal = {Sensors (Basel, Switzerland)}, volume = {19}, number = {24}, pages = {}, pmid = {31847431}, issn = {1424-8220}, abstract = {In recent years, edge computing has emerged as a new concept in the computing paradigm that empowers several future technologies, such as 5G, vehicle-to-vehicle communications, and the Internet of Things (IoT), by providing cloud computing facilities, as well as services to the end users. However, open communication among the entities in an edge based IoT environment makes it vulnerable to various potential attacks that are executed by an adversary. Device authentication is one of the prominent techniques in security that permits an IoT device to authenticate mutually with a cloud server with the help of an edge node. If authentication is successful, they establish a session key between them for secure communication. To achieve this goal, a novel device authentication and key management mechanism for the edge based IoT environment, called the lightweight authentication and key management scheme for the edge based IoT environment (LDAKM-EIoT), was designed. The detailed security analysis and formal security verification conducted by the widely used "Automated Validation of Internet Security Protocols and Applications (AVISPA)" tool prove that the proposed LDAKM-EIoT is secure against several attack vectors that exist in the infrastructure of the edge based IoT environment. 
The elaborated comparative analysis of the proposed LDAKM-EIoT and different closely related schemes provides evidence that LDAKM-EIoT is more secure with less communication and computation costs. Finally, the network performance parameters are calculated and analyzed using the NS2 simulation to demonstrate the practical facets of the proposed LDAKM-EIoT.}, } @article {pmid31847339, year = {2019}, author = {Capella, JV and Bonastre, A and Ors, R and Peris, M}, title = {A New Application of Internet of Things and Cloud Services in Analytical Chemistry: Determination of Bicarbonate in Water.}, journal = {Sensors (Basel, Switzerland)}, volume = {19}, number = {24}, pages = {}, pmid = {31847339}, issn = {1424-8220}, support = {DPI2016-80303-C2-1-P//Ministerio de Economía y Competitividad/ ; }, abstract = {In a constantly evolving world, new technologies such as Internet of Things (IoT) and cloud-based services offer great opportunities in many fields. In this paper we propose a new approach to the development of smart sensors using IoT and cloud computing, which open new interesting possibilities in analytical chemistry. According to IoT philosophy, these new sensors are able to integrate the generated data on the existing IoT platforms, so that information may be used whenever needed. Furthermore, the utilization of these technologies permits one to obtain sensors with significantly enhanced features using the information available in the cloud. To validate our new approach, a bicarbonate IoT-based smart sensor has been developed. A classical CO2 ion selective electrode (ISE) utilizes the pH information retrieved from the cloud and then provides an indirect measurement of bicarbonate concentration, which is offered to the cloud. The experimental data obtained are compared to those yielded by three other classical ISEs, with satisfactory results being achieved in most instances. 
Additionally, this methodology leads to lower-consumption, low-cost bicarbonate sensors capable of being employed within an IoT application, for instance in the continuous monitoring of HCO3[-] in rivers. Most importantly, this innovative application field of IoT and cloud approaches can be clearly perceived as an indicator for future developments over the short-term.}, } @article {pmid31844750, year = {2019}, author = {Haghshenas, H and Habibi, J and Fazli, MA}, title = {Parasite cloud service providers: on-demand prices on top of spot prices.}, journal = {Heliyon}, volume = {5}, number = {11}, pages = {e02877}, pmid = {31844750}, issn = {2405-8440}, abstract = {On-demand resource provisioning and elasticity are two of the main characteristics of the cloud computing paradigm. As a result, the load on a cloud service provider (CSP) is not fixed and almost always a number of its physical resources are not used, called spare resources. As the CSPs typically don't want to be overprovisioned at any time, they procure physical resources in accordance to a pessimistic forecast of their loads and this leads to a large amount of spare resources most of the time. Some CSPs rent their spare resources with a lower price called the spot price, which varies over time with respect to the market or the internal state of the CSP. In this paper, we assume the spot price to be a function of the CSP's load. We introduce the concept of a parasite CSP, which rents spare resources from several CSPs simultaneously with spot prices and rents them to its customers with an on-demand price lower than the host CSPs' on-demand prices. We propose the overall architecture and interaction model of the parasite CSP. Mathematical analysis has been made to calculate the amount of spare resources of the host CSPs, the amount of resources that the parasite CSP can rent (its virtual capacity) as well as the probability of SLA violations. 
We evaluate our analysis over pricing data gathered from Amazon EC2 services. The results show that if the parasite CSP relies on several host CSPs, its virtual capacity can be considerable and the expected penalty due to SLA violation is acceptably low.}, } @article {pmid31841201, year = {2019}, author = {Ganesan, B and Gowda, T and Al-Jumaily, A and Fong, KNK and Meena, SK and Tong, RKY}, title = {Ambient assisted living technologies for older adults with cognitive and physical impairments: a review.}, journal = {European review for medical and pharmacological sciences}, volume = {23}, number = {23}, pages = {10470-10481}, doi = {10.26355/eurrev_201912_19686}, pmid = {31841201}, issn = {2284-0729}, mesh = {*Activities of Daily Living ; Aged ; Aged, 80 and over ; Aging/physiology/psychology ; *Ambient Intelligence ; Cognitive Dysfunction/physiopathology/psychology/*rehabilitation ; Frail Elderly/psychology ; Frailty/physiopathology/psychology/*rehabilitation ; Humans ; Independent Living/psychology ; Life Expectancy/trends ; Mental Health ; Physical Fitness/physiology/psychology ; Quality of Life ; *Self-Help Devices ; }, abstract = {The global number of people over the age of 60 years is expected to increase from 970 million to 2.1 billion in 2050 and 3.1 billion in 2100. About 80% of the aging population will be in the developing countries. Aging population may suffer from various physical, cognitive, and social problems, due to aging process such as impairment of physical related functions (decreased mobility and walking speed, falls, frailty, decreased walking speed, difficulties in basic, and instrumental activities of daily living), cognitive related functions (memory-related issues), sensory functions (hearing loss, cataracts and refractive errors, presbyopia, decreased vestibular function), behavioural and psychological disorders, social isolation issues, and poor quality of life. 
Over the period of the last few decades, emerging technologies such as internet of things (IoT), artificial intelligence (AI), sensors, cloud computing, wireless communication technologies, and assistive robotics have given the vision to develop various ambient or active assisted living (AAL) approaches for supporting an elderly people to live safely and independently in their living environment and participate in their daily and community activities, as well as supporting them to maintain their physical, mental health, and quality of their life. The aim of this paper is to review the use of Ambient or Active Assisted Living for older adults with physical, cognitive impairments, and their social participation.}, } @article {pmid31839702, year = {2019}, author = {Wercelens, P and da Silva, W and Hondo, F and Castro, K and Walter, ME and Araújo, A and Lifschitz, S and Holanda, M}, title = {Bioinformatics Workflows With NoSQL Database in Cloud Computing.}, journal = {Evolutionary bioinformatics online}, volume = {15}, number = {}, pages = {1176934319889974}, pmid = {31839702}, issn = {1176-9343}, abstract = {Scientific workflows can be understood as arrangements of managed activities executed by different processing entities. It is a regular Bioinformatics approach applying workflows to solve problems in Molecular Biology, notably those related to sequence analyses. Due to the nature of the raw data and the in silico environment of Molecular Biology experiments, apart from the research subject, 2 practical and closely related problems have been studied: reproducibility and computational environment. When aiming to enhance the reproducibility of Bioinformatics experiments, various aspects should be considered. The reproducibility requirements comprise the data provenance, which enables the acquisition of knowledge about the trajectory of data over a defined workflow, the settings of the programs, and the entire computational environment. 
Cloud computing is a booming alternative that can provide this computational environment, hiding technical details, and delivering a more affordable, accessible, and configurable on-demand environment for researchers. Considering this specific scenario, we proposed a solution to improve the reproducibility of Bioinformatics workflows in a cloud computing environment using both Infrastructure as a Service (IaaS) and Not only SQL (NoSQL) database systems. To meet the goal, we have built 3 typical Bioinformatics workflows and ran them on 1 private and 2 public clouds, using different types of NoSQL database systems to persist the provenance data according to the Provenance Data Model (PROV-DM). We present here the results and a guide for the deployment of a cloud environment for Bioinformatics exploring the characteristics of various NoSQL database systems to persist provenance data.}, } @article {pmid31835804, year = {2019}, author = {Chi, PW and Wang, MH}, title = {Privacy-Preserving Broker-ABE Scheme for Multiple Cloud-Assisted Cyber Physical Systems.}, journal = {Sensors (Basel, Switzerland)}, volume = {19}, number = {24}, pages = {}, pmid = {31835804}, issn = {1424-8220}, support = {MOST 107-2218-E-003-002-MY3//Ministry of Science and Technology, Taiwan/ ; MOST 107-2218-E-035-009-MY3//Ministry of Science and Technology, Taiwan/ ; }, abstract = {Cloud-assisted cyber-physical systems (CCPSs) integrate the physical space with cloud computing. To do so, sensors on the field collect real-life data and forward it to clouds for further data analysis and decision-making. Since multiple services may be accessed at the same time, sensor data should be forwarded to different cloud service providers (CSPs). In this scenario, attribute-based encryption (ABE) is an appropriate technique for securing data communication between sensors and clouds. 
Each cloud has its own attributes and a broker can determine which cloud is authorized to access data by the requirements set at the time of encryption. In this paper, we propose a privacy-preserving broker-ABE scheme for multiple CCPSs (MCCPS). The ABE separates the policy embedding job from the ABE task. To ease the computational burden of the sensors, this scheme leaves the policy embedding task to the broker, which is generally more powerful than the sensors. Moreover, the proposed scheme provides a way for CSPs to protect data privacy from outside coercion.}, } @article {pmid31835295, year = {2019}, author = {Fan, YC and Liu, YC and Chu, CA}, title = {Efficient CORDIC Iteration Design of LiDAR Sensors' Point-Cloud Map Reconstruction Technology.}, journal = {Sensors (Basel, Switzerland)}, volume = {19}, number = {24}, pages = {}, pmid = {31835295}, issn = {1424-8220}, support = {MOST 108-2218-E-035-013//Ministry of Science and Technology, Taiwan/ ; }, abstract = {In this paper, we propose an efficient COordinate Rotation DIgital Computer (CORDIC) iteration circuit design for Light Detection and Ranging (LiDAR) sensors. A novel CORDIC architecture that achieves the goal of pre-selecting angles and reduces the number of iterations is presented for LiDAR sensors. The value of the trigonometric functions can be found in seven rotations regardless of the number of input N digits. The number of iterations are reduced by more than half. The experimental results show the similarity value to be all 1 and prove that the LiDAR decoded packet results are exactly the same as the ground truth. The total chip area is 1.93 mm × 1.93 mm and the core area is 1.32 mm × 1.32 mm, separately. The number of logic gates is 129,688. The designed chip only takes 0.012 ms and 0.912 ms to decode a packet and a 3D frame of LiDAR sensors, respectively. The throughput of the chip is 8.2105 × 10[8] bits/sec. 
The average power consumption is 237.34 mW at a maximum operating frequency of 100 MHz. This design can not only reduce the number of iterations and the computing time but also reduce the chip area. This paper provides an efficient CORDIC iteration design and solution for LiDAR sensors to reconstruct the point-cloud map for autonomous vehicles.}, } @article {pmid31817630, year = {2019}, author = {Shi, L and Wang, Z}, title = {Computational Strategies for Scalable Genomics Analysis.}, journal = {Genes}, volume = {10}, number = {12}, pages = {}, pmid = {31817630}, issn = {2073-4425}, mesh = {*Algorithms ; *Computational Biology ; *Genomics ; *High-Throughput Nucleotide Sequencing ; *Software ; }, abstract = {The revolution in next-generation DNA sequencing technologies is leading to explosive data growth in genomics, posing a significant challenge to the computing infrastructure and software algorithms for genomics analysis. Various big data technologies have been explored to scale up/out current bioinformatics solutions to mine the big genomics data. In this review, we survey some of these exciting developments in the applications of parallel distributed computing and special hardware to genomics. We comment on the pros and cons of each strategy in the context of ease of development, robustness, scalability, and efficiency. 
Although this review is written for an audience from the genomics and bioinformatics fields, it may also be informative for the audience of computer science with interests in genomics applications.}, } @article {pmid31817433, year = {2019}, author = {Li, H and Gu, Z and Deng, L and Han, Y and Yang, C and Tian, Z}, title = {A Fine-Grained Video Encryption Service Based on the Cloud-Fog-Local Architecture for Public and Private Videos.}, journal = {Sensors (Basel, Switzerland)}, volume = {19}, number = {24}, pages = {}, pmid = {31817433}, issn = {1424-8220}, support = {2018YEB1004003//National Key R&D Program of China/ ; U1636215//China grants/ ; }, abstract = {With the advancement of cloud computing and fog computing, more and more services and data are being moved from local servers to the fog and cloud for processing and storage. Videos are an important part of this movement. However, security issues involved in video moving have drawn wide attention. Although many video-encryption algorithms have been developed to protect local videos, these algorithms fail to solve the new problems faced on the media cloud, such as how to provide a video encryption service to devices with low computing power, how to meet the different encryption requirements for different type of videos, and how to ensure massive video encryption efficiency. To solve these three problems, we propose a cloud-fog-local video encryption framework which consists of a three-layer service model and corresponding key management strategies, a fine-grain video encryption algorithm based on the network abstract layer unit (NALU), and a massive video encryption framework based on Spark. The experiment proves that our proposed solution can meet the different encryption requirements for public videos and private videos. 
Moreover, in the experiment environment, our encryption algorithm for public videos reaches a speed of 1708 Mbps, and can provide a real-time encryption service for at least 42 channels of 4K-resolution videos.}, } @article {pmid31816927, year = {2019}, author = {Wang, T and Lu, Y and Cao, Z and Shu, L and Zheng, X and Liu, A and Xie, M}, title = {When Sensor-Cloud Meets Mobile Edge Computing.}, journal = {Sensors (Basel, Switzerland)}, volume = {19}, number = {23}, pages = {}, pmid = {31816927}, issn = {1424-8220}, support = {61872154//National Natural Science Foundation of China/ ; }, abstract = {Sensor-clouds are a combination of wireless sensor networks (WSNs) and cloud computing. The emergence of sensor-clouds has greatly enhanced the computing power and storage capacity of traditional WSNs via exploiting the advantages of cloud computing in resource utilization. However, there are still many problems to be solved in sensor-clouds, such as the limitations of WSNs in terms of communication and energy, the high latency, and the security and privacy issues due to applying a cloud platform as the data processing and control center. In recent years, mobile edge computing has received increasing attention from industry and academia. The core of mobile edge computing is to migrate some or all of the computing tasks of the original cloud computing center to the vicinity of the data source, which gives mobile edge computing great potential in solving the shortcomings of sensor-clouds. In this paper, the latest research status of sensor-clouds is briefly analyzed and the characteristics of the existing sensor-clouds are summarized. After that we discuss the issues of sensor-clouds and propose some applications, especially a trust evaluation mechanism and trustworthy data collection which use mobile edge computing to solve the problems in sensor-clouds. 
Finally, we discuss research challenges and future research directions in leveraging mobile edge computing for sensor-clouds.}, } @article {pmid31805762, year = {2019}, author = {Oliveira, ASF and Edsall, CJ and Woods, CJ and Bates, P and Nunez, GV and Wonnacott, S and Bermudez, I and Ciccotti, G and Gallagher, T and Sessions, RB and Mulholland, AJ}, title = {A General Mechanism for Signal Propagation in the Nicotinic Acetylcholine Receptor Family.}, journal = {Journal of the American Chemical Society}, volume = {141}, number = {51}, pages = {19953-19958}, doi = {10.1021/jacs.9b09055}, pmid = {31805762}, issn = {1520-5126}, support = {BB/L01386X/1/BB_/Biotechnology and Biological Sciences Research Council/United Kingdom ; }, mesh = {Humans ; *Molecular Dynamics Simulation ; Protein Conformation ; Receptors, Nicotinic/*chemistry/metabolism ; }, abstract = {Nicotinic acetylcholine receptors (nAChRs) modulate synaptic activity in the central nervous system. The α7 subtype, in particular, has attracted considerable interest in drug discovery as a target for several conditions, including Alzheimer's disease and schizophrenia. Identifying agonist-induced structural changes underlying nAChR activation is fundamentally important for understanding biological function and rational drug design. Here, extensive equilibrium and nonequilibrium molecular dynamics simulations, enabled by cloud-based high-performance computing, reveal the molecular mechanism by which structural changes induced by agonist unbinding are transmitted within the human α7 nAChR. The simulations reveal the sequence of coupled structural changes involved in driving conformational change responsible for biological function. 
Comparison with simulations of the α4β2 nAChR subtype identifies features of the dynamical architecture common to both receptors, suggesting a general structural mechanism for signal propagation in this important family of receptors.}, } @article {pmid31798298, year = {2019}, author = {Chung, H and Jeong, C and Luhach, AK and Nam, Y and Lee, J}, title = {Remote Pulmonary Function Test Monitoring in Cloud Platform via Smartphone Built-in Microphone.}, journal = {Evolutionary bioinformatics online}, volume = {15}, number = {}, pages = {1176934319888904}, pmid = {31798298}, issn = {1176-9343}, abstract = {With an aging population that continues to grow, health care technology plays an increasingly active role, especially for chronic disease management. In the health care market, cloud platform technology is becoming popular, as both patients and physicians demand cost efficiency, easy access to information, and security. Especially for asthma and chronic obstructive pulmonary disease (COPD) patients, it is recommended that pulmonary function test (PFT) be performed on a daily basis. However, it is difficult for patients to frequently visit a hospital to perform the PFT. In this study, we present an application and cloud platform for remote PFT monitoring that can be directly measured by smartphone microphone with no external devices. In addition, we adopted the IBM Watson Internet-of-Things (IoT) platform for PFT monitoring, using a smartphone's built-in microphone with a high-resolution time-frequency representation. We successfully demonstrated real-time PFT monitoring using the cloud platform. The PFT parameters of FEV1/FVC (%) could be remotely monitored when a subject performed the PFT test. As a pilot study, we tested 13 healthy subjects, and found that the absolute error mean was 4.12 and the standard deviation was 3.45 on all 13 subjects. 
With the developed applications on the cloud platform, patients can freely measure the PFT parameters without restriction on time and space, and a physician can monitor the patients' status in real time. We hope that the PFT monitoring platform will work as a means for early detection and treatment of patients with pulmonary diseases, especially those having asthma and COPD.}, } @article {pmid31795483, year = {2019}, author = {Olatinwo, DD and Abu-Mahfouz, A and Hancke, G}, title = {A Survey on LPWAN Technologies in WBAN for Remote Health-Care Monitoring.}, journal = {Sensors (Basel, Switzerland)}, volume = {19}, number = {23}, pages = {}, pmid = {31795483}, issn = {1424-8220}, mesh = {Biosensing Techniques/*trends ; Computer Communication Networks ; Human Body ; Humans ; *Monitoring, Physiologic ; *Wearable Electronic Devices ; Wireless Technology/*trends ; }, abstract = {In ubiquitous health-care monitoring (HCM), wireless body area networks (WBANs) are envisioned as appealing solutions that may offer reliable methods for real-time monitoring of patients' health conditions by employing the emerging communication technologies. This paper therefore focuses more on the state-of-the-art wireless communication systems that can be explored in the next-generation WBAN solutions for HCM. Also, this study addressed the critical issues confronted by the existing WBANs that are employed in HCM. Examples of such issues include wide-range health data communication constraint, health data delivery reliability concern, and energy efficiency, which are attributed to the limitations of the legacy short range, medium range, and the cellular technologies that are typically employed in WBAN systems. Since the WBAN sensor devices are usually configured with a finite battery power, they often get drained during prolonged operations. 
This phenomenon is technically exacerbated by the fact that the legacy communication systems, such as ZigBee, Bluetooth, 6LoWPAN, and so on, consume more energy during data communications. This unfortunate situation offers a scope for employing suitable communication systems identified in this study to improve the productivity of WBANs in HCM. For this to be achieved, the emerging communication systems such as the low-power wide-area networks (LPWANs) are investigated in this study based on their power transmission, data transmission rate, data reliability in the context of efficient data delivery, communication coverage, and latency, including their advantages, as well as disadvantages. As a consequence, the LPWAN solutions are presented for WBAN systems in remote HCM. Furthermore, this research work also points out future directions for the realization of the next-generation of WBANs, as well as how to improve the identified communication systems, to further enhance their productivity in WBAN solutions for HCM.}, } @article {pmid31795386, year = {2019}, author = {Shi, P and Li, N and Wang, S and Liu, Z and Ren, M and Ma, H}, title = {Quantum Multi-User Broadcast Protocol for the "Platform as a Service" Model.}, journal = {Sensors (Basel, Switzerland)}, volume = {19}, number = {23}, pages = {}, pmid = {31795386}, issn = {1424-8220}, support = {61772295, 11975132//National Natural Science Foundation of China/ ; ZR2016FB09//Natural Science Foundation of Shandong Province/ ; J18KZ012//Project of Shandong Province Higher Educational Science and Technology Program/ ; }, abstract = {Quantum Cloud Computing is the technology which has the capability to shape the future of computing. In "Platform as a Service (PaaS)" type of cloud computing, the development environment is delivered as a service. In this paper, a multi-user broadcast protocol in network is developed with the mode of one master and N slaves together with a sequence of single photons. 
It can be applied to a multi-node network, in which a single photon sequence can be sent to all the slave nodes simultaneously. In broadcast communication networks, these single photons encode classical information directly through noisy quantum communication channels. The results show that this protocol can realize the secret key generation and sharing of multiple nodes. The protocol we propose is also proved to be unconditionally secure in theory, which indicates its feasibility in theoretical application.}, } @article {pmid31795110, year = {2019}, author = {Gonzalez, LF and Vidal, I and Valera, F and Nogales, B and Sanchez-Aguero, V and Lopez, DR}, title = {Transport-Layer Limitations for NFV Orchestration in Resource-Constrained Aerial Networks.}, journal = {Sensors (Basel, Switzerland)}, volume = {19}, number = {23}, pages = {}, pmid = {31795110}, issn = {1424-8220}, support = {777137//European Commission/ ; TEC2016-76795-C6-3-R//Spanish Ministry of Economy and Competitiveness/ ; }, abstract = {In this paper, we identify the main challenges and problems related with the management and orchestration of Virtualized Network Functions (VNFs) over aerial networks built with Small Unmanned Aerial Vehicles (SUAVs). Our analysis starts from a reference scenario, where several SUAVs are deployed over a delimited geographic area, and provide a mobile cloud environment that supports the deployment of functions and services using Network Functions Virtualization (NFV) technologies. After analyzing the main challenges to NFV orchestration in this reference scenario from a theoretical perspective, we undertake the study of one specific but relevant aspect following a practical perspective, i.e., the limitations of existing transport-layer solutions to support the dissemination of NFV management and orchestration information in the considered scenario. 
While in traditional cloud computing environments this traffic is delivered using TCP, our simulation results suggest that using this protocol over an aerial network of SUAVs presents certain limitations. Finally, based on the lessons learned from our practical analysis, the paper outlines different alternatives that could be followed to address these challenges.}, } @article {pmid31784915, year = {2019}, author = {Manocha, A and Singh, R and Bhatia, M}, title = {Cognitive Intelligence Assisted Fog-Cloud Architecture for Generalized Anxiety Disorder (GAD) Prediction.}, journal = {Journal of medical systems}, volume = {44}, number = {1}, pages = {7}, pmid = {31784915}, issn = {1573-689X}, mesh = {Aged ; *Algorithms ; Anxiety Disorders/psychology/*therapy ; Cloud Computing/*statistics & numerical data ; Female ; Humans ; Male ; Middle Aged ; Monitoring, Physiologic/*methods ; Quality of Life ; Remote Sensing Technology/methods ; Telemedicine/*organization & administration ; }, abstract = {Generalized Anxiety Disorder (GAD) is a psychological disorder caused by high stress from daily life activities. It causes severe health issues, such as sore muscles, low concentration, fatigue, and sleep deprivation. The less availability of predictive solutions specifically for individuals suffering from GAD can become an imperative reason for health and psychological adversity. The proposed solution aims to monitor health, behavioral and environmental parameters of the individual to predict health adversity caused by GAD. Initially, Weighted-Naïve Bayes (W-NB) classifier is utilized to predict irregular health events by classifying the captured data at the fog layer. The proposed two-phased decision-making process helps to optimize the distribution of required medical services by determining the scale of vulnerability. 
Furthermore, the utility of the framework is increased by calculating health vulnerability index using Adaptive Neuro-Fuzzy Inference System-Genetic Algorithm (ANFIS-GA) on the cloud. The presented work addresses the concerns in terms of efficient monitoring of anomalies followed by time sensitive two-phased alert generation procedure. To approve the performance of irregular event identification and health severity prediction, the framework has been conveyed in a living room for 30 days in which almost 15 individuals by the age of 68 to 78 years have been continuously monitored. The calculated outcomes represent the monitoring efficiency of the proposed framework over the policies of manual monitoring.}, } @article {pmid31779227, year = {2019}, author = {Kamolov, A and Park, S}, title = {An IoT-Based Ship Berthing Method Using a Set of Ultrasonic Sensors.}, journal = {Sensors (Basel, Switzerland)}, volume = {19}, number = {23}, pages = {}, pmid = {31779227}, issn = {1424-8220}, support = {10058948//Ministry of Trade, Industry and Energy/ ; S1307-19-1022//Ministry of Science, ICT and National IT Industry Promotion Agency/ ; }, abstract = {It is indisputable that a great deal of brand new technologies such as the internet of things, (IoT) big data, and cloud computing are conquering every aspect of our life. So, in the branch of marine technology, the mentioned technologies are also being applied to obtain more features and to automate marine-related operations as well as creating novel smart devices. As a result of this, traditional ports and ships are being replaced by smart ports and vessels. To achieve this transition, numerous applications need to be developed to make them smart. The purpose of this paper is to present a dedicated an IoT-based system for automating linkage procedures by searching for available locations via port-mounted sensors and planned ship notification. 
In the experimental system, we have used a smartphone as an alternative to the client-side vessel of the system and created an Android app called "Smart Ship Berthing" instead of the charging program, for instance, NORIVIS 4, VDASH, ODYSSEY, etc. To test our proposed server-side system, we used a Raspberry Pi with a combination of an ultrasonic sensor to detect the ship and modify the empty berth for anchoring. The experimental results show that the set of UR sensors has high accuracy to detect ships at the port for ship berthing and our proposed system is very amenable to implementation in the real marine environment.}, } @article {pmid31775371, year = {2019}, author = {Shallari, I and O'Nils, M}, title = {From the Sensor to the Cloud: Intelligence Partitioning for Smart Camera Applications.}, journal = {Sensors (Basel, Switzerland)}, volume = {19}, number = {23}, pages = {}, pmid = {31775371}, issn = {1424-8220}, abstract = {The Internet of Things has grown quickly in the last few years, with a variety of sensing, processing and storage devices interconnected, resulting in high data traffic. While some sensors such as temperature, or humidity sensors produce a few bits of data periodically, imaging sensors output data in the range of megabytes every second. This raises a complexity for battery operated smart cameras, as they would be required to perform intensive image processing operations on large volumes of data, within energy consumption constraints. By using intelligence partitioning we analyse the effects of different partitioning scenarios for the processing tasks between the smart camera node, the fog computing layer and cloud computing, in the node energy consumption as well as the real time performance of the WVSN (Wireless Vision Sensor Node). 
The results obtained show that traditional design space exploration approaches are inefficient for WVSN, while intelligence partitioning enhances the energy consumption performance of the smart camera node and meets the timing constraints.}, } @article {pmid31771273, year = {2019}, author = {Shih, DH and Wu, TW and Liu, WX and Shih, PY}, title = {An Azure ACES Early Warning System for Air Quality Index Deteriorating.}, journal = {International journal of environmental research and public health}, volume = {16}, number = {23}, pages = {}, pmid = {31771273}, issn = {1660-4601}, mesh = {Air Pollutants/*analysis ; Air Pollution/*analysis ; *Cloud Computing ; Environmental Monitoring/*methods ; Models, Theoretical ; Taiwan ; }, abstract = {With the development of industrialization and urbanization, air pollution in many countries has become more serious and has affected people's health. The air quality has been continuously concerned by environmental managers and the public. Therefore, accurate air quality deterioration warning system can avoid health hazards. In this study, an air quality index (AQI) warning system based on Azure cloud computing platform is proposed. The prediction model is based on DFR (Decision Forest Regression), NNR (Neural Network Regression), and LR (Linear Regression) machine learning algorithms. The best algorithm was selected to calculate the 6 pollutants required for the AQI calculation of the air quality monitoring in real time. The experimental results show that the LR algorithm has the best performance, and the method of this study has a good prediction on the AQI index warning for the next one to three hours. 
Based on the ACES system proposed, it is hoped that it can prevent personal health hazards and help to reduce medical costs in public.}, } @article {pmid31766116, year = {2019}, author = {McLamore, ES and Palit Austin Datta, S and Morgan, V and Cavallaro, N and Kiker, G and Jenkins, DM and Rong, Y and Gomes, C and Claussen, J and Vanegas, D and Alocilja, EC}, title = {SNAPS: Sensor Analytics Point Solutions for Detection and Decision Support Systems.}, journal = {Sensors (Basel, Switzerland)}, volume = {19}, number = {22}, pages = {}, pmid = {31766116}, issn = {1424-8220}, support = {2018-67016-27578//National Institute of Food and Agriculture/ ; 1805512//National Science Foundation/ ; 1511953//National Science Foundation/ ; }, abstract = {In this review, we discuss the role of sensor analytics point solutions (SNAPS), a reduced complexity machine-assisted decision support tool. We summarize the approaches used for mobile phone-based chemical/biological sensors, including general hardware and software requirements for signal transduction and acquisition. We introduce SNAPS, part of a platform approach to converge sensor data and analytics. The platform is designed to consist of a portfolio of modular tools which may lend itself to dynamic composability by enabling context-specific selection of relevant units, resulting in case-based working modules. SNAPS is an element of this platform where data analytics, statistical characterization and algorithms may be delivered to the data either via embedded systems in devices, or sourced, in near real-time, from mist, fog or cloud computing resources. Convergence of the physical systems with the cyber components paves the path for SNAPS to progress to higher levels of artificial reasoning tools (ART) and emerge as data-informed decision support, as a service for general societal needs. 
Proof of concept examples of SNAPS are demonstrated both for quantitative data and qualitative data, each operated using a mobile device (smartphone or tablet) for data acquisition and analytics. We discuss the challenges and opportunities for SNAPS, centered around the value to users/stakeholders and the key performance indicators users may find helpful, for these types of machine-assisted tools.}, } @article {pmid31759298, year = {2020}, author = {Li, W and Feng, C and Yu, K and Zhao, D}, title = {MISS-D: A fast and scalable framework of medical image storage service based on distributed file system.}, journal = {Computer methods and programs in biomedicine}, volume = {186}, number = {}, pages = {105189}, doi = {10.1016/j.cmpb.2019.105189}, pmid = {31759298}, issn = {1872-7565}, mesh = {Big Data ; Diagnostic Imaging ; Information Storage and Retrieval/*methods ; Radiology Information Systems/*instrumentation ; }, abstract = {Background and Objective Processing of medical imaging big data is deeply challenging due to the size of data, computational complexity, security storage and inherent privacy issues. Traditional picture archiving and communication system, which is an imaging technology used in the healthcare industry, generally uses centralized high performance disk storage arrays in the practical solutions. The existing storage solutions are not suitable for the diverse range of medical imaging big data that needs to be stored reliably and accessed in a timely manner. The economical solution is emerging as the cloud computing which provides scalability, elasticity, performance and better managing cost. Cloud based storage architecture for medical imaging big data has attracted more and more attention in industry and academia. Methods This study presents a novel, fast and scalable framework of medical image storage service based on distributed file system. Two innovations of the framework are introduced in this paper. 
An integrated medical imaging content indexing file model for large-scale image sequence is designed to adapt to the high performance storage efficiency on distributed file system. A virtual file pooling technology is proposed, which uses the memory-mapped file method to achieve an efficient data reading process and provides the data swapping strategy in the pool. Result The experiments show that the framework not only has comparable performance of reading and writing files which meets requirements in real-time application domain, but also brings greater convenience for clinical system developers by multiple client accessing types. The framework supports different user client types through the unified micro-service interfaces which basically meet the needs of clinical system development especially for online applications. The experimental results demonstrate the framework can meet the needs of real-time data access as well as traditional picture archiving and communication system. Conclusions This framework aims to allow rapid data accessing for massive medical images, which can be demonstrated by the online web client for MISS-D framework implemented in this paper for real-time data interaction. 
The framework also provides a substantial subset of features to existing open-source and commercial alternatives, which has a wide range of potential applications.}, } @article {pmid31743060, year = {2020}, author = {Koumpouros, Y and Georgoulas, A}, title = {A systematic review of mHealth funded R&D activities in EU: Trends, technologies and obstacles.}, journal = {Informatics for health & social care}, volume = {45}, number = {2}, pages = {168-187}, doi = {10.1080/17538157.2019.1656208}, pmid = {31743060}, issn = {1753-8165}, mesh = {Computer Security ; Computers, Handheld/trends ; Confidentiality ; Europe ; Financing, Government/statistics & numerical data ; Humans ; Information Systems/*trends ; Research/*economics/*trends ; Telemedicine/*economics/*trends ; }, abstract = {OBJECTIVE: This study provides a systematic review of EU-funded mHealth projects.

METHODS: The review was conducted based mainly on the Projects and Results service provided by the EU Open Data Portal. Even though the search strategy yielded a large number of results, only 45 projects finally met all the inclusion criteria.

RESULTS: The review results reveal useful information regarding mHealth solutions and trends that emerge nowadays in the EU, the diseases addressed, the level of adoption by users and providers, the technological approaches, the projects' structure, and the overall impact. New areas of application, like behavioral intervention approaches as well as an apparent trend towards affective computing, big data, cloud computing, open standards and platforms have also been recognized and recorded. Core legal issues with regard to data security and privacy still pose challenges to mHealth projects, while commercialization of the developed solutions is slow. Interdisciplinary consortia with the participation of a significant number of SMEs and public healthcare organizations are also key factors for a successful project.

CONCLUSION: The study provides researchers and decision-makers with a complete and systematically organized knowledge base in order to plan new mHealth initiatives.}, } @article {pmid31737767, year = {2019}, author = {Brewer, P and Ratan, A}, title = {Data and replication supplement for double auction markets with snipers.}, journal = {Data in brief}, volume = {27}, number = {}, pages = {104729}, pmid = {31737767}, issn = {2352-3409}, abstract = {We provide a dataset for our research article "Profitability, Efficiency and Inequality in Double Auction Markets with Snipers" [1]. This dataset [2] includes configuration files, raw output data, and replications of calculated metrics for our robot-populated market simulations. The raw data is subdivided into a hierarchy of folders corresponding to simulation treatment variables, in a 2 × 2 × 21 design for 84 treatments in total. Treatments variables include: (i) robot population ordering, either "primary" or "reverse"; (ii) two market schedules of agent's values and costs: equal-expected-profit "market 1" and unequal-expected-profit "market 2"; (iii) 21 robot populations identified by the number of Sniper Bots (0-20) on each side of the market. Each treatment directory contains a simulator input file and outputs for 10,000 periods of market data. The outputs include all acceptable buy and sell orders, all trades, profits for each agent, and market metrics such as efficiency-of-allocation, Gini coefficient, and price statistics. An additional public copy in Google Cloud is available for database query by users of Google BigQuery. The market simulator software is a private product created by Paul Brewer at Economic and Financial Technology Consulting LLC. Free open source modules are available for tech-savvy users at GitHub, NPM, and Docker Hub repositories and are sufficient to repeat the simulations. An easier-to-use paid market simulation product will eventually be available online from Econ1.Net. 
We provide instructions for repeating individual simulations using the free open source simulator and the free container tool Docker.}, } @article {pmid31724999, year = {2020}, author = {Bhandari, M and Zeffiro, T and Reddiboina, M}, title = {Artificial intelligence and robotic surgery: current perspective and future directions.}, journal = {Current opinion in urology}, volume = {30}, number = {1}, pages = {48-54}, doi = {10.1097/MOU.0000000000000692}, pmid = {31724999}, issn = {1473-6586}, mesh = {*Artificial Intelligence ; Humans ; Robotic Surgical Procedures/*methods/trends ; *Robotics ; }, abstract = {PURPOSE OF REVIEW: This review aims to draw a road-map to the use of artificial intelligence in an era of robotic surgery and highlight the challenges inherent to this process.

RECENT FINDINGS: Conventional mechanical robots function by transmitting actions of the surgeon's hands to the surgical target through the tremor-filtered movements of surgical instruments. Similarly, the next iteration of surgical robots conform human-initiated actions to a personalized surgical plan leveraging 3D digital segmentation generated prior to surgery. The advancements in cloud computing, big data analytics, and artificial intelligence have led to increased research and development of intelligent robots in all walks of human life. Inspired by the successful application of deep learning, several surgical companies are joining hands with tech giants to develop intelligent surgical robots. We, hereby, highlight key steps in the handling and analysis of big data to build, define, and deploy deep-learning models for building autonomous robots.

SUMMARY: Despite tremendous growth of autonomous robotics, their entry into the operating room remains elusive. It is time that surgeons actively collaborate for the development of the next generation of intelligent robotic surgery.}, } @article {pmid31721807, year = {2019}, author = {Shukla, S and Hassan, MF and Khan, MK and Jung, LT and Awang, A}, title = {An analytical model to minimize the latency in healthcare internet-of-things in fog computing environment.}, journal = {PloS one}, volume = {14}, number = {11}, pages = {e0224934}, pmid = {31721807}, issn = {1932-6203}, mesh = {*Cloud Computing ; Computer Communication Networks ; Computer Simulation ; Databases as Topic ; *Delivery of Health Care ; Electrocardiography ; Fuzzy Logic ; *Internet of Things ; *Models, Theoretical ; Support Vector Machine ; User-Computer Interface ; }, abstract = {Fog computing (FC) is an evolving computing technology that operates in a distributed environment. FC aims to bring cloud computing features close to edge devices. The approach is expected to fulfill the minimum latency requirement for healthcare Internet-of-Things (IoT) devices. Healthcare IoT devices generate various volumes of healthcare data. This large volume of data results in high data traffic that causes network congestion and high latency. An increase in round-trip time delay owing to large data transmission and large hop counts between IoTs and cloud servers render healthcare data meaningless and inadequate for end-users. Time-sensitive healthcare applications require real-time data. Traditional cloud servers cannot fulfill the minimum latency demands of healthcare IoT devices and end-users. Therefore, communication latency, computation latency, and network latency must be reduced for IoT data transmission. FC affords the storage, processing, and analysis of data from cloud computing to a network edge to reduce high latency. A novel solution for the abovementioned problem is proposed herein. 
It includes an analytical model and a hybrid fuzzy-based reinforcement learning algorithm in an FC environment. The aim is to reduce high latency among healthcare IoTs, end-users, and cloud servers. The proposed intelligent FC analytical model and algorithm use a fuzzy inference system combined with reinforcement learning and neural network evolution strategies for data packet allocation and selection in an IoT-FC environment. The approach is tested on simulators iFogSim (Net-Beans) and Spyder (Python). The obtained results indicated the better performance of the proposed approach compared with existing methods.}, } @article {pmid31720857, year = {2020}, author = {Mehdipoor, H and Zurita-Milla, R and Augustijn, EW and Izquierdo-Verdiguier, E}, title = {Exploring differences in spatial patterns and temporal trends of phenological models at continental scale using gridded temperature time-series.}, journal = {International journal of biometeorology}, volume = {64}, number = {3}, pages = {409-421}, pmid = {31720857}, issn = {1432-1254}, mesh = {*Climate ; *Climate Change ; Plant Development ; Seasons ; Temperature ; }, abstract = {Phenological models are widely used to estimate the influence of weather and climate on plant development. The goodness of fit of phenological models often is assessed by considering the root-mean-square error (RMSE) between observed and predicted dates. However, the spatial patterns and temporal trends derived from models with similar RMSE may vary considerably. In this paper, we analyse and compare patterns and trends from a suite of temperature-based phenological models, namely extended spring indices, thermal time and photothermal time models. These models were first calibrated using lilac leaf onset observations for the period 1961-1994. Next, volunteered phenological observations and daily gridded temperature data were used to validate the models. 
After that, the two most accurate models were used to evaluate the patterns and trends of leaf onset for the conterminous US over the period 2000-2014. Our results show that the RMSEs of extended spring indices and thermal time models are similar and about 2 days lower than those produced by the other models. Yet the dates of leaf out produced by each of the models differ by up to 11 days, and the trends differ by up to a week per decade. The results from the histograms and difference maps show that the statistical significance of these trends strongly depends on the type of model applied. Therefore, further work should focus on the development of metrics that can quantify the difference between patterns and trends derived from spatially explicit phenological models. Such metrics could subsequently be used to validate phenological models in both space and time.}, } @article {pmid31717617, year = {2019}, author = {Xu, R and Jin, W and Kim, D}, title = {Microservice Security Agent Based On API Gateway in Edge Computing.}, journal = {Sensors (Basel, Switzerland)}, volume = {19}, number = {22}, pages = {}, pmid = {31717617}, issn = {1424-8220}, abstract = {Internet of Things (IoT) devices are embedded with software, electronics, and sensors, and feature connectivity with constrained resources. They require the edge computing paradigm, with modular characteristics relying on microservices, to provide an extensible and lightweight computing framework at the edge of the network. Edge computing can relieve the burden of centralized cloud computing by performing certain operations, such as data storage and task computation, at the edge of the network. Despite the benefits of edge computing, it can lead to many challenges in terms of security and privacy issues. Thus, services that protect privacy and secure data are essential functions in edge computing. 
For example, the end user's ownership and privacy information and control are separated, which can easily lead to data leakage, unauthorized data manipulation, and other data security concerns. Thus, the confidentiality and integrity of the data cannot be guaranteed and, so, more secure authentication and access mechanisms are required to ensure that the microservices are exposed only to authorized users. In this paper, we propose a microservice security agent to integrate the edge computing platform with the API gateway technology for presenting a secure authentication mechanism. The aim of this platform is to afford edge computing clients a practical application which provides user authentication and allows JSON Web Token (JWT)-based secure access to the services of edge computing. To integrate the edge computing platform with the API gateway, we implement a microservice security agent based on the open-source Kong in the EdgeX Foundry framework. Also, to provide an easy-to-use approach with Kong, we implement REST APIs for generating new consumers, registering services, and configuring access controls. Finally, the usability of the proposed approach is demonstrated by evaluating the round trip time (RTT). 
The results demonstrate the efficiency of the system and its suitability for real-world applications.}, } @article {pmid31713622, year = {2020}, author = {Luo, Y and Hitz, BC and Gabdank, I and Hilton, JA and Kagda, MS and Lam, B and Myers, Z and Sud, P and Jou, J and Lin, K and Baymuradov, UK and Graham, K and Litton, C and Miyasato, SR and Strattan, JS and Jolanki, O and Lee, JW and Tanaka, FY and Adenekan, P and O'Neill, E and Cherry, JM}, title = {New developments on the Encyclopedia of DNA Elements (ENCODE) data portal.}, journal = {Nucleic acids research}, volume = {48}, number = {D1}, pages = {D882-D889}, pmid = {31713622}, issn = {1362-4962}, support = {U24 HG009397/HG/NHGRI NIH HHS/United States ; }, mesh = {Animals ; DNA/*genetics ; *Databases, Genetic ; *Genome, Human ; Genomics ; Humans ; Mice ; *Software ; }, abstract = {The Encyclopedia of DNA Elements (ENCODE) is an ongoing collaborative research project aimed at identifying all the functional elements in the human and mouse genomes. Data generated by the ENCODE consortium are freely accessible at the ENCODE portal (https://www.encodeproject.org/), which is developed and maintained by the ENCODE Data Coordinating Center (DCC). Since the initial portal release in 2013, the ENCODE DCC has updated the portal to make ENCODE data more findable, accessible, interoperable and reusable. 
Here, we report on recent updates, including new ENCODE data and assays, ENCODE uniform data processing pipelines, new visualization tools, a dataset cart feature, unrestricted public access to ENCODE data on the cloud (Amazon Web Services open data registry, https://registry.opendata.aws/encode-project/) and more comprehensive tutorials and documentation.}, } @article {pmid31710301, year = {2019}, author = {Bai, J and Jhaney, I and Wells, J}, title = {Developing a Reproducible Microbiome Data Analysis Pipeline Using the Amazon Web Services Cloud for a Cancer Research Group: Proof-of-Concept Study.}, journal = {JMIR medical informatics}, volume = {7}, number = {4}, pages = {e14667}, pmid = {31710301}, issn = {2291-9694}, support = {K99 NR017897/NR/NINR NIH HHS/United States ; }, abstract = {BACKGROUND: Cloud computing for microbiome data sets can significantly increase working efficiencies and expedite the translation of research findings into clinical practice. The Amazon Web Services (AWS) cloud provides an invaluable option for microbiome data storage, computation, and analysis.

OBJECTIVE: The goals of this study were to develop a microbiome data analysis pipeline by using AWS cloud and to conduct a proof-of-concept test for microbiome data storage, processing, and analysis.

METHODS: A multidisciplinary team was formed to develop and test a reproducible microbiome data analysis pipeline with multiple AWS cloud services that could be used for storage, computation, and data analysis. The microbiome data analysis pipeline developed in AWS was tested by using two data sets: 19 vaginal microbiome samples and 50 gut microbiome samples.

RESULTS: Using AWS features, we developed a microbiome data analysis pipeline that included Amazon Simple Storage Service for microbiome sequence storage, Linux Elastic Compute Cloud (EC2) instances (ie, servers) for data computation and analysis, and security keys to create and manage the use of encryption for the pipeline. Bioinformatics and statistical tools (ie, Quantitative Insights Into Microbial Ecology 2 and RStudio) were installed within the Linux EC2 instances to run microbiome statistical analysis. The microbiome data analysis pipeline was performed through command-line interfaces within the Linux operating system or in the Mac operating system. Using this new pipeline, we were able to successfully process and analyze 50 gut microbiome samples within 4 hours at a very low cost (a c4.4xlarge EC2 instance costs $0.80 per hour). Gut microbiome findings regarding diversity, taxonomy, and abundance analyses were easily shared within our research team.

CONCLUSIONS: Building a microbiome data analysis pipeline with AWS cloud is feasible. This pipeline is highly reliable, computationally powerful, and cost effective. Our AWS-based microbiome analysis pipeline provides an efficient tool to conduct microbiome data analysis.}, } @article {pmid31707420, year = {2020}, author = {Lo Piparo, E and Siragusa, L and Raymond, F and Passeri, GI and Cruciani, G and Schilter, B}, title = {Bisphenol A binding promiscuity: A virtual journey through the universe of proteins.}, journal = {ALTEX}, volume = {37}, number = {1}, pages = {85-94}, doi = {10.14573/altex.1906141}, pmid = {31707420}, issn = {1868-8551}, mesh = {Benzhydryl Compounds/*chemistry/metabolism ; Computational Chemistry ; Databases, Protein ; Estrogens, Non-Steroidal/chemistry ; Phenols/*chemistry/metabolism ; Protein Binding ; Protein Conformation ; Proteins/*chemistry ; }, abstract = {Significant efforts are currently being made to move toxicity testing from animal experimentation to human relevant, mechanism-based approaches. In this context, the identification of molecular target(s) responsible for mechanisms of action is an essential step. Inspired by the recent concept of polypharmacology (the ability of drugs to interact with multiple targets) we argue that whole proteome virtual screening might become a breakthrough tool in toxicology reflecting the real complexity of chemical-biological interactions. Therefore, we investigated the value of performing ligand-protein binding prediction screening across the full proteome to identify new mechanisms of action for food chemicals. We applied the new approach to make a broader comparison between bisphenol A (BPA) (food-packaging chemical) and the endogenous estrogen, 17β-estradiol (EST). 
Applying a novel high-throughput ligand-protein binding prediction tool (BioGPS) by the Amazon Web Services (AWS) cloud (to speed-up the calculation), we investigated the value of performing in silico screening across the full proteome (all human and rodent x-ray protein structures available in the Protein Data Bank). The strong correlation between in silico predictions and available in vitro data demonstrates the high predictive power of the method used. The most striking result obtained was that BPA was predicted to bind to many more proteins than the ones already known, most of which were common to EST. Our findings provide a new and unprecedented insight on the complexity of chemical-protein interactions, highlighting the binding promiscuity of BPA and its broader similarity compared to the female sex hormone, EST.}, } @article {pmid31703611, year = {2019}, author = {Heldenbrand, JR and Baheti, S and Bockol, MA and Drucker, TM and Hart, SN and Hudson, ME and Iyer, RK and Kalmbach, MT and Kendig, KI and Klee, EW and Mattson, NR and Wieben, ED and Wiepert, M and Wildman, DE and Mainzer, LS}, title = {Recommendations for performance optimizations when using GATK3.8 and GATK4.}, journal = {BMC bioinformatics}, volume = {20}, number = {1}, pages = {557}, pmid = {31703611}, issn = {1471-2105}, mesh = {Algorithms ; Chromosomes, Human/genetics ; Genome, Human ; Genomics/*methods ; Haplotypes/genetics ; High-Throughput Nucleotide Sequencing ; Humans ; *Software ; }, abstract = {BACKGROUND: Use of the Genome Analysis Toolkit (GATK) continues to be the standard practice in genomic variant calling in both research and the clinic. Recently the toolkit has been rapidly evolving. Significant computational performance improvements have been introduced in GATK3.8 through collaboration with Intel in 2017. The first release of GATK4 in early 2018 revealed rewrites in the code base, as the stepping stone toward a Spark implementation. 
As the software continues to be a moving target for optimal deployment in highly productive environments, we present a detailed analysis of these improvements, to help the community stay abreast with changes in performance.

RESULTS: We re-evaluated multiple options, such as threading, parallel garbage collection, I/O options and data-level parallelization. Additionally, we considered the trade-offs of using GATK3.8 and GATK4. We found optimized parameter values that reduce the time of executing the best practices variant calling procedure by 29.3% for GATK3.8 and 16.9% for GATK4. Further speedups can be accomplished by splitting data for parallel analysis, resulting in run time of only a few hours on whole human genome sequenced to the depth of 20X, for both versions of GATK. Nonetheless, GATK4 is already much more cost-effective than GATK3.8. Thanks to significant rewrites of the algorithms, the same analysis can be run largely in a single-threaded fashion, allowing users to process multiple samples on the same CPU.

CONCLUSIONS: In time-sensitive situations, when a patient has a critical or rapidly developing condition, it is useful to minimize the time to process a single sample. In such cases we recommend using GATK3.8 by splitting the sample into chunks and computing across multiple nodes. The resultant walltime will be ∼4 hours at the cost of $41.60 on 4 c5.18xlarge instances of Amazon Cloud. For cost-effectiveness of routine analyses or for large population studies, it is useful to maximize the number of samples processed per unit time. Thus we recommend GATK4, running multiple samples on one node. The total walltime will be ∼34.1 hours on 40 samples, with 1.18 samples processed per hour at the cost of $2.60 per sample on c5.18xlarge instance of Amazon Cloud.}, } @article {pmid31698581, year = {2019}, author = {Lu, TJ and Zhong, X and Zhong, L and Luo, RQ}, title = {A location-aware feature extraction algorithm for image recognition in mobile edge computing.}, journal = {Mathematical biosciences and engineering : MBE}, volume = {16}, number = {6}, pages = {6672-6682}, doi = {10.3934/mbe.2019332}, pmid = {31698581}, issn = {1551-0018}, abstract = {With the explosive growth of mobile devices, it is feasible to deploy image recognition applications on mobile devices to provide image recognition services. However, traditional mobile cloud computing architecture cannot meet the demands of real time response and high accuracy since users require to upload raw images to the remote central cloud servers. The emerging architecture, Mobile Edge Computing (MEC) deploys small scale servers at the edge of the network, which can provide computing and storage resources for image recognition applications. To this end, in this paper, we aim to use the MEC architecture to provide image recognition service. 
Moreover, in order to guarantee the real time response and high accuracy, we also provide a feature extraction algorithm to extract discriminative features from the raw image to improve the accuracy of the image recognition applications. In doing so, the response time can be further reduced and the accuracy can be improved. The experimental results show that the combination between MEC architecture and the proposed feature extraction algorithm not only can greatly reduce the response time, but also improve the accuracy of the image recognition applications.}, } @article {pmid31694254, year = {2019}, author = {Basir, R and Qaisar, S and Ali, M and Aldwairi, M and Ashraf, MI and Mahmood, A and Gidlund, M}, title = {Fog Computing Enabling Industrial Internet of Things: State-of-the-Art and Research Challenges.}, journal = {Sensors (Basel, Switzerland)}, volume = {19}, number = {21}, pages = {}, pmid = {31694254}, issn = {1424-8220}, abstract = {Industry is going through a transformation phase, enabling automation and data exchange in manufacturing technologies and processes, and this transformation is called Industry 4.0. Industrial Internet-of-Things (IIoT) applications require real-time processing, near-by storage, ultra-low latency, reliability and high data rate, all of which can be satisfied by fog computing architecture. With smart devices expected to grow exponentially, the need for an optimized fog computing architecture and protocols is crucial. Therein, efficient, intelligent and decentralized solutions are required to ensure real-time connectivity, reliability and green communication. In this paper, we provide a comprehensive review of methods and techniques in fog computing. Our focus is on fog infrastructure and protocols in the context of IIoT applications. 
This article has two main research areas: In the first half, we discuss the history of industrial revolution, application areas of IIoT followed by key enabling technologies that act as building blocks for industrial transformation. In the second half, we focus on fog computing, providing solutions to critical challenges and as an enabler for IIoT application domains. Finally, open research challenges are discussed to enlighten fog computing aspects in different fields and technologies.}, } @article {pmid31687569, year = {2019}, author = {Vargas-Salgado, C and Aguila-Leon, J and Chiñas-Palacios, C and Hurtado-Perez, E}, title = {Low-cost web-based Supervisory Control and Data Acquisition system for a microgrid testbed: A case study in design and implementation for academic and research applications.}, journal = {Heliyon}, volume = {5}, number = {9}, pages = {e02474}, doi = {10.1016/j.heliyon.2019.e02474}, pmid = {31687569}, issn = {2405-8440}, abstract = {This paper presents the design and implementation of a low-cost Supervisory Control and Data Acquisition system based on a Web interface to be applied to a Hybrid Renewable Energy System (HRES) microgrid. This development will provide a reliable and low-cost control and data acquisition systems for the Renewable Energy Laboratory at Universitat Politècnica de València (LabDER-UPV) in Spain, oriented to the research on microgrid stability and energy generation. The developed low-cost SCADA operates on a microgrid that incorporates a photovoltaic array, a wind turbine, a biomass gasification plant and a battery bank as an energy storage system. Sensors and power meters for electrical parameters, such as voltage, current, frequency, power factor, power generation, and energy consumption, were processed digitally and integrated into Arduino-based devices. 
A master device on a Raspberry-PI board was set up to send all this information to a local database (DB), and a MySQL Web-DB linked to a Web SCADA interface, programmed in HTML5. The communications protocols include TCP/IP, I2C, SPI, and Serial communication; Arduino-based slave devices communicate with the master Raspberry-PI using NRF24L01 wireless radio frequency transceivers. Finally, a comparison between a standard SCADA against the developed Web-based SCADA system is carried out. The results of the operative tests and the cost comparison of the own-designed developed Web-SCADA system prove its reliability and low-cost, on average an 86% cheaper than a standard brandmark solution, for controlling, monitoring and data logging information, as well as for local and remote operation system when applied to the HRES microgrid testbed.}, } @article {pmid31682421, year = {2020}, author = {Grebner, C and Malmerberg, E and Shewmaker, A and Batista, J and Nicholls, A and Sadowski, J}, title = {Virtual Screening in the Cloud: How Big Is Big Enough?.}, journal = {Journal of chemical information and modeling}, volume = {60}, number = {9}, pages = {4274-4282}, doi = {10.1021/acs.jcim.9b00779}, pmid = {31682421}, issn = {1549-960X}, mesh = {*Cloud Computing ; *Computer-Aided Design ; Ligands ; }, abstract = {Virtual screening is a standard tool in Computer-Assisted Drug Design (CADD). Early in a project, it is typical to use ligand-based similarity search methods to find suitable hit molecules. However, the number of compounds which can be screened and the time required are usually limited by computational resources. We describe here a high-throughput virtual screening project using 3D similarity (FastROCS) and automated evaluation workflows on Orion, a cloud computing platform. 
Cloud resources make this approach fully scalable and flexible, allowing the generation and search of billions of virtual molecules, and give access to an explicit 3D virtual chemistry space not available before. We discuss the impact of the size of the search space with respect to finding novel chemical hits and the size of the required hit list, as well as computational and economical aspects of resource scaling.}, } @article {pmid31681834, year = {2019}, author = {Ongari, D and Yakutovich, AV and Talirz, L and Smit, B}, title = {Building a Consistent and Reproducible Database for Adsorption Evaluation in Covalent-Organic Frameworks.}, journal = {ACS central science}, volume = {5}, number = {10}, pages = {1663-1675}, pmid = {31681834}, issn = {2374-7943}, abstract = {We present a workflow that traces the path from the bulk structure of a crystalline material to assessing its performance in carbon capture from coal's postcombustion flue gases. This workflow is applied to a database of 324 covalent-organic frameworks (COFs) reported in the literature, to characterize their CO2 adsorption properties using the following steps: (1) optimization of the crystal structure (atomic positions and unit cell) using density functional theory, (2) fitting atomic point charges based on the electron density, (3) characterizing the pore geometry of the structures before and after optimization, (4) computing carbon dioxide and nitrogen isotherms using grand canonical Monte Carlo simulations with an empirical interaction potential, and finally, (5) assessing the CO2 parasitic energy via process modeling. The full workflow has been encoded in the Automated Interactive Infrastructure and Database for Computational Science (AiiDA). 
Both the workflow and the automatically generated provenance graph of our calculations are made available on the Materials Cloud, allowing peers to inspect every input parameter and result along the workflow, download structures and files at intermediate stages, and start their research right from where this work has left off. In particular, our set of CURATED (Clean, Uniform, and Refined with Automatic Tracking from Experimental Database) COFs, having optimized geometry and high-quality DFT-derived point charges, are available for further investigations of gas adsorption properties. We plan to update the database as new COFs are being reported.}, } @article {pmid31663857, year = {2019}, author = {Shorey, S and Ang, E and Yap, J and Ng, ED and Lau, ST and Chui, CK}, title = {A Virtual Counseling Application Using Artificial Intelligence for Communication Skills Training in Nursing Education: Development Study.}, journal = {Journal of medical Internet research}, volume = {21}, number = {10}, pages = {e14658}, pmid = {31663857}, issn = {1438-8871}, mesh = {*Artificial Intelligence ; Clinical Competence ; Communication ; Counseling/*methods ; Education, Nursing/*methods ; Female ; Humans ; Virtual Reality ; }, abstract = {BACKGROUND: The ability of nursing undergraduates to communicate effectively with health care providers, patients, and their family members is crucial to their nursing professions as these can affect patient outcomes. However, the traditional use of didactic lectures for communication skills training is ineffective, and the use of standardized patients is not time- or cost-effective. Given the abilities of virtual patients (VPs) to simulate interactive and authentic clinical scenarios in secured environments with unlimited training attempts, a virtual counseling application is an ideal platform for nursing students to hone their communication skills before their clinical postings.

OBJECTIVE: The aim of this study was to develop and test the use of VPs to better prepare nursing undergraduates for communicating with real-life patients, their family members, and other health care professionals during their clinical postings.

METHODS: The stages of the creation of VPs included preparation, design, and development, followed by a testing phase before the official implementation. An initial voice chatbot was trained using a natural language processing engine, Google Cloud's Dialogflow, and was later visualized into a three-dimensional (3D) avatar form using Unity 3D.

RESULTS: The VPs included four case scenarios that were congruent with the nursing undergraduates' semesters' learning objectives: (1) assessing the pain experienced by a pregnant woman, (2) taking the history of a depressed patient, (3) escalating a bleeding episode of a postoperative patient to a physician, and (4) showing empathy to a stressed-out fellow final-year nursing student. Challenges arose in terms of content development, technological limitations, and expectations management, which can be resolved by contingency planning, open communication, constant program updates, refinement, and training.

CONCLUSIONS: The creation of VPs to assist in nursing students' communication skills training may provide authentic learning environments that enhance students' perceived self-efficacy and confidence in effective communication skills. However, given the infancy stage of this project, further refinement and constant enhancements are needed to train the VPs to simulate real-life conversations before the official implementation.}, } @article {pmid31661935, year = {2019}, author = {Pop, C and Antal, M and Cioara, T and Anghel, I and Salomie, I and Bertoncini, M}, title = {A Fog Computing enabled Virtual Power Plant Model for Delivery of Frequency Restoration Reserve Services.}, journal = {Sensors (Basel, Switzerland)}, volume = {19}, number = {21}, pages = {}, pmid = {31661935}, issn = {1424-8220}, support = {774478//Horizon 2020 Framework Programme/ ; }, abstract = {Nowadays, centralized energy grid systems are transitioning towards more decentralized systems driven by the need for efficient local integration of new deployed small scale renewable energy sources. The high limits for accessing the energy markets and also for the delivery of ancillary services act as a barrier for small scale prosumers participation forcing the implementation of new cooperative business models at the local level. This paper is proposing a fog computing infrastructure for the local management of energy systems and the creation of coalitions of prosumers able to provide ancillary services to the grid. It features an edge devices layer for energy monitoring of individual prosumers, a fog layer providing Information and Communication Technologies (ICT) techniques for managing local energy systems by implementing cooperative models, and a cloud layer where the service specific technical requirements are defined. 
On top, a model has been defined allowing the dynamical construction of coalitions of prosumers as Virtual Power Plants at the fog layer for the provisioning of frequency restoration reserve services while considering both the prosumers' local constraints and the service ones as well as the constituents' profit maximization. Simulation results show our solution effectiveness in selecting the optimal coalition of prosumers to reliably deliver the service meeting the technical constraints while featuring a low time and computation overhead being feasible to be run closer to the edge.}, } @article {pmid31658684, year = {2019}, author = {Wang, Y and Yang, J and Guo, X and Qu, Z}, title = {Satellite Edge Computing for the Internet of Things in Aerospace.}, journal = {Sensors (Basel, Switzerland)}, volume = {19}, number = {20}, pages = {}, pmid = {31658684}, issn = {1424-8220}, abstract = {As one of the information industry's future development directions, the Internet of Things (IoT) has been widely used. In order to reduce the pressure on the network caused by the long distance between the processing platform and the terminal, edge computing provides a new paradigm for IoT applications. In many scenarios, the IoT devices are distributed in remote areas or extreme terrain and cannot be accessed directly through the terrestrial network, and data transmission can only be achieved via satellite. However, traditional satellites are highly customized, and on-board resources are designed for specific applications rather than universal computing. Therefore, we propose to transform the traditional satellite into a space edge computing node. It can dynamically load software in orbit, flexibly share on-board resources, and provide services coordinated with the cloud. The corresponding hardware structure and software architecture of the satellite is presented. 
Through the modeling analysis and simulation experiments of the application scenarios, the results show that the space edge computing system takes less time and consumes less energy than the traditional satellite constellation. The quality of service is mainly related to the number of satellites, satellite performance, and task offloading strategy.}, } @article {pmid31639049, year = {2019}, author = {Paul, AJ and Lawrence, D and Song, M and Lim, SH and Pan, C and Ahn, TH}, title = {Using Apache Spark on genome assembly for scalable overlap-graph reduction.}, journal = {Human genomics}, volume = {13}, number = {Suppl 1}, pages = {48}, pmid = {31639049}, issn = {1479-7364}, mesh = {*Algorithms ; Base Sequence ; Conyza/genetics ; Databases, Genetic ; *Genome ; Genome, Human ; Genome, Plant ; Humans ; *Sequence Analysis, DNA ; }, abstract = {BACKGROUND: De novo genome assembly is a technique that builds the genome of a specimen using overlaps of genomic fragments without additional work with reference sequence. Sequence fragments (called reads) are assembled as contigs and scaffolds by the overlaps. The quality of the de novo assembly depends on the length and continuity of the assembly. To enable faster and more accurate assembly of species, existing sequencing techniques have been proposed, for example, high-throughput next-generation sequencing and long-reads-producing third-generation sequencing. However, these techniques require a large amounts of computer memory when very huge-size overlap graphs are resolved. Also, it is challenging for parallel computation.

RESULTS: To address the limitations, we propose an innovative algorithmic approach, called Scalable Overlap-graph Reduction Algorithms (SORA). SORA is an algorithm package that performs string graph reduction algorithms by Apache Spark. The SORA's implementations are designed to execute de novo genome assembly on either a single machine or a distributed computing platform. SORA efficiently compacts the number of edges on enormous graphing paths by adapting scalable features of graph processing libraries provided by Apache Spark, GraphX and GraphFrames.

CONCLUSIONS: We shared the algorithms and the experimental results at our project website, https://github.com/BioHPC/SORA . We evaluated SORA with the human genome samples. First, it processed a nearly one billion edge graph on a distributed cloud cluster. Second, it processed mid-to-small size graphs on a single workstation within a short time frame. Overall, SORA achieved the linear-scaling simulations for the increased computing instances.}, } @article {pmid31630064, year = {2019}, author = {Zandesh, Z and Ghazisaeedi, M and Devarakonda, MV and Haghighi, MS}, title = {Legal framework for health cloud: A systematic review.}, journal = {International journal of medical informatics}, volume = {132}, number = {}, pages = {103953}, doi = {10.1016/j.ijmedinf.2019.103953}, pmid = {31630064}, issn = {1872-8243}, mesh = {Cloud Computing/*legislation & jurisprudence ; Computer Security/*standards ; *Confidentiality ; Databases, Factual ; Delivery of Health Care ; Humans ; Information Storage and Retrieval/*legislation & jurisprudence ; *Privacy ; }, abstract = {BACKGROUND: The complicated nature of cloud computing encompassing internet-based technologies and service models for delivering IT applications, processing capability, storage, and memory space brings along challenging problems. Some issues such as information security, privacy, and legal aspects of cloud computing may become challenging while cross passing with another complex domain like healthcare.

OBJECTIVES: The present study was conducted to report the results of a systematic literature review on the legal aspects of health cloud.

METHOD: The original English papers published in PubMed, Scopus, Web of Science, and IEEE Digital Library databases were extracted, among which 1582 were related to the legal aspects of health cloud environment and were selected using predefined search strings.

CONCLUSION: Through the review process, effective factors in relation to a health cloud legal framework were identified and accordingly, a proper design was developed for this domain. Next, the identified factors were confirmed and adjusted by mapping the contents of the selected papers to different categories and subcategories under the proposed framework. Five Main categories like the issues related to the compliance, data protection, Identity Credential Access Management (ICAM), ownership, and quality of service were selected as the basic pillars in the proposed framework. Finally, 22 papers were selected, among which 19 were mapped to the compliance issues, 18 the issues related to "Data protection" were addressed, and 14 "Identity Credential Access Management (ICAM)" was discussed. Fifteen Papers were mapped to "Data ownership" and "Quality of service" categories. Some papers were found to present some solutions in all the mentioned areas; however, most of them have addressed only a few issues.}, } @article {pmid31627468, year = {2019}, author = {Cabo, C and Ordóñez, C and Sáchez-Lasheras, F and Roca-Pardiñas, J and Cos-Juez, AJ}, title = {Multiscale Supervised Classification of Point Clouds with Urban and Forest Applications.}, journal = {Sensors (Basel, Switzerland)}, volume = {19}, number = {20}, pages = {}, pmid = {31627468}, issn = {1424-8220}, abstract = {We analyze the utility of multiscale supervised classification algorithms for object detection and extraction from laser scanning or photogrammetric point clouds. Only the geometric information (the point coordinates) was considered, thus making the method independent of the systems used to collect the data. A maximum of five features (input variables) was used, four of them related to the eigenvalues obtained from a principal component analysis (PCA). PCA was carried out at six scales, defined by the diameter of a sphere around each observation. 
Four multiclass supervised classification models were tested (linear discriminant analysis, logistic regression, support vector machines, and random forest) in two different scenarios, urban and forest, formed by artificial and natural objects, respectively. The results obtained were accurate (overall accuracy over 80% for the urban dataset, and over 93% for the forest dataset), in the range of the best results found in the literature, regardless of the classification method. For both datasets, the random forest algorithm provided the best solution/results when discrimination capacity, computing time, and the ability to estimate the relative importance of each variable are considered together.}, } @article {pmid31625328, year = {2019}, author = {Zhang, J and Jin, Z and Shen, Y}, title = {[Exploration and Mining of Large Medical Equipment Operation Data under Internet of Things].}, journal = {Zhongguo yi liao qi xie za zhi = Chinese journal of medical instrumentation}, volume = {43}, number = {5}, pages = {330-333}, doi = {10.3969/j.issn.1671-7104.2019.05.005}, pmid = {31625328}, issn = {1671-7104}, mesh = {*Cloud Computing ; *Internet ; Surgical Equipment ; }, abstract = {With the technology development in Internet of Things (IoT) area, it is good to try to use IoT and cloud computing technologies to improve the efficiency of medical equipment management. This article described using CT as sample to do the data capture and data analysis with the IoT technology. 
The positive result got shows the benefit of the exploring.}, } @article {pmid31622419, year = {2019}, author = {Alarifi, A and Abdelsamie, F and Amoon, M}, title = {A fault-tolerant aware scheduling method for fog-cloud environments.}, journal = {PloS one}, volume = {14}, number = {10}, pages = {e0223902}, pmid = {31622419}, issn = {1932-6203}, mesh = {Algorithms ; Cloud Computing ; *Computer Communication Networks ; Reproducibility of Results ; }, abstract = {Fog computing is a promising technology that leverages the resources to provide services for requests of IoT (Internet of Things) devices at the cloud edge. The high dynamic and heterogeneous nature of devices at the cloud edge causes failures to be a popular event and therefore fault tolerance became indispensable. Most early scheduling and fault-tolerant methods did not highly consider time-sensitive requests. This increases the possibility of latencies for serving these requests which causes unfavorable impacts. This paper proposes a fault-tolerant scheduling method (FTSM) for allocating services' requests to the most sufficient devices in fog-cloud IoT-based environments. The main purpose of the proposed method is to reduce the latency and overheads of services and to increase the reliability and capacity of the cloud. The method depends on categorizing devices that can issue requests into three classes according to the type of service required. These classes are time-sensitive, time-tolerant and core. Each time-sensitive request is directly mapped to one or more edge devices using a pre-prepared executive list of devices. Each time-tolerant request may be assigned to one or more devices at the cloud edge or the cloud core. Core requests are assigned to resources at the cloud core. 
In order to achieve fault tolerance, the proposed method selects the most suitable fault-tolerant technique from replication, checkpointing and resubmission techniques for each request while most existing methods consider only one technique. The effectiveness of the proposed method is assessed using average service time, throughput, operation costs, success rate and capacity percentage as performance indicators.}, } @article {pmid31620562, year = {2019}, author = {Vragniau, C and Bufton, JC and Garzoni, F and Stermann, E and Rabi, F and Terrat, C and Guidetti, M and Josserand, V and Williams, M and Woods, CJ and Viedma, G and Bates, P and Verrier, B and Chaperot, L and Schaffitzel, C and Berger, I and Fender, P}, title = {Synthetic self-assembling ADDomer platform for highly efficient vaccination by genetically encoded multiepitope display.}, journal = {Science advances}, volume = {5}, number = {9}, pages = {eaaw2853}, pmid = {31620562}, issn = {2375-2548}, support = {//Wellcome Trust/United Kingdom ; BB/L01386X/1/BB_/Biotechnology and Biological Sciences Research Council/United Kingdom ; }, mesh = {*Adenoviridae/classification/genetics/immunology ; Communicable Disease Control ; Communicable Diseases/etiology/immunology ; Epitope Mapping/*methods ; Epitopes/chemistry/genetics/*immunology ; Genetic Engineering ; Humans ; Models, Molecular ; Nanomedicine ; Nanotechnology ; Protein Conformation ; Structure-Activity Relationship ; Vaccination ; Vaccines, Synthetic/*immunology ; Vaccinology/methods ; Viral Proteins/chemical synthesis/chemistry/genetics/*immunology ; }, abstract = {Self-assembling virus-like particles represent highly attractive tools for developing next-generation vaccines and protein therapeutics. We created ADDomer, an adenovirus-derived multimeric protein-based self-assembling nanoparticle scaffold engineered to facilitate plug-and-play display of multiple immunogenic epitopes from pathogens. 
We used cryo-electron microscopy at near-atomic resolution and implemented novel, cost-effective, high-performance cloud computing to reveal architectural features in unprecedented detail. We analyzed ADDomer interaction with components of the immune system and developed a promising first-in-kind ADDomer-based vaccine candidate to combat emerging Chikungunya infectious disease, exemplifying the potential of our approach.}, } @article {pmid31615014, year = {2019}, author = {Deep, G and Mohana, R and Nayyar, A and Sanjeevikumar, P and Hossain, E}, title = {Authentication Protocol for Cloud Databases Using Blockchain Mechanism.}, journal = {Sensors (Basel, Switzerland)}, volume = {19}, number = {20}, pages = {}, pmid = {31615014}, issn = {1424-8220}, abstract = {Cloud computing has made the software development process fast and flexible but on the other hand it has contributed to increasing security attacks. Employees who manage the data in cloud companies may face insider attack, affecting their reputation. They have the advantage of accessing the user data by interacting with the authentication mechanism. The primary aim of this research paper is to provide a novel secure authentication mechanism by using Blockchain technology for cloud databases. Blockchain makes it difficult to change user login credentials details in the user authentication process by an insider. The insider is not able to access the user authentication data due to the distributed ledger-based authentication scheme. Activity of insider can be traced and cannot be changed. Both insider and outsider user's are authenticated using individual IDs and signatures. Furthermore, the user access control on the cloud database is also authenticated. 
The algorithm and theorem of the proposed mechanism have been given to demonstrate the applicability and correctness. The proposed mechanism is tested on the Scyther formal system tool against denial of service, impersonation, offline guessing, and no replay attacks. Scyther results show that the proposed methodology is secure cum robust.}, } @article {pmid31592524, year = {2020}, author = {Dobbins, NJ and Spital, CH and Black, RA and Morrison, JM and de Veer, B and Zampino, E and Harrington, RD and Britt, BD and Stephens, KA and Wilcox, AB and Tarczy-Hornoch, P and Mooney, SD}, title = {Leaf: an open-source, model-agnostic, data-driven web application for cohort discovery and translational biomedical research.}, journal = {Journal of the American Medical Informatics Association : JAMIA}, volume = {27}, number = {1}, pages = {109-118}, pmid = {31592524}, issn = {1527-974X}, support = {U24 TR002306/TR/NCATS NIH HHS/United States ; UL1 TR002319/TR/NCATS NIH HHS/United States ; }, mesh = {*Data Warehousing ; Databases as Topic ; Humans ; Information Storage and Retrieval/*methods ; Internet ; *Translational Research, Biomedical ; Unified Medical Language System ; *User-Computer Interface ; *Vocabulary, Controlled ; }, abstract = {OBJECTIVE: Academic medical centers and health systems are increasingly challenged with supporting appropriate secondary use of clinical data. Enterprise data warehouses have emerged as central resources for these data, but often require an informatician to extract meaningful information, limiting direct access by end users. To overcome this challenge, we have developed Leaf, a lightweight self-service web application for querying clinical data from heterogeneous data models and sources.

MATERIALS AND METHODS: Leaf utilizes a flexible biomedical concept system to define hierarchical concepts and ontologies. Each Leaf concept contains both textual representations and SQL query building blocks, exposed by a simple drag-and-drop user interface. Leaf generates abstract syntax trees which are compiled into dynamic SQL queries.

RESULTS: Leaf is a successful production-supported tool at the University of Washington, which hosts a central Leaf instance querying an enterprise data warehouse with over 300 active users. Through the support of UW Medicine (https://uwmedicine.org), the Institute of Translational Health Sciences (https://www.iths.org), and the National Center for Data to Health (https://ctsa.ncats.nih.gov/cd2h/), Leaf source code has been released into the public domain at https://github.com/uwrit/leaf.

DISCUSSION: Leaf allows the querying of single or multiple clinical databases simultaneously, even those of different data models. This enables fast installation without costly extraction or duplication.

CONCLUSIONS: Leaf differs from existing cohort discovery tools because it does not specify a required data model and is designed to seamlessly leverage existing user authentication systems and clinical databases in situ. We believe Leaf to be useful for health system analytics, clinical research data warehouses, precision medicine biobanks, and clinical studies involving large patient cohorts.}, } @article {pmid31574354, year = {2019}, author = {Nguyen, VD and Leoni, M and Dancheva, T and Jansson, J and Hoffman, J and Wassermann, D and Li, JR}, title = {Portable simulation framework for diffusion MRI.}, journal = {Journal of magnetic resonance (San Diego, Calif. : 1997)}, volume = {309}, number = {}, pages = {106611}, doi = {10.1016/j.jmr.2019.106611}, pmid = {31574354}, issn = {1096-0856}, abstract = {The numerical simulation of the diffusion MRI signal arising from complex tissue micro-structures is helpful for understanding and interpreting imaging data as well as for designing and optimizing MRI sequences. The discretization of the Bloch-Torrey equation by finite elements is a more recently developed approach for this purpose, in contrast to random walk simulations, which has a longer history. While finite element discretization is more difficult to implement than random walk simulations, the approach benefits from a long history of theoretical and numerical developments by the mathematical and engineering communities. In particular, software packages for the automated solutions of partial differential equations using finite element discretization, such as FEniCS, are undergoing active support and development. However, because diffusion MRI simulation is a relatively new application area, there is still a gap between the simulation needs of the MRI community and the available tools provided by finite element software packages. In this paper, we address two potential difficulties in using FEniCS for diffusion MRI simulation. 
First, we simplified software installation by the use of FEniCS containers that are completely portable across multiple platforms. Second, we provide a portable simulation framework based on Python and whose code is open source. This simulation framework can be seamlessly integrated with cloud computing resources such as Google Colaboratory notebooks working on a web browser or with Google Cloud Platform with MPI parallelization. We show examples illustrating the accuracy, the computational times, and parallel computing capabilities. The framework contributes to reproducible science and open-source software in computational diffusion MRI with the hope that it will help to speed up method developments and stimulate research collaborations.}, } @article {pmid31573287, year = {2019}, author = {Jiang, YF and Wei, K and Huang, L and Xu, K and Sun, QC and Zhang, YZ and Zhang, W and Li, H and You, L and Wang, Z and Lo, HK and Xu, F and Zhang, Q and Pan, JW}, title = {Remote Blind State Preparation with Weak Coherent Pulses in the Field.}, journal = {Physical review letters}, volume = {123}, number = {10}, pages = {100503}, doi = {10.1103/PhysRevLett.123.100503}, pmid = {31573287}, issn = {1079-7114}, abstract = {Quantum computing has seen tremendous progress in past years. Due to implementation complexity and cost, the future path of quantum computation is strongly believed to delegate computational tasks to powerful quantum servers on the cloud. Universal blind quantum computing (UBQC) provides the protocol for the secure delegation of arbitrary quantum computations, and it has received significant attention. However, a great challenge in UBQC is how to transmit a quantum state over a long distance securely and reliably. Here, we solve this challenge by proposing a resource-efficient remote blind qubit preparation (RBQP) protocol, with weak coherent pulses for the client to produce, using a compact and low-cost laser. 
We experimentally verify a key step of RBQP-quantum nondemolition measurement-in the field test over 100 km of fiber. Our experiment uses a quantum teleportation setup in the telecom wavelength and generates 1000 secure qubits with an average fidelity of (86.9±1.5)%, which exceeds the quantum no-cloning fidelity of equatorial qubit states. The results prove the feasibility of UBQC over long distances, and thus serves as a key milestone towards secure cloud quantum computing.}, } @article {pmid31573196, year = {2019}, author = {Rai, BK and Sresht, V and Yang, Q and Unwalla, R and Tu, M and Mathiowetz, AM and Bakken, GA}, title = {Comprehensive Assessment of Torsional Strain in Crystal Structures of Small Molecules and Protein-Ligand Complexes using ab Initio Calculations.}, journal = {Journal of chemical information and modeling}, volume = {59}, number = {10}, pages = {4195-4208}, doi = {10.1021/acs.jcim.9b00373}, pmid = {31573196}, issn = {1549-960X}, mesh = {Databases, Chemical ; *Drug Discovery ; Hydrogen Bonding ; Ligands ; Molecular Conformation ; Molecular Structure ; Proteins/*chemistry ; Rotation ; Small Molecule Libraries ; }, abstract = {The energetics of rotation around single bonds (torsions) is a key determinant of the three-dimensional shape that druglike molecules adopt in solution, the solid state, and in different biological environments, which in turn defines their unique physical and pharmacological properties. Therefore, accurate characterization of torsion angle preference and energetics is essential for the success of computational drug discovery and design. Here, we analyze torsional strain in crystal structures of druglike molecules in Cambridge structure database (CSD) and bioactive ligand conformations in protein data bank (PDB), expressing the total strain energy as a sum of strain energy from constituent rotatable bonds. 
We utilized cloud computing to generate torsion scan profiles of a very large collection of chemically diverse neutral fragments at DFT(B3LYP)/6-31G*//6-31G** or DFT(B3LYP)/6-31+G*//6-31+G** (for sulfur-containing molecule). With the data generated from these ab initio calculations, we performed rigorous analysis of strain due to deviation of observed torsion angles relative to their ideal gas-phase geometries. Contrary to the previous studies based on molecular mechanics, we find that in the crystalline state, molecules generally adopt low-strain conformations, with median per-torsion strain energy in CSD and PDB under one-tenth and one-third of a kcal/mol, respectively. However, for a small fraction (<5%) of motifs, external effects such as steric hindrance and hydrogen bonds result in strain penalty exceeding 2.5 kcal/mol. We find that due to poor quality of PDB structures in general, bioactive structures tend to have higher torsional strain compared to small-molecule crystal conformations. However, in the absence of structural fitting artifacts in PDB structures, protein-induced strain in bioactive conformations is quantitatively similar to those due to the packing forces in small-molecule crystal structures. This analysis allows us to establish strain energy thresholds to help identify biologically relevant conformers in a given ensemble. The work presented here is the most comprehensive study to date that demonstrates the utility and feasibility of gas-phase quantum mechanics (QM) calculations to study conformational preference and energetics of drug-size molecules. 
Potential applications of this study in computational lead discovery and structure-based design are discussed.}, } @article {pmid31569552, year = {2019}, author = {Fantacci, R and Nizzi, F and Pecorella, T and Pierucci, L and Roveri, M}, title = {False Data Detection for Fog and Internet of Things Networks.}, journal = {Sensors (Basel, Switzerland)}, volume = {19}, number = {19}, pages = {}, pmid = {31569552}, issn = {1424-8220}, support = {-Grant 2015YPXH4W\_004.//`Project GAUChO---A Green Adaptive Fog Computing and Networking Architecture'' funded by Progetti di Ricerca di Rilevante Interesse Nazionale (PRIN) Bando 2015/ ; }, abstract = {The Internet of Things (IoT) context brings new security issues due to billions of smart end-devices both interconnected in wireless networks and connected to the Internet by using different technologies. In this paper, we propose an attack-detection method, named Data Intrusion Detection System (DataIDS), based on real-time data analysis. As end devices are mainly resource constrained, Fog Computing (FC) is introduced to implement the DataIDS. FC increases storage, computation capabilities, and processing capabilities, allowing it to detect promptly an attack with respect to security solutions on the Cloud. This paper also considers an attack tree to model threats and vulnerabilities of Fog/IoT scenarios with heterogeneous devices and suggests countermeasure costs. 
We verify the performance of the proposed DataIDS, implementing a testbed with several devices that measure different physical quantities and by using standard data-gathering protocols.}, } @article {pmid31565209, year = {2019}, author = {Al-Issa, Y and Ottom, MA and Tamrawi, A}, title = {eHealth Cloud Security Challenges: A Survey.}, journal = {Journal of healthcare engineering}, volume = {2019}, number = {}, pages = {7516035}, pmid = {31565209}, issn = {2040-2309}, mesh = {Algorithms ; *Cloud Computing ; *Computer Security ; *Confidentiality ; Data Collection ; Electronic Health Records ; Humans ; Information Storage and Retrieval ; Medical Informatics/*methods ; Privacy ; Reproducibility of Results ; Software ; Surveys and Questionnaires ; Telemedicine/*methods ; }, abstract = {Cloud computing is a promising technology that is expected to transform the healthcare industry. Cloud computing has many benefits like flexibility, cost and energy savings, resource sharing, and fast deployment. In this paper, we study the use of cloud computing in the healthcare industry and different cloud security and privacy challenges. The centralization of data on the cloud raises many security and privacy concerns for individuals and healthcare providers. This centralization of data (1) provides attackers with one-stop honey-pot to steal data and intercept data in-motion and (2) moves data ownership to the cloud service providers; therefore, the individuals and healthcare providers lose control over sensitive data. As a result, security, privacy, efficiency, and scalability concerns are hindering the wide adoption of the cloud technology. In this work, we found that the state-of-the art solutions address only a subset of those concerns. 
Thus, there is an immediate need for a holistic solution that balances all the contradicting requirements.}, } @article {pmid31554246, year = {2019}, author = {Zhao, L and Liu, Z and Mbachu, J}, title = {Development of Intelligent Prefabs Using IoT Technology to Improve the Performance of Prefabricated Construction Projects.}, journal = {Sensors (Basel, Switzerland)}, volume = {19}, number = {19}, pages = {}, pmid = {31554246}, issn = {1424-8220}, support = {004000514119067//Beijing University of Technology/ ; 004000546319529//Beijing University of Technology/ ; 2018YFF0300300//National Basic Research Program of China (973 Program)/ ; }, abstract = {Prefabrication (PC) projects have many advantages, such as cost and energy savings and waste reduction. However, some problems still exist that hamper the development of prefabrication projects. To improve PC project performance and advance innovation in construction, this study introduces an innovative method that incorporates Radio Frequency Identification (RFID) and Long Range (LoRa) technologies, sensor networks, the BIM model and cloud computing to automatically collect, analyze and display real-time information about PC components. It can locate PC components on a construction site and monitor their structural performance during the installation process. RFID technology and strain sensors were used to collect the required data on a construction site. All the data was transmitted to a server using LoRa technology. Then, the cloud-based Building Information Modelling (BIM) model of the project was developed to store and vividly present project information and real-time onsite data. Moreover, the cloud-based BIM model enables project team members to access the project information from anywhere by using mobile devices. The proposed system was tested on a real PC project to validate its effectiveness. 
The results indicate that the sensor network can provide reliable data via LoRa technology, and a PC component can be accurately located on site. Also, the monitoring data of structural performance for the PC component during the installation process is acceptable. The proposed method using innovative technologies can improve PC project performance and help industry professionals by providing sufficient required information.}, } @article {pmid31547413, year = {2019}, author = {Pérez-Torres, R and Torres-Huitzil, C and Galeana-Zapién, H}, title = {A Spatio-Temporal Approach to Individual Mobility Modeling in On-Device Cognitive Computing Platforms.}, journal = {Sensors (Basel, Switzerland)}, volume = {19}, number = {18}, pages = {}, pmid = {31547413}, issn = {1424-8220}, support = {237417//Consejo Nacional de Ciencia y Tecnología/ ; }, abstract = {The increased availability of GPS-enabled devices makes possible to collect location data for mining purposes and to develop mobility-based services (MBS). For most of the MBSs, determining interesting locations and frequent Points of Interest (POIs) is of paramount importance to study the semantic of places visited by an individual and the mobility patterns as a spatio-temporal phenomenon. In this paper, we propose a novel approach that uses mobility-based services for on-device and individual-centered mobility understanding. Unlike existing approaches that use crowd data for cloud-assisted POI extraction, the proposed solution autonomously detects POIs and mobility events to incrementally construct a cognitive map (spatio-temporal model) of individual mobility suitable to constrained mobile platforms. In particular, we focus on detecting POIs and enter-exits events as the key to derive statistical properties for characterizing the dynamics of an individual's mobility. 
We show that the proposed spatio-temporal map effectively extracts core features from the user-POI interaction that are relevant for analytics such as mobility prediction. We also demonstrate how the obtained spatio-temporal model can be exploited to assess the relevance of daily mobility routines. This novel cognitive and on-line mobility modeling contributes toward the distributed intelligence of IoT connected devices without strongly compromising energy.}, } @article {pmid31546907, year = {2019}, author = {Passian, A and Imam, N}, title = {Nanosystems, Edge Computing, and the Next Generation Computing Systems.}, journal = {Sensors (Basel, Switzerland)}, volume = {19}, number = {18}, pages = {}, pmid = {31546907}, issn = {1424-8220}, abstract = {It is widely recognized that nanoscience and nanotechnology and their subfields, such as nanophotonics, nanoelectronics, and nanomechanics, have had a tremendous impact on recent advances in sensing, imaging, and communication, with notable developments, including novel transistors and processor architectures. For example, in addition to being supremely fast, optical and photonic components and devices are capable of operating across multiple orders of magnitude length, power, and spectral scales, encompassing the range from macroscopic device sizes and kW energies to atomic domains and single-photon energies. The extreme versatility of the associated electromagnetic phenomena and applications, both classical and quantum, are therefore highly appealing to the rapidly evolving computing and communication realms, where innovations in both hardware and software are necessary to meet the growing speed and memory requirements. 
Development of all-optical components, photonic chips, interconnects, and processors will bring the speed of light, photon coherence properties, field confinement and enhancement, information-carrying capacity, and the broad spectrum of light into the high-performance computing, the internet of things, and industries related to cloud, fog, and recently edge computing. Conversely, owing to their extraordinary properties, 0D, 1D, and 2D materials are being explored as a physical basis for the next generation of logic components and processors. Carbon nanotubes, for example, have been recently used to create a new processor beyond proof of principle. These developments, in conjunction with neuromorphic and quantum computing, are envisioned to maintain the growth of computing power beyond the projected plateau for silicon technology. We survey the qualitative figures of merit of technologies of current interest for the next generation computing with an emphasis on edge computing.}, } @article {pmid31532858, year = {2020}, author = {Livingstone, D and Chau, J}, title = {Otoscopic diagnosis using computer vision: An automated machine learning approach.}, journal = {The Laryngoscope}, volume = {130}, number = {6}, pages = {1408-1413}, doi = {10.1002/lary.28292}, pmid = {31532858}, issn = {1531-4995}, mesh = {Algorithms ; Artificial Intelligence ; Diagnosis, Computer-Assisted/*methods ; Ear Diseases/*diagnosis ; Humans ; *Machine Learning ; Otolaryngology/methods/*statistics & numerical data ; Otoscopy/methods/*statistics & numerical data ; }, abstract = {OBJECTIVE: Access to otolaryngology is limited by lengthy wait lists and lack of specialists, especially in rural and remote areas. The objective of this study was to use an automated machine learning approach to build a computer vision algorithm for otoscopic diagnosis capable of greater accuracy than trained physicians. 
This algorithm could be used by primary care providers to facilitate timely referral, triage, and effective treatment.

METHODS: Otoscopic images were obtained from Google Images (Google Inc., Mountain View, CA), from open access repositories, and within otolaryngology clinics associated with our institution. After preprocessing, 1,366 unique images were uploaded to the Google Cloud Vision AutoML platform (Google Inc.) and annotated with one or more of 14 otologic diagnoses. A consensus set of labels for each otoscopic image was attained, and a multilabel classifier architecture algorithm was trained. The performance of the algorithm on an 89-image test set was compared to the performance of physicians from pediatrics, emergency medicine, otolaryngology, and family medicine.

RESULTS: For all diagnoses combined, the average precision (positive predictive value) of the algorithm was 90.9%, and the average recall (sensitivity) was 86.1%. The algorithm made 79 correct diagnoses with an accuracy of 88.7%. The average physician accuracy was 58.9%.

CONCLUSION: We have created a computer vision algorithm using automated machine learning that on average rivals the accuracy of the physicians we tested. Fourteen different otologic diagnoses were analyzed. The field of medicine will be changed dramatically by artificial intelligence within the next few decades, and physicians of all specialties must be prepared to guide that process.

LEVEL OF EVIDENCE: NA Laryngoscope, 130:1408-1413, 2020.}, } @article {pmid31531222, year = {2019}, author = {Jegadeesan, S and Dhamodaran, M and Azees, M and Shanmugapriya, SS}, title = {Computationally efficient mutual authentication protocol for remote infant incubator monitoring system.}, journal = {Healthcare technology letters}, volume = {6}, number = {4}, pages = {92-97}, pmid = {31531222}, issn = {2053-3713}, abstract = {Internet of Things (IoT), cloud computing and wireless medical sensor networks have significantly improved remote healthcare monitoring. In a healthcare monitoring system, many resource-limited sensors are deployed to sense, process and communicate the information. However, continuous and accurate operations of these devices are very important, especially in the infant incubator monitoring system. Because important decisions are made on the received information. Therefore, it is necessary to ensure the authenticity between the incubator monitoring system and doctors. In this work, a public key encryption based computationally efficient mutual authentication protocol is proposed for secure data transmission between incubator monitoring systems and doctors or administrators. The proposed protocol improves performance and reduces the computational cost without compromising the security. 
The security analysis part shows the strength of the proposed protocol against various attacks, and the performance analysis part shows that the proposed protocol performs better than other existing protocols based on Rivest-Shamir-Adleman and elliptic-curve cryptography schemes.}, } @article {pmid31527408, year = {2019}, author = {Connor, R and Brister, R and Buchmann, JP and Deboutte, W and Edwards, R and Martí-Carreras, J and Tisza, M and Zalunin, V and Andrade-Martínez, J and Cantu, A and D'Amour, M and Efremov, A and Fleischmann, L and Forero-Junco, L and Garmaeva, S and Giluso, M and Glickman, C and Henderson, M and Kellman, B and Kristensen, D and Leubsdorf, C and Levi, K and Levi, S and Pakala, S and Peddu, V and Ponsero, A and Ribeiro, E and Roy, F and Rutter, L and Saha, S and Shakya, M and Shean, R and Miller, M and Tully, B and Turkington, C and Youens-Clark, K and Vanmechelen, B and Busby, B}, title = {NCBI's Virus Discovery Hackathon: Engaging Research Communities to Identify Cloud Infrastructure Requirements.}, journal = {Genes}, volume = {10}, number = {9}, pages = {}, pmid = {31527408}, issn = {2073-4425}, support = {R35 CA220523/CA/NCI NIH HHS/United States ; }, mesh = {Big Data ; Cloud Computing/*standards ; Genome, Human ; *Genome, Viral ; Humans ; *Metagenome ; Metagenomics/*methods/standards ; Software ; }, abstract = {A wealth of viral data sits untapped in publicly available metagenomic data sets when it might be extracted to create a usable index for the virological research community. We hypothesized that work of this complexity and scale could be done in a hackathon setting. Ten teams comprised of over 40 participants from six countries assembled to create a crowd-sourced set of analysis and processing pipelines for a complex biological data set in a three-day event on the San Diego State University campus starting 9 January 2019. 
Prior to the hackathon, 141,676 metagenomic data sets from the National Center for Biotechnology Information (NCBI) Sequence Read Archive (SRA) were pre-assembled into contiguous assemblies (contigs) by NCBI staff. During the hackathon, a subset consisting of 2953 SRA data sets (approximately 55 million contigs) was selected, which were further filtered for a minimal length of 1 kb. This resulted in 4.2 million (Mio) contigs, which were aligned using BLAST against all known virus genomes, phylogenetically clustered and assigned metadata. Out of the 4.2 Mio contigs, 360,000 contigs were labeled with domains and an additional subset containing 4400 contigs was screened for virus or virus-like genes. The work yielded valuable insights into both SRA data and the cloud infrastructure required to support such efforts, revealing analysis bottlenecks and possible workarounds thereof. Mainly: (i) Conservative assemblies of SRA data improve initial analysis steps; (ii) existing bioinformatic software with weak multithreading/multicore support can be elevated by wrapper scripts to use all cores within a computing node; (iii) redesigning existing bioinformatic algorithms for a cloud infrastructure to facilitate its use for a wider audience; and (iv) a cloud infrastructure allows a diverse group of researchers to collaborate effectively. The scientific findings will be extended during a follow-up event. 
Here, we present the applied workflows, initial results, and lessons learned from the hackathon.}, } @article {pmid31522294, year = {2019}, author = {Mendez, KM and Pritchard, L and Reinke, SN and Broadhurst, DI}, title = {Toward collaborative open data science in metabolomics using Jupyter Notebooks and cloud computing.}, journal = {Metabolomics : Official journal of the Metabolomic Society}, volume = {15}, number = {10}, pages = {125}, pmid = {31522294}, issn = {1573-3890}, mesh = {Animals ; *Cloud Computing ; *Data Science ; Humans ; *Metabolomics ; *Software ; }, abstract = {BACKGROUND: A lack of transparency and reporting standards in the scientific community has led to increasing and widespread concerns relating to reproduction and integrity of results. As an omics science, which generates vast amounts of data and relies heavily on data science for deriving biological meaning, metabolomics is highly vulnerable to irreproducibility. The metabolomics community has made substantial efforts to align with FAIR data standards by promoting open data formats, data repositories, online spectral libraries, and metabolite databases. Open data analysis platforms also exist; however, they tend to be inflexible and rely on the user to adequately report their methods and results. To enable FAIR data science in metabolomics, methods and results need to be transparently disseminated in a manner that is rapid, reusable, and fully integrated with the published work. To ensure broad use within the community such a framework also needs to be inclusive and intuitive for both computational novices and experts alike.

AIM OF REVIEW: To encourage metabolomics researchers from all backgrounds to take control of their own data science, mould it to their personal requirements, and enthusiastically share resources through open science.

This tutorial introduces the concept of interactive web-based computational laboratory notebooks. The reader is guided through a set of experiential tutorials specifically targeted at metabolomics researchers, based around the Jupyter Notebook web application, GitHub data repository, and Binder cloud computing platform.}, } @article {pmid31522286, year = {2019}, author = {Liu, X and Ma, W and Cao, H}, title = {NPMA: A Novel Privacy-Preserving Mutual Authentication in TMIS for Mobile Edge-Cloud Architecture.}, journal = {Journal of medical systems}, volume = {43}, number = {10}, pages = {318}, pmid = {31522286}, issn = {1573-689X}, support = {2017YFB0802400//National Key R&D Program of China/ ; 5001-20109195456//the Fundamental Research Funds for the Central Universities and the Innovation Fund of Xidian University/ ; 61373171//National Science Foundation of China under grant/ ; B08038//The 111 Project/ ; gxyqZD2019060//the Program for Excellent Young Talents in University of Anhui Province under Grant/ ; }, mesh = {Cloud Computing/*standards ; *Computer Security ; Confidentiality/*standards ; Humans ; Telemedicine/*organization & administration/standards ; }, abstract = {Mobile Edge-Cloud Network is a new network structure after fog-cloud computing, where service and data computing are scattered in the most logical, nearby and efficient place. It provides better services than fog-cloud computing with better performance in reasonably low cost way and allows users to eliminate numerous limitations inherent in fog-cloud computing, although it inherits those security-privacy issues from fog-cloud computing. A novel privacy-preserving mutual authentication in TMIS for mobile Edge-Cloud architecture (abbreviated to NPMA) is constructed in this paper. NPMA scheme not only mitigates some weaknesses of fog-cloud computing, but has other advantages. 
First, NPMA scheme supports patients (edge-servers) anonymity and forward-backward untraceability (traceability, when needed), since their identities are hidden in two distinct dynamic anonyms and a static one and only the trusted center can recover their real identities, when needed. Second, each edge-server shares a secret value, which realizes authentication with extremely low computational cost in authentication phase. Finally, NPMA scheme is proven secure against passive and active attacks under elliptic curve computable Diffie-Hellman problem (ECDHP) assumption in random oracle model. Hence, it achieves the required security properties and outperforms prior approaches in terms of energy and computational costs.}, } @article {pmid31510508, year = {2019}, author = {Kim, Y and Sim, M and Moon, I}, title = {Secure storage and retrieval schemes for multiple encrypted digital holograms with orthogonal phase encoding multiplexing.}, journal = {Optics express}, volume = {27}, number = {16}, pages = {22147-22160}, doi = {10.1364/OE.27.022147}, pmid = {31510508}, issn = {1094-4087}, abstract = {Recent developments in 3D computational optical imaging such as digital holographic microscopy have ushered in a new era for biological research. Therefore, efficient and secure storage and retrieval of digital holograms is a challenging task for future cloud computing services. In this study, we propose a novel scheme to securely store and retrieve multiple encrypted digital holograms by using phase encoding multiplexing. In the proposed schemes, an encrypted hologram can only be accessed using a binary phase mask, which is the key to retrieve the image. In addition, it is possible to independently store, retrieve, and manage the encrypted digital holograms without affecting other groups of the encrypted holograms multiplexed using different sets of binary phase masks, due to the orthogonality properties of the Hadamard matrices with high autocorrelation and low cross-correlation. 
The desired encrypted holograms may also be searched for, removed, and added independently of other groups of the encrypted holograms. More and more 3D images or digital holograms can be securely and efficiently stored, retrieved, and managed.}, } @article {pmid31509915, year = {2019}, author = {Kibiwott, KP and Zhao, YN and Kogo, J and Zhang, FL}, title = {Verifiable fully outsourced attribute-based signcryption system for IoT eHealth big data in cloud computing.}, journal = {Mathematical biosciences and engineering : MBE}, volume = {16}, number = {5}, pages = {3561-3594}, doi = {10.3934/mbe.2019178}, pmid = {31509915}, issn = {1551-0018}, mesh = {Algorithms ; Big Data ; *Cloud Computing ; *Computer Security ; Confidentiality ; Humans ; *Internet of Things ; Medical Informatics/*instrumentation/methods ; Models, Theoretical ; *Outsourced Services ; Privacy ; Reproducibility of Results ; Software ; Telemedicine/*instrumentation/methods ; }, abstract = {The entrance of Internet of Things (IoT) technologies to healthcare industry has impacted the explosion of eHealth big data. Cloud computing is widely considered to be the promising solution to store this data because of the presence of abundant resources at a lower cost. However, the privacy and security of the IoT generated data cannot be ensured as the data is kept far from the owner's physical domain. In order to resolve the underlined issues, a reassuring solution is to adopt attribute-based signcryption (ABSC) due to the desirable cryptographic properties it holds including fine-grained access control, authentication, confidentiality and data owner privacy. Nonetheless, executing expensive computation such as pairing and modular exponential operations in resource-constrained IoT device platform can be too taxing and demanding. 
To address the challenges stated above, we proposed in this paper a more efficient scheme where computation power is borrowed from the cloud server to process expensive computations while leaving simple operations to local users. In order to realize this, trusted attribute authority, signcryptor and designcryptor outsources to the cloud expensive tasks for key generation, signcryption and designcryption respectively. Moreover, validity and correctness of outsourced computations can be verified by employing outsourcing verification server. Security analysis, comparisons evaluation and simulation of the proposed scheme are presented. The output demonstrates that it is efficient, secure and therefore suitable for application in resource-constrained IoT devices.}, } @article {pmid31506093, year = {2019}, author = {Ellrott, K and Buchanan, A and Creason, A and Mason, M and Schaffter, T and Hoff, B and Eddy, J and Chilton, JM and Yu, T and Stuart, JM and Saez-Rodriguez, J and Stolovitzky, G and Boutros, PC and Guinney, J}, title = {Reproducible biomedical benchmarking in the cloud: lessons from crowd-sourced data challenges.}, journal = {Genome biology}, volume = {20}, number = {1}, pages = {195}, pmid = {31506093}, issn = {1474-760X}, support = {5U24CA209923/CA/NCI NIH HHS/United States ; R01 GM109031/GM/NIGMS NIH HHS/United States ; P30CA016042/CA/NCI NIH HHS/United States ; R01CA180778/CA/NCI NIH HHS/United States ; U24 CA210990/CA/NCI NIH HHS/United States ; }, mesh = {*Algorithms ; Benchmarking ; Information Dissemination ; Models, Biological ; Reproducibility of Results ; }, abstract = {Challenges are achieving broad acceptance for addressing many biomedical questions and enabling tool assessment. But ensuring that the methods evaluated are reproducible and reusable is complicated by the diversity of software architectures, input and output file formats, and computing environments. 
To mitigate these problems, some challenges have leveraged new virtualization and compute methods, requiring participants to submit cloud-ready software packages. We review recent data challenges with innovative approaches to model reproducibility and data sharing, and outline key lessons for improving quantitative biomedical data analysis through crowd-sourced benchmarking challenges.}, } @article {pmid31499750, year = {2019}, author = {Cao, PC and Liu, WW and Liu, GJ and Zhai, JT and Ji, XP and Dai, YW}, title = {Steganographic coding scheme based on dither convolutional trellis under resampling mechanism.}, journal = {Mathematical biosciences and engineering : MBE}, volume = {16}, number = {5}, pages = {6015-6033}, doi = {10.3934/mbe.2019301}, pmid = {31499750}, issn = {1551-0018}, abstract = {With the rapid development of mobile internet and cloud computing, numerous digital media files in mobile social networking and media sharing software have become the important carriers of steganography. However, these digital media files may be resampled by the media server when being pushed to the intelligent mobile terminals. The resampling of digital media files is a transformation which enlarges or shrinks objects by a scale factor that is the same in all dimensions. In order to reduce embedding distortion while ensuring the correct extraction of secret messages under resampling mechanism, a steganographic coding scheme based on dither convolutional trellis is proposed in this paper. The resampling mapping is estimated with finite sample pairs. The resampling stego media files with secret messages embedded are generated from the estimated resampling cover media files by syndrome-trellis codes (STCs). According to the estimated resampling mapping, the dither convolutional trellis for one dimensional resampling is constructed to generate the source stego media files from source cover media files and resampling stego media files. 
The steganographic coding scheme is also extended to the circumstance of two dimensional resampling such as image scaling. The experimental results show that the proposed steganographic scheme can achieve less embedding distortion while ensuring the accuracy of secret messages extraction under multi-dimensional resampling mechanism.}, } @article {pmid31495995, year = {2020}, author = {Ford, AS and Weitzner, BD and Bahl, CD}, title = {Integration of the Rosetta suite with the python software stack via reproducible packaging and core programming interfaces for distributed simulation.}, journal = {Protein science : a publication of the Protein Society}, volume = {29}, number = {1}, pages = {43-51}, pmid = {31495995}, issn = {1469-896X}, mesh = {Cloud Computing ; Computational Biology/*methods ; Models, Molecular ; Proteins/*chemistry ; Software ; User-Computer Interface ; }, abstract = {The Rosetta software suite for macromolecular modeling is a powerful computational toolbox for protein design, structure prediction, and protein structure analysis. The development of novel Rosetta-based scientific tools requires two orthogonal skill sets: deep domain-specific expertise in protein biochemistry and technical expertise in development, deployment, and analysis of molecular simulations. Furthermore, the computational demands of molecular simulation necessitate large scale cluster-based or distributed solutions for nearly all scientifically relevant tasks. To reduce the technical barriers to entry for new development, we integrated Rosetta with modern, widely adopted computational infrastructure. This allows simplified deployment in large-scale cluster and cloud computing environments, and effective reuse of common libraries for simulation execution and data analysis. To achieve this, we integrated Rosetta with the Conda package manager; this simplifies installation into existing computational environments and packaging as docker images for cloud deployment. 
Then, we developed programming interfaces to integrate Rosetta with the PyData stack for analysis and distributed computing, including the popular tools Jupyter, Pandas, and Dask. We demonstrate the utility of these components by generating a library of a thousand de novo disulfide-rich miniproteins in a hybrid simulation that included cluster-based design and interactive notebook-based analyses. Our new tools enable users, who would otherwise not have access to the necessary computational infrastructure, to perform state-of-the-art molecular simulation and design with Rosetta.}, } @article {pmid31489937, year = {2019}, author = {Patra, S and Manzoni, P and T Calafate, C and Zamora, W and Cano, JC}, title = {Leveraging a Publish/Subscribe Fog System to Provide Collision Warnings in Vehicular Networks.}, journal = {Sensors (Basel, Switzerland)}, volume = {19}, number = {18}, pages = {}, pmid = {31489937}, issn = {1424-8220}, support = {RTI2018-096384-B-I00//Ministerio de Ciencia, Innovación y Universidades/ ; }, abstract = {Fog computing, an extension of the Cloud Computing paradigm where routers themselves may provide the virtualisation infrastructure, aims at achieving fluidity when distributing in-network functions, in addition to allowing fast and scalable processing, and exchange of information. In this paper we present a fog computing architecture based on a "content island" which interconnects sets of "things" to exchange and process data among themselves or with other content islands. We then present a use case that focuses on a smartphone-based forward collision warning application for a connected vehicle scenario. This application makes use of the optical sensor of smartphones to estimate the distance between the device itself and other vehicles in its field of view. The vehicle travelling directly ahead is identified relying on the information from the GPS, camera, and inter-island communication. 
Warnings are generated at both content islands, if the driver does not maintain a predefined safe distance towards the vehicle ahead. Experiments performed with the application show that with the developed method, we are able to estimate the distance between vehicles, and the inter-island communication has a very low overhead, resulting in improved performance. On comparing our proposed solution based on edge/fog computing with a cloud-based api, it was observed that our solution outperformed the cloud-based api, thus making us optimistic of the utility of the proposed architecture.}, } @article {pmid31489843, year = {2019}, author = {Loncar-Turukalo, T and Zdravevski, E and Machado da Silva, J and Chouvarda, I and Trajkovik, V}, title = {Literature on Wearable Technology for Connected Health: Scoping Review of Research Trends, Advances, and Barriers.}, journal = {Journal of medical Internet research}, volume = {21}, number = {9}, pages = {e14017}, pmid = {31489843}, issn = {1438-8871}, mesh = {Humans ; Remote Sensing Technology/*methods ; Technology ; Telemedicine/*standards ; Wearable Electronic Devices/*standards ; }, abstract = {BACKGROUND: Wearable sensing and information and communication technologies are key enablers driving the transformation of health care delivery toward a new model of connected health (CH) care. The advances in wearable technologies in the last decade are evidenced in a plethora of original articles, patent documentation, and focused systematic reviews. Although technological innovations continuously respond to emerging challenges and technology availability further supports the evolution of CH solutions, the widespread adoption of wearables remains hindered.

OBJECTIVE: This study aimed to scope the scientific literature in the field of pervasive wearable health monitoring in the time interval from January 2010 to February 2019 with respect to four important pillars: technology, safety and security, prescriptive insight, and user-related concerns. The purpose of this study was multifold: identification of (1) trends and milestones that have driven research in wearable technology in the last decade, (2) concerns and barriers from technology and user perspective, and (3) trends in the research literature addressing these issues.

METHODS: This study followed the scoping review methodology to identify and process the available literature. As the scope surpasses the possibilities of manual search, we relied on the natural language processing tool kit to ensure an efficient and exhaustive search of the literature corpus in three large digital libraries: Institute of Electrical and Electronics Engineers, PubMed, and Springer. The search was based on the keywords and properties to be found in articles using the search engines of the digital libraries.

RESULTS: The annual number of publications in all segments of research on wearable technology shows an increasing trend from 2010 to February 2019. The technology-related topics dominated in the number of contributions, followed by research on information delivery, safety, and security, whereas user-related concerns were the topic least addressed. The literature corpus evidences milestones in sensor technology (miniaturization and placement), communication architectures and fifth generation (5G) cellular network technology, data analytics, and evolution of cloud and edge computing architectures. The research lag in battery technology makes energy efficiency a relevant consideration in the design of both sensors and network architectures with computational offloading. The most addressed user-related concerns were (technology) acceptance and privacy, whereas research gaps indicate that more efforts should be invested into formalizing clear use cases with timely and valuable feedback and prescriptive recommendations.

CONCLUSIONS: This study confirms that applications of wearable technology in the CH domain are becoming mature and established as a scientific domain. The current research should bring progress to sustainable delivery of valuable recommendations, enforcement of privacy by design, energy-efficient pervasive sensing, seamless monitoring, and low-latency 5G communications. To complement technology achievements, future work involving all stakeholders providing research evidence on improved care pathways and cost-effectiveness of the CH model is needed.}, } @article {pmid31487947, year = {2019}, author = {Li, L and Guo, M and Ma, L and Mao, H and Guan, Q}, title = {Online Workload Allocation via Fog-Fog-Cloud Cooperation to Reduce IoT Task Service Delay.}, journal = {Sensors (Basel, Switzerland)}, volume = {19}, number = {18}, pages = {}, pmid = {31487947}, issn = {1424-8220}, support = {61671208//National Natural Science Foundation of China/ ; }, abstract = {Fog computing has recently emerged as an extension of cloud computing in providing high-performance computing services for delay-sensitive Internet of Things (IoT) applications. By offloading tasks to a geographically proximal fog computing server instead of a remote cloud, the delay performance can be greatly improved. However, some IoT applications may still experience considerable delays, including queuing and computation delays, when huge amounts of tasks instantaneously feed into a resource-limited fog node. Accordingly, the cooperation among geographically close fog nodes and the cloud center is desired in fog computing with the ever-increasing computational demands from IoT applications. This paper investigates a workload allocation scheme in an IoT-fog-cloud cooperation system for reducing task service delay, aiming at satisfying as many as possible delay-sensitive IoT applications' quality of service (QoS) requirements. 
To this end, we first formulate the workload allocation problem in an IoT-edge-cloud cooperation system, which suggests optimal workload allocation among local fog node, neighboring fog node, and the cloud center to minimize task service delay. Then, the stability of the IoT-fog-cloud queueing system is theoretically analyzed with Lyapunov drift plus penalty theory. Based on the analytical results, we propose a delay-aware online workload allocation and scheduling (DAOWA) algorithm to achieve the goal of reducing long-term average task service delay. Theoretical analysis and simulations have been conducted to demonstrate the efficiency of the proposal in task service delay reduction and IoT-fog-cloud queueing system stability.}, } @article {pmid31484551, year = {2019}, author = {Chien, TW and Lee, YL and Wang, HY}, title = {Detecting hospital behaviors of up-coding on DRGs using Rasch model of continuous variables and online cloud computing in Taiwan.}, journal = {BMC health services research}, volume = {19}, number = {1}, pages = {630}, pmid = {31484551}, issn = {1472-6963}, mesh = {*Cloud Computing ; *Diagnosis-Related Groups/economics ; Fees, Medical ; Hospitals ; Humans ; Insurance, Health, Reimbursement/*statistics & numerical data ; National Health Programs/economics/*statistics & numerical data ; Patient Discharge/*statistics & numerical data ; Taiwan ; }, abstract = {BACKGROUND: This work aims to apply data-detection algorithms to predict the possible deductions of reimbursement from Taiwan's Bureau of National Health Insurance (BNHI), and to design an online dashboard to send alerts and reminders to physicians after completing their patient discharge summaries.

METHODS: Reimbursement data for discharged patients were extracted from a Taiwan medical center in 2016. Using the Rasch model of continuous variables, we applied standardized residual analyses to 20 sets of norm-referenced diagnosis-related group (DRGs), each with 300 cases, and compared these to 194 cases with deducted records from the BNHI. We then examine whether the results of prediction using the Rasch model have a high probability associated with the deducted cases. Furthermore, an online dashboard was designed for use in the online monitoring of possible deductions on fee items in medical settings.

RESULTS: The results show that 1) the effects deducted by the BNHI can be predicted with an accuracy rate of 0.82 using the standardized residual approach of the Rasch model; 2) the accuracies for drug, medical material and examination fees are not associated among different years, and all of those areas under the ROC curve (AUC) are significantly greater than the randomized probability of 0.50; and 3) the online dashboard showing the possible deductions on fee items can be used by hospitals in the future.

CONCLUSION: The DRG-based comparisons in the possible deductions on medical fees, along with the algorithm based on Rasch modeling, can be a complementary tool in upgrading the efficiency and accuracy in processing medical fee applications in the discernable future.}, } @article {pmid31482150, year = {2019}, author = {Park, SY and Nanda, S and Faraci, G and Park, Y and Lee, HY}, title = {CCMP: software-as-a-service approach for fully-automated microbiome profiling.}, journal = {Journal of biomedical informatics: X}, volume = {2}, number = {}, pages = {}, pmid = {31482150}, issn = {2590-177X}, support = {R01 AI083115/AI/NIAID NIH HHS/United States ; R01 AI095066/AI/NIAID NIH HHS/United States ; }, abstract = {Microbiome profiling holds great promise for the development of novel disease biomarkers and therapeutics. Next-generation sequencing is currently the preferred method for microbiome data collection and multiple standardized tools, packages, and pipelines have been developed for the purpose of raw data processing and microbial annotation. However, these currently available pipelines come with entry-level barriers such as high-performance hardware, software installation, and sequential command-line scripting that often deter end-users. We thus created Cloud Computing for Microbiome Profiling (CCMP, https://ccmp.usc.edu), a public cloud-based web tool which combines the analytical power of current microbiome analysis platforms with a user-friendly interface. CCMP is a free-of-charge software-as-a-service (SaaS) that simplifies user experience by enabling users to complete their analysis in a single step, uploading raw sequencing data files. Once users upload 16S ribosomal RNA gene sequence data, our pipeline performs taxonomic annotation, abundance profiling, and statistical tests to report microbiota signatures altered by diseases or experimental conditions. 
CCMP took a 125 gigabyte (GB) input of 16S ribosomal RNA gene sequence data from 1052 specimens in FASTQ format and reported figures and tables of taxonomic annotations, statistical tests, α and β diversity calculations, and principal coordinate analyses within 21 hours. CCMP is the first fully-automated web interface that integrates three key solutions for large-scale data analysis: cloud computing, fast file transfer technology, and microbiome analysis tools. As a reliable platform that supplies consistent microbiome analysis, CCMP will advance microbiome research by making effortful bioinformatics easily accessible to the public.}, } @article {pmid31480772, year = {2019}, author = {Fischer, GS and Righi, RDR and Costa, CAD and Galante, G and Griebler, D}, title = {Towards Evaluating Proactive and Reactive Approaches on Reorganizing Human Resources in IoT-Based Smart Hospitals.}, journal = {Sensors (Basel, Switzerland)}, volume = {19}, number = {17}, pages = {}, pmid = {31480772}, issn = {1424-8220}, support = {990398//Coordenação de Aperfeiçoamento de Pessoal de Nível Superior/ ; }, mesh = {Cloud Computing ; Delivery of Health Care/methods ; *Hospitals ; Humans ; Monitoring, Physiologic ; }, abstract = {Hospitals play an important role in ensuring a proper treatment of human health. One of the problems to be faced is the increasingly overcrowded patients care queues, who end up waiting for longer times without proper treatment to their health problems. The allocation of health professionals in hospital environments is not able to adapt to the demands of patients. There are times when underused rooms have idle professionals, and overused rooms have fewer professionals than necessary. Previous works have not solved this problem since they focus on understanding the evolution of doctor supply and patient demand, as to better adjust one to the other. 
However, they have not proposed concrete solutions for that regarding techniques for better allocating available human resources. Moreover, elasticity is one of the most important features of cloud computing, referring to the ability to add or remove resources according to the needs of the application or service. Based on this background, we introduce Elastic allocation of human resources in Healthcare environments (ElHealth) an IoT-focused model able to monitor patient usage of hospital rooms and adapt these rooms for patients demand. Using reactive and proactive elasticity approaches, ElHealth identifies when a room will have a demand that exceeds the capacity of care, and proposes actions to move human resources to adapt to patient demand. Our main contribution is the definition of Human Resources IoT-based Elasticity (i.e., an extension of the concept of resource elasticity in Cloud Computing to manage the use of human resources in a healthcare environment, where health professionals are allocated and deallocated according to patient demand). Another contribution is a cost-benefit analysis for the use of reactive and predictive strategies on human resources reorganization. 
ElHealth was simulated on a hospital environment using data from a Brazilian polyclinic, and obtained promising results, decreasing the waiting time by up to 96.4% and 96.73% in reactive and proactive approaches, respectively.}, } @article {pmid31450080, year = {2019}, author = {Gu, D and Li, T and Wang, X and Yang, X and Yu, Z}, title = {Visualizing the intellectual structure and evolution of electronic health and telemedicine research.}, journal = {International journal of medical informatics}, volume = {130}, number = {}, pages = {103947}, doi = {10.1016/j.ijmedinf.2019.08.007}, pmid = {31450080}, issn = {1872-8243}, mesh = {Artificial Intelligence ; *Bibliometrics ; Biomedical Research/*trends ; Cloud Computing ; Data Mining/*methods ; *Databases, Factual ; Electronic Health Records/*statistics & numerical data ; Europe ; Humans ; Medical Informatics/*trends ; Telemedicine/*trends ; }, abstract = {BACKGROUND: In recent years, the development and application of emerging information technologies, such as artificial intelligence, cloud computing, Internet of Things, and wearable devices, has expanded the content of electronic health (e-health). Electronic health has become a research focus, but few studies have explored its knowledge structure from a global perspective.

METHODS: To detect the evolution track, knowledge base and research hotspots of e-health, we conducted a series of bibliometric analyses on the retrieved 3,085 papers from the Web of Science core database in 1992-2017. We used several bibliometric tools, such as HistCite, CiteSpace, NetDraw, and NEViewer, to describe the evolution process, time-and-space knowledge map, and hotspots in e-health.

RESULTS: The research results are as follows. (a) The number of publications has been obviously increasing after 2005 and according to the trend line it is expected to continue to increase exponentially in the future. (b) Countries/regions conducting e-health research have close cooperative relationships, among which European countries have the closest cooperation. (c) Electronic health records, mobile health and health information technology are research hotspots in electronic health. Moreover, scholars also pay attention to hot issues such as privacy, security, and quality improvement.

CONCLUSIONS: Electronic health is a large and growing field with quite a number of research articles in medical journals. This study provides a comprehensive knowledge structure of electronic health for scholars in the healthcare informatics field, which can help them quickly grasp research hotspots and choose future research projects.}, } @article {pmid31442994, year = {2020}, author = {Xu, K and Wang, Y and Yang, L and Wang, Y and Qiao, B and Qin, S and Xu, Y and Zhang, H and Qu, H}, title = {CloudDet: Interactive Visual Analysis of Anomalous Performances in Cloud Computing Systems.}, journal = {IEEE transactions on visualization and computer graphics}, volume = {26}, number = {1}, pages = {1107-1117}, doi = {10.1109/TVCG.2019.2934613}, pmid = {31442994}, issn = {1941-0506}, abstract = {Detecting and analyzing potential anomalous performances in cloud computing systems is essential for avoiding losses to customers and ensuring the efficient operation of the systems. To this end, a variety of automated techniques have been developed to identify anomalies in cloud computing. These techniques are usually adopted to track the performance metrics of the system (e.g., CPU, memory, and disk I/O), represented by a multivariate time series. However, given the complex characteristics of cloud computing data, the effectiveness of these automated methods is affected. Thus, substantial human judgment on the automated analysis results is required for anomaly interpretation. In this paper, we present a unified visual analytics system named CloudDet to interactively detect, inspect, and diagnose anomalies in cloud computing systems. A novel unsupervised anomaly detection algorithm is developed to identify anomalies based on the specific temporal patterns of the given metrics data (e.g., the periodic pattern). Rich visualization and interaction designs are used to help understand the anomalies in the spatial and temporal context. 
We demonstrate the effectiveness of CloudDet through a quantitative evaluation, two case studies with real-world data, and interviews with domain experts.}, } @article {pmid31438303, year = {2019}, author = {Lin, HL and Cheng, SM and Hsu, DF and Huang, CC and Wu, DC}, title = {Information System Implementation Optimizes Medical Coding.}, journal = {Studies in health technology and informatics}, volume = {264}, number = {}, pages = {1706-1707}, doi = {10.3233/SHTI190607}, pmid = {31438303}, issn = {1879-8365}, mesh = {*Clinical Coding ; Diagnosis-Related Groups ; *International Classification of Diseases ; Specialization ; Taiwan ; }, abstract = {Diagnosis Related Groups (DRGs) and the Tenth Revision of the International Statistical Classification of Disease and Related Health Problems (ICD-10) were implemented to Taiwan in 2010 and 2016 respectively. New rules related to the medical costs reimbursement were great challenges facing medical institutions. One of the medical centers in north Taiwan introduced an ICD e-dictionary, DRGs cloud computing system, and integrated them into the hospital information system. 
Further, developing a medical coder specialization work model optimized the workflow, coding quality, and efficiency, which defeated the adverse effects of DRGs and ICD-10 implementation successfully.}, } @article {pmid31438074, year = {2019}, author = {Madanian, S and Parry, D}, title = {IoT, Cloud Computing and Big Data: Integrated Framework for Healthcare in Disasters.}, journal = {Studies in health technology and informatics}, volume = {264}, number = {}, pages = {998-1002}, doi = {10.3233/SHTI190374}, pmid = {31438074}, issn = {1879-8365}, mesh = {Cloud Computing ; Delivery of Health Care ; *Disaster Planning ; *Disasters ; Internet ; }, abstract = {Currently, healthcare in disaster management context faces a number of challenges mostly due to the lack of availability of reliable data from diverse sources required to be accessible by appropriate authorities. Therefore, the main objective of this study is the introduction of a framework based on the integration of three technologies, Internet of Things (IoT), cloud computing and big data to solve this issue in all disaster phases and provide precise and effective healthcare. This framework supports healthcare managers by enabling data sharing among them and assists them in performing analytical calculations to discover meaningful, logical and accurate trend(s) required for strategic planning and better preparedness in the face of disasters. 
Also, the outcome of the framework may help decision makers to identify and predict the health consequences of the disasters for any specific geographical location in any country based on its geographical properties and disaster background.}, } @article {pmid31430988, year = {2019}, author = {Chen, S and Wen, H and Wu, J and Xu, A and Jiang, Y and Song, H and Chen, Y}, title = {Radio Frequency Fingerprint-Based Intelligent Mobile Edge Computing for Internet of Things Authentication.}, journal = {Sensors (Basel, Switzerland)}, volume = {19}, number = {16}, pages = {}, pmid = {31430988}, issn = {1424-8220}, support = {2018YFB0904900,2018YFB0904905//National Key R&D Program of China/ ; }, abstract = {In this paper, a light-weight radio frequency fingerprinting identification (RFFID) scheme that combines with a two-layer model is proposed to realize authentications for a large number of resource-constrained terminals under the mobile edge computing (MEC) scenario without relying on encryption-based methods. In the first layer, signal collection, extraction of RF fingerprint features, dynamic feature database storage, and access authentication decision are carried out by the MEC devices. In the second layer, learning features, generating decision models, and implementing machine learning algorithms for recognition are performed by the remote cloud. By this means, the authentication rate can be improved by taking advantage of the machine-learning training methods and computing resource support of the cloud. Extensive simulations are performed under the IoT application scenario. 
The results show that the novel method can achieve higher recognition rate than that of traditional RFFID method by using wavelet feature effectively, which demonstrates the efficiency of our proposed method.}, } @article {pmid31430911, year = {2019}, author = {Yu, S and Park, K and Park, Y}, title = {A Secure Lightweight Three-Factor Authentication Scheme for IoT in Cloud Computing Environment.}, journal = {Sensors (Basel, Switzerland)}, volume = {19}, number = {16}, pages = {}, pmid = {31430911}, issn = {1424-8220}, abstract = {With the development of cloud computing and communication technology, users can access the internet of things (IoT) services provided in various environments, including smart home, smart factory, and smart healthcare. However, a user is vulnerable to various types of attacks, because sensitive information is often transmitted via an open channel. Therefore, secure authentication schemes are essential to provide IoT services for legal users. In 2019, Pelaez et al. presented a lightweight IoT-based authentication scheme in cloud computing environment. However, we prove that Pelaez et al.'s scheme cannot prevent various types of attacks such as impersonation, session key disclosure, and replay attacks and cannot provide mutual authentication and anonymity. In this paper, we present a secure and lightweight three-factor authentication scheme for IoT in cloud computing environment to resolve these security problems. The proposed scheme can withstand various attacks and provide secure mutual authentication and anonymity by utilizing secret parameters and biometrics. We also show that our scheme achieves secure mutual authentication using Burrows-Abadi-Needham logic analysis. Furthermore, we demonstrate that our scheme resists replay and man-in-the-middle attacks using the automated validation of internet security protocols and applications (AVISPA) simulation tool. 
Finally, we compare the performance and the security features of the proposed scheme with some existing schemes. Consequently, we provide better safety and efficiency than related schemes and the proposed scheme is suitable for practical IoT-based cloud computing environment.}, } @article {pmid31426586, year = {2019}, author = {Cao, H and Wachowicz, M}, title = {An Edge-Fog-Cloud Architecture of Streaming Analytics for Internet of Things Applications.}, journal = {Sensors (Basel, Switzerland)}, volume = {19}, number = {16}, pages = {}, pmid = {31426586}, issn = {1424-8220}, support = {IRCPJ 488403-14//NSERC/Cisco Industrial Research Chair/ ; }, abstract = {Exploring Internet of Things (IoT) data streams generated by smart cities means not only transforming data into better business decisions in a timely way but also generating long-term location intelligence for developing new forms of urban governance and organization policies. This paper proposes a new architecture based on the edge-fog-cloud continuum to analyze IoT data streams for delivering data-driven insights in a smart parking scenario.}, } @article {pmid31426555, year = {2019}, author = {Trakadas, P and Nomikos, N and Michailidis, ET and Zahariadis, T and Facca, FM and Breitgand, D and Rizou, S and Masip, X and Gkonis, P}, title = {Hybrid Clouds for Data-Intensive, 5G-Enabled IoT Applications: An Overview, Key Issues and Relevant Architecture.}, journal = {Sensors (Basel, Switzerland)}, volume = {19}, number = {16}, pages = {}, pmid = {31426555}, issn = {1424-8220}, support = {ICT-762013//Horizon 2020/ ; }, abstract = {Hybrid cloud multi-access edge computing (MEC) deployments have been proposed as efficient means to support Internet of Things (IoT) applications, relying on a plethora of nodes and data. 
In this paper, an overview on the area of hybrid clouds considering relevant research areas is given, providing technologies and mechanisms for the formation of such MEC deployments, as well as emphasizing several key issues that should be tackled by novel approaches, especially under the 5G paradigm. Furthermore, a decentralized hybrid cloud MEC architecture, resulting in a Platform-as-a-Service (PaaS) is proposed and its main building blocks and layers are thoroughly described. Aiming to offer a broad perspective on the business potential of such a platform, the stakeholder ecosystem is also analyzed. Finally, two use cases in the context of smart cities and mobile health are presented, aimed at showing how the proposed PaaS enables the development of respective IoT applications.}, } @article {pmid31423028, year = {2019}, author = {Taneja, M and Jalodia, N and Byabazaire, J and Davy, A and Olariu, C}, title = {SmartHerd management: A microservices-based fog computing-assisted IoT platform towards data-driven smart dairy farming.}, journal = {Software: practice & experience}, volume = {49}, number = {7}, pages = {1055-1078}, pmid = {31423028}, issn = {0038-0644}, abstract = {Internet of Things (IoT), fog computing, cloud computing, and data-driven techniques together offer a great opportunity for verticals such as dairy industry to increase productivity by getting actionable insights to improve farming practices, thereby increasing efficiency and yield. In this paper, we present SmartHerd, a fog computing-assisted end-to-end IoT platform for animal behavior analysis and health monitoring in a dairy farming scenario. The platform follows a microservices-oriented design to assist the distributed computing paradigm and addresses the major issue of constrained Internet connectivity in remote farm locations. 
We present the implementation of the designed software system in a 6-month mature real-world deployment, wherein the data from wearables on cows is sent to a fog-based platform for data classification and analysis, which includes decision-making capabilities and provides actionable insights to farmer towards the welfare of animals. With fog-based computational assistance in the SmartHerd setup, we see an 84% reduction in amount of data transferred to the cloud as compared with the conventional cloud-based approach.}, } @article {pmid31400820, year = {2020}, author = {Savari, GF and Krishnasamy, V and Sathik, J and Ali, ZM and Abdel Aleem, SHE}, title = {Internet of Things based real-time electric vehicle load forecasting and charging station recommendation.}, journal = {ISA transactions}, volume = {97}, number = {}, pages = {431-447}, doi = {10.1016/j.isatra.2019.08.011}, pmid = {31400820}, issn = {1879-2022}, abstract = {Electric vehicles (EVs) are emerging as a favorable strategy to meet the increasing environmental concerns and energy insufficiency, and this trend is expected to grow in the near future. However, the inadequate charging infrastructure is becoming a major barrier to the wide acceptance of EVs. Deployment of this infrastructure is expected to maximize the adoption of EVs to facilitate users' range anxiety. Therefore, connectivity between the charging stations (CS) is mandatory. Understanding the real-time status of CSs can provide valuable information to users such as availability of charging provisions, reserves and the time to reach the CS. The intent of this paper is to provide a better EV charging system by utilizing the advantages of the Internet of Things (IoT) technology. The IoT paradigm offers the present facilities a real-time interactional view of the physical world by a variety of sensors and broadcasting tools. 
This research article proposes a real-time server-based forecasting application: i) to provide scheduling management to avoid waiting time; and ii) to provide a real-time CS recommendation for EVs with an economic cost and reduced charging time. In addition, the proposed scheme avoids third-party intervention and protects EV user privacy and complex information exchange between the user and CS. The end users can easily use the CS based on their requirements. This synergetic application is built up through the PHP programming language in the Linux UBUNTU 16.04 LTS operating system, and all relevant information is processed and managed through Cloud Structured Query Language (CSQL) from a Google cloud platform. The effectiveness of this application is also validated through a low-cost test system using LTC 4150, ESP 8266 Wi-Fi module and Arduino.}, } @article {pmid31397316, year = {2019}, author = {Leonard, C and Wood, S and Holmes, O and Waddell, N and Gorse, D and Hansen, DP and Pearson, JV}, title = {Running Genomic Analyses in the Cloud.}, journal = {Studies in health technology and informatics}, volume = {266}, number = {}, pages = {149-155}, doi = {10.3233/SHTI190787}, pmid = {31397316}, issn = {1879-8365}, mesh = {Cloud Computing ; *Genomics ; Queensland ; }, abstract = {Genomic testing is rapidly moving into healthcare practice. However, it comes with informatics challenges that the healthcare system has not previously faced - the raw data can be hundreds of gigabytes per test, the compute demands can be thousands of CPU hours, and the test can reveal deeply private health-related information that can have implications for anyone related to the person tested. While not a panacea, cloud computing has particular properties that can ameliorate some of these difficulties. 
This paper presents some of the key lessons learned while deploying a set of genomic analyses on cloud computing for Queensland Genomics.}, } @article {pmid31396908, year = {2019}, author = {Kohlhoff, KJ}, title = {Google-Accelerated Biomolecular Simulations.}, journal = {Methods in molecular biology (Clifton, N.J.)}, volume = {2022}, number = {}, pages = {291-309}, doi = {10.1007/978-1-4939-9608-7_12}, pmid = {31396908}, issn = {1940-6029}, mesh = {*Cloud Computing ; Computational Biology/methods ; Information Storage and Retrieval/*methods ; Molecular Dynamics Simulation ; Proteins/*chemistry ; Software ; }, abstract = {Biomolecular simulations rely heavily on the availability of suitable compute infrastructure for data-driven tasks like modeling, sampling, and analysis. These resources are typically available on a per-lab and per-facility basis, or through dedicated national supercomputing centers. In recent years, cloud computing has emerged as an alternative by offering an abundance of on-demand, specialist-maintained resources that enable efficiency and increased turnaround through rapid scaling. Scientific computations that take the shape of parallel workloads using large datasets are commonplace, making them ideal candidates for distributed computing in the cloud. Recent developments have greatly simplified the task for the experimenter to configure the cloud for use and job submission. This chapter will show how to use Google's Cloud Platform for biomolecular simulations by example of the molecular dynamics package GROningen MAchine for Chemical Simulations (GROMACS). 
The instructions readily transfer to a large variety of other tasks, allowing the reader to use the cloud for their specific purposes. Importantly, by using Docker containers, a popular light-weight virtualization solution, and cloud storage, key issues in scientific research are addressed: reproducibility of results, record keeping, and the possibility for other researchers to obtain copies and directly build upon previous work for further experimentation and hypothesis testing.}, } @article {pmid31394750, year = {2019}, author = {Kianoush, S and Savazzi, S and Rampa, V and Nicoli, M}, title = {People Counting by Dense WiFi MIMO Networks: Channel Features and Machine Learning Algorithms.}, journal = {Sensors (Basel, Switzerland)}, volume = {19}, number = {16}, pages = {}, pmid = {31394750}, issn = {1424-8220}, abstract = {Subject counting systems are extensively used in ambient intelligence applications, such as smart home, smart building and smart retail scenarios. In this paper, we investigate the problem of transforming an unmodified WiFi radio infrastructure into a flexible sensing system for passive subject counting. We first introduce the multi-dimensional channel features that capture the subject presence. Then, we compare Bayesian and neural network based machine learning tools specialized for subject discrimination and counting. Ensemble classification is used to leverage space-frequency diversity and combine learning tools trained with different channel features. A combination of multiple models is shown to improve the counting accuracy. System design is based on a dense network of WiFi devices equipped with multiple antennas. Experimental validation is conducted in an indoor space featuring up to five moving people. Real-time computing and practical solutions for cloud migration are also considered. 
The proposed approach for passive counting gives detection results with 99% average accuracy.}, } @article {pmid31382536, year = {2019}, author = {Real, S and Araujo, A}, title = {Navigation Systems for the Blind and Visually Impaired: Past Work, Challenges, and Open Problems.}, journal = {Sensors (Basel, Switzerland)}, volume = {19}, number = {15}, pages = {}, pmid = {31382536}, issn = {1424-8220}, mesh = {Blindness/*pathology ; Geographic Information Systems ; Humans ; Radio Frequency Identification Device ; *Sensory Aids ; Smartphone ; User-Computer Interface ; Vision Disorders/*pathology ; Wearable Electronic Devices ; }, abstract = {Over the last decades, the development of navigation devices capable of guiding the blind through indoor and/or outdoor scenarios has remained a challenge. In this context, this paper's objective is to provide an updated, holistic view of this research, in order to enable developers to exploit the different aspects of its multidisciplinary nature. To that end, previous solutions will be briefly described and analyzed from a historical perspective, from the first "Electronic Travel Aids" and early research on sensory substitution or indoor/outdoor positioning, to recent systems based on artificial vision. Thereafter, user-centered design fundamentals are addressed, including the main points of criticism of previous approaches. Finally, several technological achievements are highlighted as they could underpin future feasible designs. In line with this, smartphones and wearables with built-in cameras will then be indicated as potentially feasible options with which to support state-of-art computer vision solutions, thus allowing for both the positioning and monitoring of the user's surrounding area. 
These functionalities could then be further boosted by means of remote resources, leading to cloud computing schemas or even remote sensing via urban infrastructure.}, } @article {pmid31372538, year = {2019}, author = {Soltanshahi, M and Asemi, R and Shafiei, N}, title = {Energy-aware virtual machines allocation by krill herd algorithm in cloud data centers.}, journal = {Heliyon}, volume = {5}, number = {7}, pages = {e02066}, doi = {10.1016/j.heliyon.2019.e02066}, pmid = {31372538}, issn = {2405-8440}, abstract = {The growing demand for computational power has led to the emergence of large-scale data centers that consume massive amounts of energy, thus resulting in high operating costs and CO2 emission. Furthermore, cloud computing environments are required to provide a high Quality of Service (QoS) to their clients and, therefore, need to handle power shortages. An optimized virtual machine allocation to physical hosts lowers energy consumption and allows for high-quality services. In this study, a novel solution was proposed for the allocation of virtual machines to physical hosts in cloud data centers using the Krill Herd algorithm, which is the fastest collective intelligence algorithm recently introduced. The performance of the proposed method was evaluated using the CloudSim simulator, and the results are suggestive of a 35% reduction in energy consumption.}, } @article {pmid31371732, year = {2019}, author = {Liu, S and Zhang, L and Wang, B}, title = {Individual diversity between interdependent networks promotes the evolution of cooperation by means of mixed coupling.}, journal = {Scientific reports}, volume = {9}, number = {1}, pages = {11163}, pmid = {31371732}, issn = {2045-2322}, abstract = {Along with the rapid development of network-based information technology, such as cloud computing, big data, the IoT, and so on, human society has stepped into a new era of complex networks. 
People's life and production activities depend more and more on various complex networks to ensure security and reliability. The complex interrelationships between human and nature establish a link to explain the cooperation of individual behaviour, especially for individual diversity. However, existing researches mostly ignore the influence of individual diversity on networks involved in individual behaviour to strategy selection. Therefore, it needs further research on how to consider both individual diversity and independent networks in the evolution of cooperative behaviour. To address this issue, we extend a simple game model into the interdependent networks through the mixed coupling (i.e., utility and probability) in this work. Also, we divide the kinds of strategic behaviour of a player in one layer concerning individual diversity. Moreover, there exists an optimal region of mixed coupling between networks such that cooperation can be promoted. Finally, experimental results can open the path to understanding the emergence and maintenance of cooperation within various interconnected and interrelated real-world systems newly.}, } @article {pmid31370322, year = {2019}, author = {Zhang, J and Ou, P}, title = {Privacy-Preserving Multi-Receiver Certificateless Broadcast Encryption Scheme with De-Duplication.}, journal = {Sensors (Basel, Switzerland)}, volume = {19}, number = {15}, pages = {}, pmid = {31370322}, issn = {1424-8220}, abstract = {Nowadays, the widely deployed and high performance Internet of Things (IoT) facilitates the communication between its terminal nodes. To enhance data sharing among terminal devices and ensure the recipients' privacy protection, a few anonymous multi-recipient broadcast encryption (AMBE) proposals are recently given. Nevertheless, the majority of these AMBE proposals are only proven be securely against adaptively chosen plain-text attack (CPA) or selectively chosen ciphertext attack (CCA). 
Furthermore, all AMBE proposals are subject to the key escrow issue due to inherent characteristics of the ID-based public cryptography (ID-PKC), and cannot furnish secure de-duplication detection. However, for cloud storage, it is very important to expurgate duplicate copies of the identical message since de-duplication can save the bandwidth of network and storage space. To address the above problems, in this work, we present a privacy-preserving multi-receiver certificateless broadcast encryption scheme with de-duplication (PMCBED) in the cloud-computing setting based on certificateless cryptography and anonymous broadcast encryption. In comparison with the prior AMBE proposals, our scheme has the following three characteristics. First, it can fulfill semantic security notions of data-confidentiality and receiver identity anonymity, whereas the existing proposals only accomplish them by formalizing the weaker security models. Second, it achieves duplication detection of the ciphertext for the identical message encrypted with our broadcast encryption. Finally, it also avoids the key escrow problem of the AMBE schemes.}, } @article {pmid31370149, year = {2019}, author = {Sittón-Candanedo, I and Alonso, RS and García, Ó and Muñoz, L and Rodríguez-González, S}, title = {Edge Computing, IoT and Social Computing in Smart Energy Scenarios.}, journal = {Sensors (Basel, Switzerland)}, volume = {19}, number = {15}, pages = {}, pmid = {31370149}, issn = {1424-8220}, support = {Inés Sittón has been supported by IFARHU - SENACYT scholarship program (Government of Panama)//Secretaria Nacional de Ciencia y Tecnología/ ; Interreg program V-A Spain-Portugal 2014-2020 (PocTep)//European Regional Development Fund/ ; }, abstract = {The Internet of Things (IoT) has become one of the most widely researched paradigms, having received much attention from the research community in the last few years. 
IoT is the paradigm that creates an internet-connected world, where all the everyday objects capture data from our environment and adapt it to our needs. However, the implementation of IoT is a challenging task and all the implementation scenarios require the use of different technologies and the emergence of new ones, such as Edge Computing (EC). EC allows for more secure and efficient data processing in real time, achieving better performance and results. Energy efficiency is one of the most interesting IoT scenarios. In this scenario, sensors, actuators and smart devices interact to generate a large volume of data associated with energy consumption. This work proposes the use of an Edge-IoT platform and a Social Computing framework to build a system aimed at smart energy efficiency in a public building scenario. The system has been evaluated in a public building and the results make evident the notable benefits that come from applying Edge Computing to both energy efficiency scenarios and the framework itself. 
Those benefits included reduced data transfer from the IoT-Edge to the Cloud and reduced Cloud computing and network resource costs.}, } @article {pmid31357725, year = {2019}, author = {Fernández-Caramés, TM and Froiz-Míguez, I and Blanco-Novoa, O and Fraga-Lamas, P}, title = {Enabling the Internet of Mobile Crowdsourcing Health Things: A Mobile Fog Computing, Blockchain and IoT Based Continuous Glucose Monitoring System for Diabetes Mellitus Research and Care.}, journal = {Sensors (Basel, Switzerland)}, volume = {19}, number = {15}, pages = {}, pmid = {31357725}, issn = {1424-8220}, support = {ED431C 2016-045, ED431G/01//Xunta de Galicia/ ; TEC2016-75067-C4-1-R//Agencia Estatal de Investigación of Spain/ ; }, mesh = {Blockchain ; Blood Glucose/*isolation & purification ; Blood Glucose Self-Monitoring/*methods ; Crowdsourcing ; Diabetes Mellitus/*blood/pathology ; Humans ; Internet ; Mobile Applications ; *Monitoring, Physiologic ; Telemedicine ; }, abstract = {Diabetes patients suffer from abnormal blood glucose levels, which can cause diverse health disorders that affect their kidneys, heart and vision. Due to these conditions, diabetes patients have traditionally checked blood glucose levels through Self-Monitoring of Blood Glucose (SMBG) techniques, like pricking their fingers multiple times per day. Such techniques involve a number of drawbacks that can be solved by using a device called Continuous Glucose Monitor (CGM), which can measure blood glucose levels continuously throughout the day without having to prick the patient when carrying out every measurement. This article details the design and implementation of a system that enhances commercial CGMs by adding Internet of Things (IoT) capabilities to them that allow for monitoring patients remotely and, thus, warning them about potentially dangerous situations. 
The proposed system makes use of smartphones to collect blood glucose values from CGMs and then sends them either to a remote cloud or to distributed fog computing nodes. Moreover, in order to exchange reliable, trustworthy and cybersecure data with medical scientists, doctors and caretakers, the system includes the deployment of a decentralized storage system that receives, processes and stores the collected data. Furthermore, in order to motivate users to add new data to the system, an incentive system based on a digital cryptocurrency named GlucoCoin was devised. Such a system makes use of a blockchain that is able to execute smart contracts in order to automate CGM sensor purchases or to reward the users that contribute to the system by providing their own data. Thanks to all the previously mentioned technologies, the proposed system enables patient data crowdsourcing and the development of novel mobile health (mHealth) applications for diagnosing, monitoring, studying and taking public health actions that can help to advance in the control of the disease and raise global awareness on the increasing prevalence of diabetes.}, } @article {pmid31357720, year = {2019}, author = {Salhaoui, M and Guerrero-González, A and Arioua, M and Ortiz, FJ and El Oualkadi, A and Torregrosa, CL}, title = {Smart Industrial IoT Monitoring and Control System Based on UAV and Cloud Computing Applied to a Concrete Plant.}, journal = {Sensors (Basel, Switzerland)}, volume = {19}, number = {15}, pages = {}, pmid = {31357720}, issn = {1424-8220}, abstract = {Unmanned aerial vehicles (UAVs) are now considered one of the best remote sensing techniques for gathering data over large areas. They are now being used in the industry sector as sensing tools for proactively solving or preventing many issues, besides quantifying production and helping to make decisions. UAVs are a highly consistent technological platform for efficient and cost-effective data collection and event monitoring. 
The industrial Internet of things (IIoT) sends data from systems that monitor and control the physical world to data processing systems that cloud computing has shown to be important tools for meeting processing requirements. In fog computing, the IoT gateway links different objects to the internet. It can operate as a joint interface for different networks and support different communication protocols. A great deal of effort has been put into developing UAVs and multi-UAV systems. This paper introduces a smart IIoT monitoring and control system based on an unmanned aerial vehicle that uses cloud computing services and exploits fog computing as the bridge between IIoT layers. Its novelty lies in the fact that the UAV is automatically integrated into an industrial control system through an IoT gateway platform, while UAV photos are systematically and instantly computed and analyzed in the cloud. Visual supervision of the plant by drones and cloud services is integrated in real-time into the control loop of the industrial control system. As a proof of concept, the platform was used in a case study in an industrial concrete plant. The results obtained clearly illustrate the feasibility of the proposed platform in providing a reliable and efficient system for UAV remote control to improve product quality and reduce waste. 
For this, we studied the communication latency between the different IIoT layers in different IoT gateways.}, } @article {pmid31357407, year = {2019}, author = {Fraga-Lamas, P and Celaya-Echarri, M and Lopez-Iturri, P and Castedo, L and Azpilicueta, L and Aguirre, E and Suárez-Albela, M and Falcone, F and Fernández-Caramés, TM}, title = {Design and Experimental Validation of a LoRaWAN Fog Computing Based Architecture for IoT Enabled Smart Campus Applications.}, journal = {Sensors (Basel, Switzerland)}, volume = {19}, number = {15}, pages = {}, pmid = {31357407}, issn = {1424-8220}, support = {ED431C 2016-045, ED431G/01//Xunta de Galicia/ ; TEC2016-75067-C4-1-R//Agencia Estatal de Investigación of Spain/ ; }, abstract = {A smart campus is an intelligent infrastructure where smart sensors and actuators collaborate to collect information and interact with the machines, tools, and users of a university campus. As in a smart city, a smart campus represents a challenging scenario for Internet of Things (IoT) networks, especially in terms of cost, coverage, availability, latency, power consumption, and scalability. The technologies employed so far to cope with such a scenario are not yet able to manage simultaneously all the previously mentioned demanding requirements. Nevertheless, recent paradigms such as fog computing, which extends cloud computing to the edge of a network, make possible low-latency and location-aware IoT applications. Moreover, technologies such as Low-Power Wide-Area Networks (LPWANs) have emerged as a promising solution to provide low-cost and low-power consumption connectivity to nodes spread throughout a wide area. Specifically, the Long-Range Wide-Area Network (LoRaWAN) standard is one of the most recent developments, receiving attention both from industry and academia. 
In this article, the use of a LoRaWAN fog computing-based architecture is proposed for providing connectivity to IoT nodes deployed in a campus of the University of A Coruña (UDC), Spain. To validate the proposed system, the smart campus has been recreated realistically through an in-house developed 3D Ray-Launching radio-planning simulator that is able to take into consideration even small details, such as traffic lights, vehicles, people, buildings, urban furniture, or vegetation. The developed tool can provide accurate radio propagation estimations within the smart campus scenario in terms of coverage, capacity, and energy efficiency of the network. The results obtained with the planning simulator can then be compared with empirical measurements to assess the operating conditions and the system accuracy. Specifically, this article presents experiments that show the accurate results obtained by the planning simulator in the largest scenario ever built for it (a campus that covers an area of 26,000 m 2), which are corroborated with empirical measurements. Then, how the tool can be used to design the deployment of LoRaWAN infrastructure for three smart campus outdoor applications is explained: a mobility pattern detection system, a smart irrigation solution, and a smart traffic-monitoring deployment. 
Consequently, the presented results provide guidelines to smart campus designers and developers, and for easing LoRaWAN network deployment and research in other smart campuses and large environments such as smart cities.}, } @article {pmid31354949, year = {2019}, author = {Belmann, P and Fischer, B and Krüger, J and Procházka, M and Rasche, H and Prinz, M and Hanussek, M and Lang, M and Bartusch, F and Gläßle, B and Krüger, J and Pühler, A and Sczyrba, A}, title = {de.NBI Cloud federation through ELIXIR AAI.}, journal = {F1000Research}, volume = {8}, number = {}, pages = {842}, pmid = {31354949}, issn = {2046-1402}, mesh = {*Biological Science Disciplines ; Germany ; *Software ; }, abstract = {The academic de.NBI Cloud offers compute resources for life science research in Germany. At the beginning of 2017, de.NBI Cloud started to implement a federated cloud consisting of five compute centers, with the aim of acting as one resource to their users. A federated cloud introduces multiple challenges, such as a central access and project management point, a unified account across all cloud sites and an interchangeable project setup across the federation. In order to implement the federation concept, de.NBI Cloud integrated with the ELIXIR authentication and authorization infrastructure system (ELIXIR AAI) and in particular Perun, the identity and access management system of ELIXIR. The integration solves the mentioned challenges and represents a backbone, connecting five compute centers which are based on OpenStack and a web portal for accessing the federation. This article explains the steps taken and software components implemented for setting up a federated cloud based on the collaboration between de.NBI Cloud and ELIXIR AAI. 
Furthermore, the setup and components that are described are generic and can therefore be used for other upcoming or existing federated OpenStack clouds in Europe.}, } @article {pmid31354187, year = {2019}, author = {Makkie, M and Huang, H and Zhao, Y and Vasilakos, AV and Liu, T}, title = {Fast and scalable distributed deep convolutional autoencoder for fMRI big data analytics.}, journal = {Neurocomputing}, volume = {325}, number = {}, pages = {20-30}, pmid = {31354187}, issn = {0925-2312}, support = {R01 AG042599/AG/NIA NIH HHS/United States ; R01 DA033393/DA/NIDA NIH HHS/United States ; }, abstract = {In recent years, analyzing task-based fMRI (tfMRI) data has become an essential tool for understanding brain function and networks. However, due to the sheer size of tfMRI data, its intrinsic complex structure, and lack of ground truth of underlying neural activities, modeling tfMRI data is hard and challenging. Previously proposed data modeling methods including Independent Component Analysis (ICA) and Sparse Dictionary Learning only provided shallow models based on blind source separation under the strong assumption that original fMRI signals could be linearly decomposed into time series components with corresponding spatial maps. Given the Convolutional Neural Network (CNN) successes in learning hierarchical abstractions from low-level data such as tfMRI time series, in this work we propose a novel scalable distributed deep CNN autoencoder model and apply it for fMRI big data analysis. This model aims to both learn the complex hierarchical structures of the tfMRI big data and to leverage the processing power of multiple GPUs in a distributed fashion. To deploy such a model, we have created an enhanced processing pipeline on the top of Apache Spark and Tensorflow, leveraging from a large cluster of GPU nodes over cloud. 
Experimental results from applying the model on the Human Connectome Project (HCP) data show that the proposed model is efficient and scalable toward tfMRI big data modeling and analytics, thus enabling data-driven extraction of hierarchical neuroscientific information from massive fMRI big data.}, } @article {pmid31352360, year = {2020}, author = {Li, Y and Wen, Y and Tao, D and Guan, K}, title = {Transforming Cooling Optimization for Green Data Center via Deep Reinforcement Learning.}, journal = {IEEE transactions on cybernetics}, volume = {50}, number = {5}, pages = {2002-2013}, doi = {10.1109/TCYB.2019.2927410}, pmid = {31352360}, issn = {2168-2275}, abstract = {Data center (DC) plays an important role to support services, such as e-commerce and cloud computing. The resulting energy consumption from this growing market has drawn significant attention, and noticeably almost half of the energy cost is used to cool the DC to a particular temperature. It is thus a critical operational challenge to curb the cooling energy cost without sacrificing the thermal safety of a DC. The existing solutions typically follow a two-step approach, in which the system is first modeled based on expert knowledge and, thus, the operational actions are determined with heuristics and/or best practices. These approaches are often hard to generalize and might result in suboptimal performances due to intrinsic model errors for large-scale systems. In this paper, we propose optimizing the DC cooling control via the emerging deep reinforcement learning (DRL) framework. Compared to the existing approaches, our solution lends itself an end-to-end cooling control algorithm (CCA) via an off-policy offline version of the deep deterministic policy gradient (DDPG) algorithm, in which an evaluation network is trained to predict the DC energy cost along with resulting cooling effects, and a policy network is trained to gauge optimized control settings. 
Moreover, we introduce a de-underestimation (DUE) validation mechanism for the critic network to reduce the potential underestimation of the risk caused by neural approximation. Our proposed algorithm is evaluated on an EnergyPlus simulation platform and on a real data trace collected from the National Super Computing Centre (NSCC) of Singapore. The resulting numerical results show that the proposed CCA can achieve up to 11% cooling cost reduction on the simulation platform compared with a manually configured baseline control algorithm. In the trace-based study of conservative nature, the proposed algorithm can achieve about 15% cooling energy savings on the NSCC data trace. Our pioneering approach can shed new light on the application of DRL to optimize and automate DC operations and management, potentially revolutionizing digital infrastructure management with intelligence.}, } @article {pmid31340460, year = {2019}, author = {Xu, J and Hao, Z and Sun, X}, title = {Optimal Offloading Decision Strategies and Their Influence Analysis of Mobile Edge Computing.}, journal = {Sensors (Basel, Switzerland)}, volume = {19}, number = {14}, pages = {}, pmid = {31340460}, issn = {1424-8220}, abstract = {Mobile edge computing (MEC) has become more popular both in academia and industry. Currently, with the help of edge servers and cloud servers, it is one of the substantial technologies to overcome the latency between cloud server and wireless device, computation capability and storage shortage of wireless devices. In mobile edge computing, wireless devices take responsibility with input data. At the same time, edge servers and cloud servers take charge of computation and storage. However, until now, how to balance the power consumption of edge devices and time delay has not been well addressed in mobile edge computing. In this paper, we focus on strategies of the task offloading decision and the influence analysis of offloading decisions on different environments. 
Firstly, we propose a system model considering both energy consumption and time delay and formulate it into an optimization problem. Then, we employ two algorithms-Enumerating and Branch-and-Bound-to get the optimal or near-optimal decision for minimizing the system cost including the time delay and energy consumption. Furthermore, we compare the performance between two algorithms and draw the conclusion that the comprehensive performance of Branch-and-Bound algorithm is better than that of the other. Finally, we analyse the influence factors of optimal offloading decisions and the minimum cost in detail by changing key parameters.}, } @article {pmid31327114, year = {2020}, author = {Valluru, D and Jeya, IJS}, title = {IoT with cloud based lung cancer diagnosis model using optimal support vector machine.}, journal = {Health care management science}, volume = {23}, number = {4}, pages = {670-679}, doi = {10.1007/s10729-019-09489-x}, pmid = {31327114}, issn = {1572-9389}, mesh = {Algorithms ; Cloud Computing ; Humans ; *Internet of Things ; Lung Neoplasms/*diagnostic imaging ; *Support Vector Machine ; Tomography, X-Ray Computed ; }, abstract = {In the last decade, exponential growth of Internet of Things (IoT) and cloud computing takes the healthcare services to the next level. At the same time, lung cancer is identified as a dangerous disease which increases the global mortality rate annually. Presently, support vector machine (SVM) is the effective image classification tool especially in medical imaging. Feature selection and parameter optimization are the effective ways to improve the results of SVM and are conventionally resolved individually. This paper presents an optimal SVM for lung image classification where the parameters of SVM are optimized and feature selection takes place by modified grey wolf optimization algorithm combined with genetic algorithm (GWO-GA). 
The experimentation part takes place on three dimensions: test for parameter optimization, feature selection, and optimal SVM. For assessing the performance of the presented approach, a benchmark image database is employed which comprises 50 low-dosage and stored lung CT images. The presented method exhibits its superior results on all the applied test images under several aspects. In addition, it achieves average classification accuracy of 93.54, which is significantly higher than the compared methods.}, } @article {pmid31324263, year = {2019}, author = {Odun-Ayo, I and Goddy-Worlu, R and Samuel, V and Geteloma, V}, title = {Cloud designs and deployment models: a systematic mapping study.}, journal = {BMC research notes}, volume = {12}, number = {1}, pages = {436}, pmid = {31324263}, issn = {1756-0500}, mesh = {Cloud Computing/*statistics & numerical data ; Computational Biology/methods/*statistics & numerical data ; Computer Security ; Data Mining/methods/*statistics & numerical data ; Humans ; Information Management/methods/*statistics & numerical data ; Privacy ; Research Design/standards/statistics & numerical data ; }, abstract = {BACKGROUND: Cloud computing is a unique paradigm that is aggregating resources available from cloud service providers for use by customers on demand and pay per use basis. There is a Cloud federation that integrates the four primary Cloud models and the Cloud aggregator that integrates multiple computing services. A systematic mapping study provides an overview of work done in a particular field of interest and identifies gaps for further research.

OBJECTIVES: The objective of this paper was to conduct a study of deployment and designs models for Cloud using a systematic mapping process. The methodology involves examining core aspect of the field of study using the research, contribution and topic facets.

RESULTS: The results obtained indicated that there were more publications on solution proposals, which constituted 41.98% of papers relating to design and deployment models on the Cloud. Out of this, 5.34% was on security, 1.5% on privacy, 6.11% on configuration, 7.63% on implementation, 11.45% on service deployment, and 9.92% of the solution proposal was on design. The results obtained will be useful for further studies by the academia and industry in this broad topic that was examined.}, } @article {pmid31324039, year = {2019}, author = {Fernández-Cerero, D and Fernández-Rodríguez, JY and Álvarez-García, JA and Soria-Morillo, LM and Fernández-Montes, A}, title = {Single-Board-Computer Clusters for Cloudlet Computing in Internet of Things.}, journal = {Sensors (Basel, Switzerland)}, volume = {19}, number = {13}, pages = {}, pmid = {31324039}, issn = {1424-8220}, support = {754489//H2020 Marie Skłodowska-Curie Actions/ ; 13/RC/2094//Science Foundation Ireland/Ireland ; TIN2017-82113-C2-1-R//Ministerio de Economía y Competitividad/ ; RTI2018-098062-A-I00//Ministerio de Economía y Competitividad/ ; }, abstract = {The number of connected sensors and devices is expected to increase to billions in the near future. However, centralised cloud-computing data centres present various challenges to meet the requirements inherent to Internet of Things (IoT) workloads, such as low latency, high throughput and bandwidth constraints. Edge computing is becoming the standard computing paradigm for latency-sensitive real-time IoT workloads, since it addresses the aforementioned limitations related to centralised cloud-computing models. Such a paradigm relies on bringing computation close to the source of data, which presents serious operational challenges for large-scale cloud-computing providers. In this work, we present an architecture composed of low-cost Single-Board-Computer clusters near to data sources, and centralised cloud-computing data centres. 
The proposed cost-efficient model may be employed as an alternative to fog computing to meet real-time IoT workload requirements while keeping scalability. We include an extensive empirical analysis to assess the suitability of single-board-computer clusters as cost-effective edge-computing micro data centres. Additionally, we compare the proposed architecture with traditional cloudlet and cloud architectures, and evaluate them through extensive simulation. We finally show that acquisition costs can be drastically reduced while keeping performance levels in data-intensive IoT use cases.}, } @article {pmid31313979, year = {2020}, author = {Garvey, C and Maskal, C}, title = {Sentiment Analysis of the News Media on Artificial Intelligence Does Not Support Claims of Negative Bias Against Artificial Intelligence.}, journal = {Omics : a journal of integrative biology}, volume = {24}, number = {5}, pages = {286-299}, doi = {10.1089/omi.2019.0078}, pmid = {31313979}, issn = {1557-8100}, mesh = {Artificial Intelligence/*legislation & jurisprudence ; Delivery of Health Care/*legislation & jurisprudence ; Humans ; Technology/legislation & jurisprudence ; }, abstract = {Artificial intelligence (AI) is a hot topic in digital health, as automated systems are being adopted throughout the health care system. Because they are still flexible, emerging technologies can be shaped significantly by media representations as well as public engagement with science. In this context, we examine the belief that negative news media coverage of AI-and specifically, the alleged use of imagery from the movie Terminator-is to blame for public concerns about AI. This belief is identified as a potential barrier to meaningful engagement of AI scientists and technology developers with journalists and the broader public. 
We name this climate of risk perception the "Terminator Syndrome"-not because of its origins in the movie of the same name per se, but because such unchecked beliefs can terminate broad public engagement on AI before they even begin. Using both quantitative and qualitative approaches, this study examined the hypothesis that the news media coverage of AI is negative. We conducted a sentiment analysis of news data spanning over six decades, from 1956 to 2018, using the Google Cloud Natural Language API Sentiment Analysis tool. Contrary to the alleged negative sentiment in news media coverage of AI, we found that the available evidence does not support this claim. We conclude with an innovation policy-relevant discussion on the current state of AI risk perceptions, and what critical social sciences offer for responsible AI innovation in digital health, life sciences, and society.}, } @article {pmid31295891, year = {2019}, author = {Moon, J and Kum, S and Lee, S}, title = {A Heterogeneous IoT Data Analysis Framework with Collaboration of Edge-Cloud Computing: Focusing on Indoor PM10 and PM2.5 Status Prediction.}, journal = {Sensors (Basel, Switzerland)}, volume = {19}, number = {14}, pages = {}, pmid = {31295891}, issn = {1424-8220}, abstract = {The edge platform has evolved to become a part of a distributed computing environment. While typical edges do not have enough processing power to train machine learning models in real time, it is common to generate models in the cloud for use on the edge. The pattern of heterogeneous Internet of Things (IoT) data is dependent on individual circumstances. It is not easy to guarantee prediction performance when a monolithic model is used without considering the spatial characteristics of the space generating those data. In this paper, we propose a collaborative framework using a new method to select the best model for the edge from candidate models of cloud based on sample data correlation. 
This method lets the edge use the most suitable model without any training tasks on the edge side, and it also minimizes privacy issues. We apply the proposed method to predict future fine particulate matter concentration in an individual space. The results suggest that our method can provide better performance than the previous method.}, } @article {pmid31289831, year = {2019}, author = {Youens-Clark, K and Bomhoff, M and Ponsero, AJ and Wood-Charlson, EM and Lynch, J and Choi, I and Hartman, JH and Hurwitz, BL}, title = {iMicrobe: Tools and data-driven discovery platform for the microbiome sciences.}, journal = {GigaScience}, volume = {8}, number = {7}, pages = {}, pmid = {31289831}, issn = {2047-217X}, mesh = {Big Data ; Metagenome ; Metagenomics/*methods ; Microbiota/*genetics ; *Software ; }, abstract = {BACKGROUND: Scientists have amassed a wealth of microbiome datasets, making it possible to study microbes in biotic and abiotic systems on a population or planetary scale; however, this potential has not been fully realized given that the tools, datasets, and computation are available in diverse repositories and locations. To address this challenge, we developed iMicrobe.us, a community-driven microbiome data marketplace and tool exchange for users to integrate their own data and tools with those from the broader community.

FINDINGS: The iMicrobe platform brings together analysis tools and microbiome datasets by leveraging National Science Foundation-supported cyberinfrastructure and computing resources from CyVerse, Agave, and XSEDE. The primary purpose of iMicrobe is to provide users with a freely available, web-based platform to (1) maintain and share project data, metadata, and analysis products, (2) search for related public datasets, and (3) use and publish bioinformatics tools that run on highly scalable computing resources. Analysis tools are implemented in containers that encapsulate complex software dependencies and run on freely available XSEDE resources via the Agave API, which can retrieve datasets from the CyVerse Data Store or any web-accessible location (e.g., FTP, HTTP).

CONCLUSIONS: iMicrobe promotes data integration, sharing, and community-driven tool development by making open source data and tools accessible to the research community in a web-based platform.}, } @article {pmid31287816, year = {2019}, author = {Ahmed, AE and Heldenbrand, J and Asmann, Y and Fadlelmola, FM and Katz, DS and Kendig, K and Kendzior, MC and Li, T and Ren, Y and Rodriguez, E and Weber, MR and Wozniak, JM and Zermeno, J and Mainzer, LS}, title = {Managing genomic variant calling workflows with Swift/T.}, journal = {PloS one}, volume = {14}, number = {7}, pages = {e0211608}, pmid = {31287816}, issn = {1932-6203}, support = {U41 HG006941/HG/NHGRI NIH HHS/United States ; }, mesh = {Animals ; *Computational Biology ; *Genomics ; Humans ; *Software ; Workflow ; }, abstract = {Bioinformatics research is frequently performed using complex workflows with multiple steps, fans, merges, and conditionals. This complexity makes management of the workflow difficult on a computer cluster, especially when running in parallel on large batches of data: hundreds or thousands of samples at a time. Scientific workflow management systems could help with that. Many are now being proposed, but is there yet the "best" workflow management system for bioinformatics? Such a system would need to satisfy numerous, sometimes conflicting requirements: from ease of use, to seamless deployment at peta- and exa-scale, and portability to the cloud. We evaluated Swift/T as a candidate for such role by implementing a primary genomic variant calling workflow in the Swift/T language, focusing on workflow management, performance and scalability issues that arise from production-grade big data genomic analyses. In the process we introduced novel features into the language, which are now part of its open repository. 
Additionally, we formalized a set of design criteria for quality, robust, maintainable workflows that must function at-scale in a production setting, such as a large genomic sequencing facility or a major hospital system. The use of Swift/T conveys two key advantages. (1) It operates transparently in multiple cluster scheduling environments (PBS Torque, SLURM, Cray aprun environment, etc.), thus a single workflow is trivially portable across numerous clusters. (2) The leaf functions of Swift/T permit developers to easily swap executables in and out of the workflow, which makes it easy to maintain and to request resources optimal for each stage of the pipeline. While Swift/T's data-level parallelism eliminates the need to code parallel analysis of multiple samples, it does make debugging more difficult, as is common for implicitly parallel code. Nonetheless, the language gives users a powerful and portable way to scale up analyses in many computing architectures. The code for our implementation of a variant calling workflow using Swift/T can be found on GitHub at https://github.com/ncsa/Swift-T-Variant-Calling, with full documentation provided at http://swift-t-variant-calling.readthedocs.io/en/latest/.}, } @article {pmid31287062, year = {2019}, author = {Liu, L and Duan, S and Zhang, Y and Wu, Y and Zhang, L}, title = {Initial Experience of the Synchronized, Real-Time, Interactive, Remote Transthoracic Echocardiogram Consultation System in Rural China: Longitudinal Observational Study.}, journal = {JMIR medical informatics}, volume = {7}, number = {3}, pages = {e14248}, pmid = {31287062}, issn = {2291-9694}, abstract = {BACKGROUND: China has a vast territory, and the quality of health care services provided, especially transthoracic echocardiography (TTE), in remote regions is still low. Patients usually need to travel long distances to tertiary care centers for confirmation of a diagnosis. 
Considering the rapid development of high-speed communication technology, telemedicine will be a significant technology for improving the diagnosis and treatment of patients at secondary care hospitals.

OBJECTIVE: This study aimed to discuss the feasibility and perceived clinical value of a synchronized, real-time, interactive, remote TTE consultation system based on cloud computing technology.

METHODS: By using the cloud computing platform coupled with unique dynamic image coding and decoding and synchronization technology, multidimensional communication information in the form of voice, texts, and pictures was integrated. A remote TTE consultation system connecting Henan Provincial People's Hospital and two county-level secondary care hospitals located 300 km away was developed, which was used for consultation with 45 patients.

RESULTS: This remote TTE consultation system achieved remote consultation for 45 patients. The total time for consultation was 341.31 min, and the mean time for each patient was 7.58 (SD 6.17) min. Among the 45 patients, 3 were diagnosed with congenital heart diseases (7%) and 42 were diagnosed with acquired heart diseases (93%) at the secondary care hospitals. After expert consultation, the final diagnosis was congenital heart diseases in 5 patients (11%), acquired heart disease in 34 patients (76%), and absence of heart abnormalities in 6 patients (13%). Compared with the initial diagnosis at secondary care hospitals, remote consultation using this system revealed new abnormalities in 7 patients (16%), confirmation was obtained in 6 patients (13%), and abnormalities were excluded in 6 patients (13%). The expert opinions agreed with the initial diagnosis in the remaining 26 patients (58%). In addition, several questions about rare illnesses raised by the rural doctors at the secondary care hospitals were answered.

CONCLUSIONS: The synchronized real-time interactive remote TTE consultation system based on cloud computing service and unique dynamic image coding and decoding technology had high feasibility and applicability.}, } @article {pmid31286058, year = {2019}, author = {Qian, T and Zhu, S and Hoshida, Y}, title = {Use of big data in drug development for precision medicine: an update.}, journal = {Expert review of precision medicine and drug development}, volume = {4}, number = {3}, pages = {189-200}, pmid = {31286058}, issn = {2380-8993}, support = {671231/ERC_/European Research Council/International ; R01 DK099558/DK/NIDDK NIH HHS/United States ; }, abstract = {INTRODUCTION: Big-data-driven drug development resources and methodologies have been evolving with ever-expanding data from large-scale biological experiments, clinical trials, and medical records from participants in data collection initiatives. The enrichment of biological- and clinical-context-specific large-scale data has enabled computational inference more relevant to real-world biomedical research, particularly identification of therapeutic targets and drugs for specific diseases and clinical scenarios.

AREAS COVERED: Here we overview recent progresses made in the fields: new big-data-driven approach to therapeutic target discovery, candidate drug prioritization, inference of clinical toxicity, and machine-learning methods in drug discovery.

EXPERT OPINION: In the near future, much larger volumes and complex datasets for precision medicine will be generated, e.g., individual and longitudinal multi-omic, and direct-to-consumer datasets. Closer collaborations between experts with different backgrounds would also be required to better translate analytic results into prognosis and treatment in the clinical practice. Meanwhile, cloud computing with protected patient privacy would become more routine analytic practice to fill the gaps within data integration along with the advent of big-data. To conclude, integration of multitudes of data generated for each individual along with techniques tailored for big-data analytics may eventually enable us to achieve precision medicine.}, } @article {pmid31284514, year = {2019}, author = {Fernandez, JM and Vidal, I and Valera, F}, title = {Enabling the Orchestration of IoT Slices through Edge and Cloud Microservice Platforms.}, journal = {Sensors (Basel, Switzerland)}, volume = {19}, number = {13}, pages = {}, pmid = {31284514}, issn = {1424-8220}, support = {5GinFIRE project (732497)//European Commission/ ; 5GCity project (TEC2016-76795-C6-3-R)//Spanish Ministry of Economy and Competitiveness/ ; }, abstract = {This article addresses one of the main challenges related to the practical deployment of Internet of Things (IoT) solutions: the coordinated operation of entities at different infrastructures to support the automated orchestration of end-to-end Internet of Things services. This idea is referred to as "Internet of Things slicing" and is based on the network slicing concept already defined for the Fifth Generation (5G) of mobile networks. In this context, we present the architectural design of a slice orchestrator addressing the aforementioned challenge, based on well-known standard technologies and protocols. 
The proposed solution is able to integrate existing technologies, like cloud computing, with other more recent technologies like edge computing and network slicing. In addition, a functional prototype of the proposed orchestrator has been implemented, using open-source software and microservice platforms. As a first step to prove the practical feasibility of our solution, the implementation of the orchestrator considers cloud and edge domains. The validation results obtained from the prototype prove the feasibility of the solution from a functional perspective, verifying its capacity to deploy Internet of Things related functions even on resource constrained platforms. This approach enables new application models where these Internet of Things related functions can be onboarded on small unmanned aerial vehicles, offering a flexible and cost-effective solution to deploy these functions at the network edge. In addition, this proposal can also be used on commercial cloud platforms, like the Google Compute Engine, showing that it can take advantage of the benefits of edge and cloud computing respectively.}, } @article {pmid31284421, year = {2019}, author = {Ferrández-Pastor, FJ and García-Chamizo, JM and Gomez-Trillo, S and Valdivieso-Sarabia, R and Nieto-Hidalgo, M}, title = {Smart Management Consumption in Renewable Energy Fed Ecosystems.}, journal = {Sensors (Basel, Switzerland)}, volume = {19}, number = {13}, pages = {}, pmid = {31284421}, issn = {1424-8220}, support = {5000//Generalitat Valenciana/ ; }, abstract = {Advances in embedded electronic systems, the development of new communication protocols, and the application of artificial intelligence paradigms have enabled the improvement of current automation systems of energy management. Embedded devices integrate different sensors with connectivity, computing resources, and reduced cost. 
Communication and cloud services increase their performance; however, there are limitations in the implementation of these technologies. If the cloud is used as the main source of services and resources, overload problems will occur. There are no models that facilitate the complete integration and interoperability in the facilities already created. This article proposes a model for the integration of smart energy management systems in new and already created facilities, using local embedded devices, Internet of Things communication protocols and services based on artificial intelligence paradigms. All services are distributed in the new smart grid network using edge and fog computing techniques. The model proposes an architecture both to be used as support for the development of smart services and for energy management control systems adapted to the installation: a group of buildings and/or houses that shares energy management and energy generation. Machine learning to predict consumption and energy generation, electric load classification, energy distribution control, and predictive maintenance are the main utilities integrated. As an experimental case, a facility that incorporates wind and solar generation is used for development and testing. Smart grid facilities, designed with artificial intelligence algorithms, implemented with Internet of Things protocols, and embedded control devices facilitate the development, cost reduction, and the integration of new services. In this work, a method to design, develop, and install smart services in self-consumption facilities is proposed. 
New smart services with reduced costs are installed and tested, confirming the advantages of the proposed model.}, } @article {pmid31278683, year = {2019}, author = {Strozzi, F and Janssen, R and Wurmus, R and Crusoe, MR and Githinji, G and Di Tommaso, P and Belhachemi, D and Möller, S and Smant, G and de Ligt, J and Prins, P}, title = {Scalable Workflows and Reproducible Data Analysis for Genomics.}, journal = {Methods in molecular biology (Clifton, N.J.)}, volume = {1910}, number = {}, pages = {723--745}, pmid = {31278683}, issn = {1940-6029}, support = {203077/WT_/Wellcome Trust/United Kingdom ; }, mesh = {Big Data ; Biological Evolution ; Cloud Computing ; *Computational Biology/methods ; Data Analysis ; *Genomics/methods ; Humans ; Reproducibility of Results ; Software ; Workflow ; }, abstract = {Biological, clinical, and pharmacological research now often involves analyses of genomes, transcriptomes, proteomes, and interactomes, within and between individuals and across species. Due to large volumes, the analysis and integration of data generated by such high-throughput technologies have become computationally intensive, and analysis can no longer happen on a typical desktop computer.In this chapter we show how to describe and execute the same analysis using a number of workflow systems and how these follow different approaches to tackle execution and reproducibility issues. We show how any researcher can create a reusable and reproducible bioinformatics pipeline that can be deployed and run anywhere. We show how to create a scalable, reusable, and shareable workflow using four different workflow engines: the Common Workflow Language (CWL), Guix Workflow Language (GWL), Snakemake, and Nextflow. Each of which can be run in parallel.We show how to bundle a number of tools used in evolutionary biology by using Debian, GNU Guix, and Bioconda software distributions, along with the use of container systems, such as Docker, GNU Guix, and Singularity.
Together these distributions represent the overall majority of software packages relevant for biology, including PAML, Muscle, MAFFT, MrBayes, and BLAST. By bundling software in lightweight containers, they can be deployed on a desktop, in the cloud, and, increasingly, on compute clusters.By bundling software through these public software distributions, and by creating reproducible and shareable pipelines using these workflow engines, not only do bioinformaticians have to spend less time reinventing the wheel but also do we get closer to the ideal of making science reproducible. The examples in this chapter allow a quick comparison of different solutions.}, } @article {pmid31277474, year = {2019}, author = {Battula, SK and Garg, S and Naha, RK and Thulasiraman, P and Thulasiram, R}, title = {A Micro-Level Compensation-Based Cost Model for Resource Allocation in a Fog Environment.}, journal = {Sensors (Basel, Switzerland)}, volume = {19}, number = {13}, pages = {}, pmid = {31277474}, issn = {1424-8220}, abstract = {Fog computing aims to support applications requiring low latency and high scalability by using resources at the edge level. In general, fog computing comprises several autonomous mobile or static devices that share their idle resources to run different services. The providers of these devices also need to be compensated based on their device usage. In any fog-based resource-allocation problem, both cost and performance need to be considered for generating an efficient resource-allocation plan. Estimating the cost of using fog devices prior to the resource allocation helps to minimize the cost and maximize the performance of the system. In the fog computing domain, recent research works have proposed various resource-allocation algorithms without considering the compensation to resource providers and the cost estimation of the fog resources. 
Moreover, the existing cost models in similar paradigms such as in the cloud are not suitable for fog environments as the scaling of different autonomous resources with heterogeneity and variety of offerings is much more complicated. To fill this gap, this study first proposes a micro-level compensation cost model and then proposes a new resource-allocation method based on the cost model, which benefits both providers and users. Experimental results show that the proposed algorithm ensures better resource-allocation performance and lowers application processing costs when compared to the existing best-fit algorithm.}, } @article {pmid31277463, year = {2019}, author = {Naranjo, DM and Prieto, JR and Moltó, G and Calatrava, A}, title = {A Visual Dashboard to Track Learning Analytics for Educational Cloud Computing.}, journal = {Sensors (Basel, Switzerland)}, volume = {19}, number = {13}, pages = {}, pmid = {31277463}, issn = {1424-8220}, support = {TIN2016-79951-R//Ministerio de Economía, Industria y Competitividad, Gobierno de España/ ; }, abstract = {Cloud providers such as Amazon Web Services (AWS) stand out as useful platforms to teach distributed computing concepts as well as the development of Cloud-native scalable application architectures on real-world infrastructures. Instructors can benefit from high-level tools to track the progress of students during their learning paths on the Cloud, and this information can be disclosed via educational dashboards for students to understand their progress through the practical activities. To this aim, this paper introduces CloudTrail-Tracker, an open-source platform to obtain enhanced usage analytics from a shared AWS account. The tool provides the instructor with a visual dashboard that depicts the aggregated usage of resources by all the students during a certain time frame and the specific use of AWS for a specific student. 
To facilitate self-regulation of students, the dashboard also depicts the percentage of progress for each lab session and the pending actions by the student. The dashboard has been integrated in four Cloud subjects that use different learning methodologies (from face-to-face to online learning) and the students positively highlight the usefulness of the tool for Cloud instruction in AWS. This automated procurement of evidences of student activity on the Cloud results in close to real-time learning analytics useful both for semi-automated assessment and student self-awareness of their own training progress.}, } @article {pmid31277004, year = {2019}, author = {Zhou, Y and Dong, J and Xiao, X and Liu, R and Zou, Z and Zhao, G and Ge, Q}, title = {Continuous monitoring of lake dynamics on the Mongolian Plateau using all available Landsat imagery and Google Earth Engine.}, journal = {The Science of the total environment}, volume = {689}, number = {}, pages = {366--380}, doi = {10.1016/j.scitotenv.2019.06.341}, pmid = {31277004}, issn = {1879-1026}, abstract = {Lakes are important water resources on the Mongolian Plateau (MP) for human's livelihood and production as well as maintaining ecosystem services. Previous studies, based on the Landsat-based analyses at epoch scale and visual interpretation approach, have reported a significant loss in the lake areas and numbers, especially from the late 1990s to 2010. Given the remarkable inter- and intra-annual variations of lakes in the arid and semi-arid region, a comprehensive picture of annual lake dynamics is needed. Here we took advantages of the power of all the available Landsat images and the cloud computing platform Google Earth Engine (GEE) to map water body for each scene, and then extracted lakes by post-processing including raster-to-vector conversion and separation of lakes and rivers. Continuous dynamics of the lakes over 1 km[2] was monitored annually on the MP from 1991 to 2017. 
We found a significant shrinkage in the lake areas and numbers of the MP from 1991 to 2009, then the decreasing lakes on the MP have recovered since circa 2009. Specifically, Inner Mongolia of China experienced more dramatic lake variations than Mongolia. A few administrative regions with huge lakes, including Hulunbuir and Xilin Gol in Inner Mongolia and Ubsa in Mongolia, dominated the lake area variations in the study area, suggesting that the prior treatments on these major lakes would be critical for water management on the MP. The varied drivers of lake variations in different regions showed the complexity of factors impacting lakes. While both natural and anthropogenic factors significantly affected lake dynamics before 2009, precipitation played increasingly important role for the recovery of lakes on the MP after 2009.}, } @article {pmid31273541, year = {2019}, author = {Damodharan, P and Ravichandran, CS}, title = {Applicability Evaluation of Web Mining in Healthcare E-Commerce towards Business Success and a derived Cournot Model.}, journal = {Journal of medical systems}, volume = {43}, number = {8}, pages = {268}, pmid = {31273541}, issn = {1573-689X}, mesh = {*Commerce ; Competitive Behavior ; *Data Mining ; *Delivery of Health Care ; *Internet ; *Models, Organizational ; }, abstract = {Internet has become integral part of day-to-day business to almost everybody, this result in diversified interests of customers. So as to catering to this intrinsic need, any E-Commerce firm to survive must be of cutting edge and competitive edge. The providers should not only to stay abreast with technologies where the life cycle of a technology is at its bare minimum and further dwindling They should also entertain the customers through inventively fine tuning the delicate parameters of website. This involves evaluating the usage pattern and trails of the customer left as a log, deriving pattern from click stream etc. 
However, the cutting edge technology applied by the big healthcare E-Commerce industries like private Cloud utilization (John et al., Optimization and Computing, 2012), web content mining enables them to attract and retain innumerable number of customers even during peak hours. According to the research carried out in this paper, there are two distinct types of online business based on web content promoted towards buy, they are classified as exhaustive promote and partial promote. Typically exhaustive promote website perform even complex web mining operations for identifying and enticing the potential customers to buy various healthcare products based on various factors such as buying habits, interests etc. However for the partial promote in the observed cases, they are not even aware of the existence of such techniques. Based on the analysis performed on various renowned online websites, if 60% and above of the web content leads the customer to perform the 'buy', then it is exhaustive promote; the rest is considered as partial promote. Moreover a huge gap is observed between Partial and exhaustive promote when it comes to the deployment of the web mining techniques. Consequently to understand the varying role of web mining in the online business successes, this paper models the web mining as a Game in Cournot Model. 
The results show that the model suits the economics behind the online businesses in both the cases and thus helps to identify or enhance the underlying web mining techniques towards business success.}, } @article {pmid31273467, year = {2019}, author = {Shanmugapriya, E and Kavitha, R}, title = {Efficient and Secure Privacy Analysis for Medical Big Data Using TDES and MKSVM with Access Control in Cloud.}, journal = {Journal of medical systems}, volume = {43}, number = {8}, pages = {265}, pmid = {31273467}, issn = {1573-689X}, mesh = {Access to Information ; Algorithms ; *Cloud Computing ; *Computer Security ; *Confidentiality ; *Health Records, Personal ; }, abstract = {Big Data and cloud computing are two essential issues in the recent years, empowers computing resources to be given as Information Technology services with high efficiency and effectiveness. So as to protect the security of data holders, data are regularly stored in the cloud in an encrypted form. In any case, encrypted data introduce new challenges for cloud data deduplication, which becomes crucial for big data storage and processing in the cloud along with access control. In this paper dissected the medical big data security utilizing encryption with access control process. Big database reduce process Map-Reduce framework with Optimal Fuzzy C means (OFCM) to Clustered data are accumulated in the cloud and furthermore using classification approach to classify sensitive and non-sensitive data in the cloud to encryption. This security process Triple DES (TDES) to encrypted and stored in the cloud and propose practical optimization techniques that further enhance the scheme's performance, at long last authentication phase with attribute-based access control is used to authenticate data in cloud sim. 
From the proposed method the clustering, classification and encryption results are compared to existing approaches.}, } @article {pmid31245267, year = {2019}, author = {Nazarov, O and Guan, J and Chihuri, S and Li, G}, title = {Research utility of the National Violent Death Reporting System: a scoping review.}, journal = {Injury epidemiology}, volume = {6}, number = {}, pages = {18}, pmid = {31245267}, issn = {2197-1714}, support = {R49 CE002096/CE/NCIPC CDC HHS/United States ; }, abstract = {BACKGROUND: To better understand and prevent suicide and homicide, the National Center for Injury Prevention and Control of the US Centers for Disease Control and Prevention launched the National Violent Death Reporting System (NVDRS) in six states in 2002. As of 2018, the NVDRS has been expanded to include all 50 states, the District of Columbia and Puerto Rico. The purpose of this review was to assess the research utility of the NVDRS based on studies indexed in major bibliographical databases.

METHODS: We performed a scoping review of published studies that were based on data from the NVDRS, identified by searching six electronic databases: PubMed, EMBASE, Google Scholar, OVID, Scopus, and Web of Science. We examined the time trend of annual NVDRS-based research output, generated a word cloud using the keywords listed in the publications, and mapped the knowledge domains covered by NVDRS-based studies.

RESULTS: Our review included a total of 150 studies published between 2005 and 2018. There was a marked increase in the annual number of NVDRS-based publications, with 120 (80.0%) of the 150 studies published between 2011 and 2018. Overall, 104 (69.3%) studies focused on suicide and 39 (26.0%) on homicide. Of the included studies, 100 (66.7%) were descriptive epidemiology, 31 (20.7%) were risk factor analyses, 9 (6.0%) were evaluations, 7 (4.7%) were trend analyses, and 4 (2.7%) were data quality assessments. Knowledge domain mapping identified two major clusters of studies, one on suicide and the other on homicide. The cluster on suicide was commonly linked to "circumstance," "alcohol" and "substance abuse" and the cluster on homicide was commonly linked to "firearm," "injury," and "gang." The two clusters were interlinked to overlapping networks of keywords, such as "firearm" and "mental health problem."

CONCLUSIONS: Research utility of the NVDRS has increased considerably in recent years. Studies based on data from the NVDRS are clustered in two knowledge domains - suicide and homicide. The vast potential of the NVDRS for violence research and prevention remains to be fully exploited.}, } @article {pmid31240414, year = {2019}, author = {Parker, W and Lee, B and Nicolaou, S}, title = {AI Is Bringing USB Back: Implementing a Beta Chest X-ray Neural Network.}, journal = {Journal of digital imaging}, volume = {32}, number = {6}, pages = {1116--1117}, doi = {10.1007/s10278-019-00247-7}, pmid = {31240414}, issn = {1618-727X}, mesh = {*Artificial Intelligence ; Canada ; Humans ; Neural Networks, Computer ; *Radiography, Thoracic ; *Radiology Information Systems ; }, abstract = {In a day and age of rapid technological growth and advancement in digital technology, quantum computing, and decentralized cloud computing, it is difficult to get excited about USB sticks, those little dongles that store only a few gigabytes and commonly get lost in the bottom of your bag. Well, they are making a major comeback at our institution in Canada. That is right, when Stanford and MIT are making the next Facebook and autonomous vehicles, we are bringing USB back.}, } @article {pmid31238553, year = {2019}, author = {Wu, ZY}, title = {A Secure and Efficient Digital-Data-Sharing System for Cloud Environments.}, journal = {Sensors (Basel, Switzerland)}, volume = {19}, number = {12}, pages = {}, pmid = {31238553}, issn = {1424-8220}, abstract = {"Education Cloud" is a cloud-computing application used in educational contexts to facilitate the use of comprehensive digital technologies and establish data-based learning environments. The immense amount of digital resources, data, and teaching materials involved in these environments must be stored in robust data-access systems. 
These systems must be equipped with effective security mechanisms to guarantee confidentiality and ensure the integrity of the cloud-computing environment. To minimize the potential risk of privacy exposure, digital sharing service providers must encrypt their digital resources, data, and teaching materials, and digital-resource owners must have complete control over what data or materials they share. In addition, the data in these systems must be accessible to e-learners. In other words, data-access systems should not only encrypt data, but also provide access control mechanisms by which users may access the data. In cloud environments, digital sharing systems no longer target single users, and the access control by numerous users may overload a system and increase management burden and complexity. This study addressed these challenges to create a system that preserves the benefits of combining digital sharing systems and cloud computing. A cloud-based and learner-centered access control mechanism suitable for multi-user digital sharing was developed. The proposed mechanism resolves the problems concerning multi-user access requests in cloud environments and dynamic updating in digital-sharing systems, thereby reducing the complexity of security management.}, } @article {pmid31234280, year = {2019}, author = {Ma, K and Bagula, A and Nyirenda, C and Ajayi, O}, title = {An IoT-Based Fog Computing Model.}, journal = {Sensors (Basel, Switzerland)}, volume = {19}, number = {12}, pages = {}, pmid = {31234280}, issn = {1424-8220}, abstract = {The internet of things (IoT) and cloud computing are two technologies which have recently changed both the academia and industry and impacted our daily lives in different ways. However, despite their impact, both technologies have their shortcomings. Though being cheap and convenient, cloud services consume a huge amount of network bandwidth. 
Furthermore, the physical distance between data source(s) and the data centre makes delays a frequent problem in cloud computing infrastructures. Fog computing has been proposed as a distributed service computing model that provides a solution to these limitations. It is based on a para-virtualized architecture that fully utilizes the computing functions of terminal devices and the advantages of local proximity processing. This paper proposes a multi-layer IoT-based fog computing model called IoT-FCM, which uses a genetic algorithm for resource allocation between the terminal layer and fog layer and a multi-sink version of the least interference beaconing protocol (LIBP) called least interference multi-sink protocol (LIMP) to enhance the fault-tolerance/robustness and reduce energy consumption of a terminal layer. Simulation results show that compared to the popular max-min and fog-oriented max-min, IoT-FCM performs better by reducing the distance between terminals and fog nodes by at least 38% and reducing energy consumed by an average of 150 KWh while being at par with the other algorithms in terms of delay for high number of tasks.}, } @article {pmid31227823, year = {2019}, author = {Nath, T and Mathis, A and Chen, AC and Patel, A and Bethge, M and Mathis, MW}, title = {Using DeepLabCut for 3D markerless pose estimation across species and behaviors.}, journal = {Nature protocols}, volume = {14}, number = {7}, pages = {2152--2176}, doi = {10.1038/s41596-019-0176-0}, pmid = {31227823}, issn = {1750-2799}, mesh = {Algorithms ; Animals ; Behavior, Animal/*physiology ; Humans ; Imaging, Three-Dimensional/*methods ; Programming Languages ; *Software ; User-Computer Interface ; *Video Recording ; Workflow ; }, abstract = {Noninvasive behavioral tracking of animals during experiments is critical to many scientific pursuits. 
Extracting the poses of animals without using markers is often essential to measuring behavioral effects in biomechanics, genetics, ethology, and neuroscience. However, extracting detailed poses without markers in dynamically changing backgrounds has been challenging. We recently introduced an open-source toolbox called DeepLabCut that builds on a state-of-the-art human pose-estimation algorithm to allow a user to train a deep neural network with limited training data to precisely track user-defined features that match human labeling accuracy. Here, we provide an updated toolbox, developed as a Python package, that includes new features such as graphical user interfaces (GUIs), performance improvements, and active-learning-based network refinement. We provide a step-by-step procedure for using DeepLabCut that guides the user in creating a tailored, reusable analysis pipeline with a graphical processing unit (GPU) in 1-12 h (depending on frame size). Additionally, we provide Docker environments and Jupyter Notebooks that can be run on cloud resources such as Google Colaboratory.}, } @article {pmid31226745, year = {2019}, author = {da Silva, HW and Venâncio Neto, AJ}, title = {Resilient Multiuser Session Control in Softwarized Fog-Supported Internet of Moving Thing Systems.}, journal = {Sensors (Basel, Switzerland)}, volume = {19}, number = {12}, pages = {}, pmid = {31226745}, issn = {1424-8220}, abstract = {The combination of IoT and mobility promises to open a new frontier of innovations in smart environments, through the advent of the Internet of Moving Things (IoMT) paradigm. In IoMT, an array of IoT devices leverage IP-based mobile connectivity to provide a vast range of data ubiquitously. The IoMT realization will foster smart environments at unprecedented levels, by efficiently affording services and applications whereby today's technologies make their efficiency unfeasible, such as autonomous driving and in-ambulance remotely-assisted patient. 
IoMT-supported mission-critical applications push computing and networking requirements to totally new levels that must be met, raising the need for refined approaches that advance beyond existing technologies. In light of this, this paper proposes the Resilient MultiUser Session Control (ReMUSiC) framework, which deploys emerging softwarization and cloudification technologies to afford flexible, optimized and self-organized control plane perspectives. ReMUSiC extends our previous work through the following innovations. A quality-oriented resilience mechanism is capable of responding to network dynamics events (failure and mobility) by readapting IoMT multiuser mobile sessions. A softwarized networking control plane that allows to, at runtime, both fetch current network state and set up resources in the attempt to always keep affected IoMT multiuser mobile sessions best-connected and best-served. A cloudification approach allows a robust environment, through which cloud- and fog-systems interwork to cater to performance-enhanced capabilities. The IoMT's suitability and performance impacts by ReMUSiC framework use are assessed through real testbed prototyping. Impact analysis in Quality of Service (QoS) performance and perceived Quality of Experience (QoE), demonstrate the remarkable abilities of the ReMUSiC framework, over a related approach, in keeping IoMT multiuser mobile sessions always best-connected and best-served.}, } @article {pmid31226112, year = {2019}, author = {Xu, J and Du, X and Cai, W and Zhu, C and Chen, Y}, title = {MeURep: A novel user reputation calculation approach in personalized cloud services.}, journal = {PloS one}, volume = {14}, number = {6}, pages = {e0217933}, pmid = {31226112}, issn = {1932-6203}, mesh = {*Cloud Computing ; Humans ; *Models, Theoretical ; Reproducibility of Results ; *Software ; Time Factors ; }, abstract = {User reliability is notably crucial for personalized cloud services. 
In cloud computing environments, large amounts of cloud services are provided for users. With the exponential increase in number of cloud services, it is difficult for users to select the appropriate services from equivalent or similar candidate services. The quality-of-service (QoS) has become an important criterion for selection, and the users can conduct personalized selection according to the observed QoS data of other users; however, it is difficult to ensure that the users are reliable. Actually, unreliable users may provide unreliable QoS data and have negative effects on the personalized cloud service selection. Therefore, how to determine reliable QoS data for personalized cloud service selection remains a significant problem. To measure the reliability for each user, we present a cloud service selection framework based on user reputation and propose a new user reputation calculation approach, which is named MeURep and includes L1-MeURep and L2-MeURep. Experiments are conducted, and the results confirm that MeURep has higher efficiency than previously proposed approaches.}, } @article {pmid31222199, year = {2019}, author = {Ohta, T and Tanjo, T and Ogasawara, O}, title = {Accumulating computational resource usage of genomic data analysis workflow to optimize cloud computing instance selection.}, journal = {GigaScience}, volume = {8}, number = {4}, pages = {}, pmid = {31222199}, issn = {2047-217X}, mesh = {*Cloud Computing ; Computational Biology/*methods ; Genomics/*methods ; High-Throughput Nucleotide Sequencing ; *Software ; Workflow ; }, abstract = {BACKGROUND: Container virtualization technologies such as Docker are popular in the bioinformatics domain because they improve the portability and reproducibility of software deployment. Along with software packaged in containers, the standardized workflow descriptors Common Workflow Language (CWL) enable data to be easily analyzed on multiple computing environments. 
These technologies accelerate the use of on-demand cloud computing platforms, which can be scaled according to the quantity of data. However, to optimize the time and budgetary restraints of cloud usage, users must select a suitable instance type that corresponds to the resource requirements of their workflows.

RESULTS: We developed CWL-metrics, a utility tool for cwltool (the reference implementation of CWL), to collect runtime metrics of Docker containers and workflow metadata to analyze workflow resource requirements. To demonstrate the use of this tool, we analyzed 7 transcriptome quantification workflows on 6 instance types. The results revealed that choice of instance type can deliver lower financial costs and faster execution times using the required amount of computational resources.

CONCLUSIONS: CWL-metrics can generate a summary of resource requirements for workflow executions, which can help users to optimize their use of cloud computing by selecting appropriate instances. The runtime metrics data generated by CWL-metrics can also help users to share workflows between different workflow management frameworks.}, } @article {pmid31217669, year = {2019}, author = {Vo, H and Kong, J and Teng, D and Liang, Y and Aji, A and Teodoro, G and Wang, F}, title = {MaReIA: A Cloud MapReduce Based High Performance Whole Slide Image Analysis Framework.}, journal = {Distributed and parallel databases}, volume = {37}, number = {2}, pages = {251-272}, pmid = {31217669}, issn = {1573-7578}, support = {K25 CA181503/CA/NCI NIH HHS/United States ; }, abstract = {Recent advancements in systematic analysis of high resolution whole slide images have increased efficiency of diagnosis, prognosis and prediction of cancer and important diseases. Due to the enormous sizes and dimensions of whole slide images, the analysis requires extensive computing resources which are not commonly available. Images have to be tiled for processing due to computer memory limitations, which lead to inaccurate results due to the ignorance of boundary crossing objects. Thus, we propose a generic and highly scalable cloud-based image analysis framework for whole slide images. The framework enables parallelized integration of image analysis steps, such as segmentation and aggregation of micro-structures in a single pipeline, and generation of final objects manageable by databases. The core concept relies on the abstraction of objects in whole slide images as different classes of spatial geometries, which in turn can be handled as text based records in MapReduce. The framework applies an overlapping partitioning scheme on images, and provides parallelization of tiling and image segmentation based on MapReduce architecture. 
It further provides robust object normalization, graceful handling of boundary objects with an efficient spatial indexing based matching method to generate accurate results. Our experiments on Amazon EMR show that MaReIA is highly scalable, generic and extremely cost effective by benchmark tests.}, } @article {pmid31213035, year = {2019}, author = {Yuan, C and Sun, X}, title = {Server Consolidation Based on Culture Multiple-Ant-Colony Algorithm in Cloud Computing.}, journal = {Sensors (Basel, Switzerland)}, volume = {19}, number = {12}, pages = {}, pmid = {31213035}, issn = {1424-8220}, support = {18YDYGHZ00040//Science and Technology Commission of Tianjin Municipality/ ; }, abstract = {High-energy consumption in data centers has become a critical issue. The dynamic server consolidation has significant effects on saving energy of a data center. An effective way to consolidate virtual machines is to migrate virtual machines in real time so that some light load physical machines can be turned off or switched to low-power mode. The present challenge is to reduce the energy consumption of cloud data centers. In this paper, for the first time, a server consolidation algorithm based on the culture multiple-ant-colony algorithm was proposed for dynamic execution of virtual machine migration, thus reducing the energy consumption of cloud data centers. The server consolidation algorithm based on the culture multiple-ant-colony algorithm (CMACA) finds an approximate optimal solution through a specific target function. 
The simulation results show that the proposed algorithm not only reduces the energy consumption but also reduces the number of virtual machine migration.}, } @article {pmid31212670, year = {2019}, author = {Barros, EBC and Filho, DML and Batista, BG and Kuehne, BT and Peixoto, MLM}, title = {Fog Computing Model to Orchestrate the Consumption and Production of Energy in Microgrids.}, journal = {Sensors (Basel, Switzerland)}, volume = {19}, number = {11}, pages = {}, pmid = {31212670}, issn = {1424-8220}, abstract = {Energy advancement and innovation have generated several challenges for large modernized cities, such as the increase in energy demand, causing the appearance of the small power grid with a local source of supply, called the Microgrid. A Microgrid operates either connected to the national centralized power grid or singly, as a power island mode. Microgrids address these challenges using sensing technologies and Fog-Cloud computing infrastructures for building smart electrical grids. A smart Microgrid can be used to minimize the power demand problem, but this solution needs to be implemented correctly so as not to increase the amount of data being generated. Thus, this paper proposes the use of Fog computing to help control power demand and manage power production by eliminating the high volume of data being passed to the Cloud and decreasing the requests' response time. The GridLab-d simulator was used to create a Microgrid, where it is possible to exchange information between consumers and generators. 
Thus, to understand the potential of the Fog in this scenario, a performance evaluation is performed to verify how factors such as residence number, optimization algorithms, appliance shifting, and energy sources may influence the response time and resource usage.}, } @article {pmid31206528, year = {2019}, author = {DeLancey, ER and Kariyeva, J and Bried, JT and Hird, JN}, title = {Large-scale probabilistic identification of boreal peatlands using Google Earth Engine, open-access satellite data, and machine learning.}, journal = {PloS one}, volume = {14}, number = {6}, pages = {e0218165}, pmid = {31206528}, issn = {1932-6203}, mesh = {Alberta ; Biodiversity ; Carbon/chemistry ; Climate Change ; Conservation of Natural Resources/*methods ; Earth, Planet ; Ecosystem ; Machine Learning ; Radar ; Taiga ; Wetlands ; }, abstract = {Freely-available satellite data streams and the ability to process these data on cloud-computing platforms such as Google Earth Engine have made frequent, large-scale landcover mapping at high resolution a real possibility. In this paper we apply these technologies, along with machine learning, to the mapping of peatlands-a landcover class that is critical for preserving biodiversity, helping to address climate change impacts, and providing ecosystem services, e.g., carbon storage-in the Boreal Forest Natural Region of Alberta, Canada. We outline a data-driven, scientific framework that: compiles large amounts of Earth observation data sets (radar, optical, and LiDAR); examines the extracted variables for suitability in peatland modelling; optimizes model parameterization; and finally, predicts peatland occurrence across a large boreal area (397, 958 km2) of Alberta at 10 m spatial resolution (equalling 3.9 billion pixels across Alberta). The resulting peatland occurrence model shows an accuracy of 87% and a kappa statistic of 0.57 when compared to our validation data set. 
Differentiating peatlands from mineral wetlands achieved an accuracy of 69% and kappa statistic of 0.37. This data-driven approach is applicable at large geopolitical scales (e.g., provincial, national) for wetland and landcover inventories that support long-term, responsible resource management.}, } @article {pmid31201700, year = {2020}, author = {Makinde, EO and Oyelade, EO}, title = {Land cover mapping using Sentinel-1 SAR and Landsat 8 imageries of Lagos State for 2017.}, journal = {Environmental science and pollution research international}, volume = {27}, number = {1}, pages = {66-74}, pmid = {31201700}, issn = {1614-7499}, mesh = {Environmental Monitoring/*methods ; Humans ; Nigeria ; *Radar ; *Satellite Imagery ; }, abstract = {For several years, Landsat imageries have been used for land cover mapping analysis. However, cloud cover constitutes a major obstacle to land cover classification in coastal tropical regions including Lagos State. In this work, a land cover appearance for Lagos State is examined using Sentinel-1 synthetic aperture radar (SAR) and Land Satellite 8 (Landsat 8) imageries. To this aim, a Sentinel-1 SAR dual-pol (VV+VH) Interferometric Wide swath mode (IW) data orbit for 2017 and a Landsat 8 Operational Land Imager (OLI) for 2017 over Lagos State were acquired and analysed. The Sentinel-1 imagery was calibrated and terrain corrected using a SRTM 3Sec DEM. Maximum likelihood classification algorithm was performed. A supervised pixel-based imagery classification to classify the dataset using training points selected from RGB combination of VV and VH polarizations was applied. Accuracy assessment was performed using test data collected from high-resolution imagery of Google Earth to determine the overall classification accuracy and Kappa coefficient. The Landsat 8 was orthorectified and maximum likelihood classification algorithm also performed. 
The results for Sentinel-1 include an RGB composite of the imagery, classified imagery, with overall accuracy calculated as 0.757, while the kappa value was evaluated to be about 0.719. Also, the Landsat 8 includes an RGB composite of the imagery, classified imagery, but an overall accuracy of 0.908 and a kappa value of 0.876. It is concluded that Sentinel 1 SAR result has been effectively exploited for producing acceptable accurate land cover map of Lagos State with relevant advantages for areas with cloud cover. In addition, the Landsat 8 result reported a high accuracy assessment values with finer visual land cover map appearance.}, } @article {pmid31198866, year = {2019}, author = {Wang, LY and Lew, SL and Lau, SH and Leow, MC}, title = {Usability factors predicting continuance of intention to use cloud e-learning application.}, journal = {Heliyon}, volume = {5}, number = {6}, pages = {e01788}, pmid = {31198866}, issn = {2405-8440}, abstract = {In this ever-progressive digital era, conventional e-learning methods have become inadequate to handle the requirements of upgraded learning processes especially in the higher education. E-learning adopting Cloud computing is able to transform e-learning into a flexible, shareable, content-reusable, and scalable learning methodology. Despite plentiful Cloud e-learning frameworks have been proposed across literature, limited researches have been conducted to study the usability factors predicting continuance intention to use Cloud e-learning applications. In this study, five usability factors namely Computer Self Efficacy (CSE), Enjoyment (E), Perceived Ease of Use (PEU), Perceived Usefulness (PU), and User Perception (UP) have been identified for factor analysis. All the five independent variables were hypothesized to be positively associated to a dependent variable namely Continuance Intention (CI). A survey was conducted on 170 IT students in one of the private universities in Malaysia. 
The students were given one trimester to experience the usability of Cloud e-Learning application. As an instrument to analyse the usability factors towards continuance intention of the application, a questionnaire consisting thirty questions was formulated and used. The collected data were analysed using SMARTPLS 3.0. The results obtained from this study observed that computer self-efficacy and enjoyment as intrinsic motivations significantly predict continuance intention, while perceived ease of use, perceived usefulness and user perception were insignificant. This outcome implies that computer self-efficacy and enjoyment significantly affect the willingness of students to continue using Cloud e-learning application in their studies. The discussions and implications of this study are vital for researchers and practitioners of educational technologies in higher education.}, } @article {pmid31197310, year = {2020}, author = {Jalili, V and Afgan, E and Taylor, J and Goecks, J}, title = {Cloud bursting galaxy: federated identity and access management.}, journal = {Bioinformatics (Oxford, England)}, volume = {36}, number = {1}, pages = {1-9}, pmid = {31197310}, issn = {1367-4811}, support = {U24 CA231877/CA/NCI NIH HHS/United States ; U41 HG006620/HG/NHGRI NIH HHS/United States ; }, mesh = {*Cloud Computing ; *Computational Biology/standards ; *Computer Security/trends ; Software ; }, abstract = {MOTIVATION: Large biomedical datasets, such as those from genomics and imaging, are increasingly being stored on commercial and institutional cloud computing platforms. This is because cloud-scale computing resources, from robust backup to high-speed data transfer to scalable compute and storage, are needed to make these large datasets usable. However, one challenge for large-scale biomedical data on the cloud is providing secure access, especially when datasets are distributed across platforms. 
While there are open Web protocols for secure authentication and authorization, these protocols are not in wide use in bioinformatics and are difficult to use for even technologically sophisticated users.

RESULTS: We have developed a generic and extensible approach for securely accessing biomedical datasets distributed across cloud computing platforms. Our approach combines OpenID Connect and OAuth2, best-practice Web protocols for authentication and authorization, together with Galaxy (https://galaxyproject.org), a web-based computational workbench used by thousands of scientists across the world. With our enhanced version of Galaxy, users can access and analyze data distributed across multiple cloud computing providers without any special knowledge of access/authorization protocols. Our approach does not require users to share permanent credentials (e.g. username, password, API key), instead relying on automatically generated temporary tokens that refresh as needed. Our approach is generalizable to most identity providers and cloud computing platforms. To the best of our knowledge, Galaxy is the only computational workbench where users can access biomedical datasets across multiple cloud computing platforms using best-practice Web security approaches and thereby minimize risks of unauthorized data access and credential use.

Freely available for academic and commercial use under the open-source Academic Free License (https://opensource.org/licenses/AFL-3.0) from the following Github repositories: https://github.com/galaxyproject/galaxy and https://github.com/galaxyproject/cloudauthz.}, } @article {pmid31197309, year = {2019}, author = {Hammoud, M and Santos, CMD and Gois, JP}, title = {iTUPA: an online automated application to perform Topographic-Unit Parsimony Analysis.}, journal = {Bioinformatics (Oxford, England)}, volume = {35}, number = {22}, pages = {4818-4820}, doi = {10.1093/bioinformatics/btz487}, pmid = {31197309}, issn = {1367-4811}, mesh = {*Biodiversity ; *Software ; }, abstract = {SUMMARY: iTUPA is a free online application for automatizing the Topographic-Unit Parsimony Analysis (TUPA), which identifies areas of endemism based on topography. iTUPA generates species-occurrences matrices based on user-defined topographic units (TUs) and provides a parsimony analysis of the generated matrix. We tested iTUPA after a proposal of regionalization for the Brazilian Atlantic Forest. iTUPA can handle millions of species registers simultaneously and uses Google Earth high-definition maps to visually explore the endemism data. We believe iTUPA is a useful tool for further discussions on biodiversity conservation.

iTUPA is hosted on Google cloud and freely available at http://nuvem.ufabc.edu.br/itupa. iTUPA is implemented using R (version 3.5.1), with RStudio 1.1.453 used as the implementation IDE, Shiny 1.1.0 web framework, and Google Maps® API version 3.36.}, } @article {pmid31194747, year = {2019}, author = {Zhang, J and Li, L and Tang, Y and Luo, S and Yang, Y and Xin, Y}, title = {Secure two-party computation of solid triangle area and tetrahedral volume based on cloud platform.}, journal = {PloS one}, volume = {14}, number = {6}, pages = {e0217067}, pmid = {31194747}, issn = {1932-6203}, mesh = {Algorithms ; *Cloud Computing ; *Computer Security ; }, abstract = {With the emergence and widespread application of cloud computing, the use of cloud platforms to solve the problem of secure multi-party computation has emerged as a new research direction. The traditional computation of a solid geometry is performed through mutual interactions between two parties, which is not suitable in an untrusted cloud computing environment. In this paper, we first design a basic protocol for a secure Euclidean distance calculation that is suitable for cloud platforms and can serve as a building block for other protocols on cloud platforms. Using the solution of the Euclidean distance problem as such a building block, we provide a new method that converts the problems of calculating solid triangular areas and solid tetrahedral volumes into the calculation of distances and determinants in three-dimensional space. Then, we discuss solid point-line distance calculations, which extent the idea of the spatial geometry security problem. We present protocols for the above problems and prove that the proposed protocols can resist conspiracy among users and the untrusted cloud platform so that they can effectively ensure the privacy of the users. We also analyze the performances of these solutions. 
The analysis results show that our scheme is more versatile.}, } @article {pmid31194095, year = {2019}, author = {Shahbazi, M and Ménard, P and Sohn, G and Théau, J}, title = {Unmanned aerial image dataset: Ready for 3D reconstruction.}, journal = {Data in brief}, volume = {25}, number = {}, pages = {103962}, pmid = {31194095}, issn = {2352-3409}, abstract = {Unmanned aerial vehicles (UAVs) have become popular platforms for collecting various types of geospatial data for various mapping, monitoring and modelling applications. With the advancement of imaging and computing technologies, a vast variety of photogrammetric, computer-vision and, nowadays, end-to-end learning workflows are introduced to produce three-dimensional (3D) information in form of digital surface and terrain models, textured meshes, rectified mosaics, CAD models, etc. These 3D products might be used in applications where accuracy and precision play a vital role, e.g. structural health monitoring. Therefore, extensive tests against data with relevant characteristics and reliable ground-truth are required to assess and ensure the performance of 3D modelling workflows. This article describes the images collected by a customized unmanned aerial vehicle (UAV) system from an open-pit gravel mine accompanied with additional data that will allow implementing and evaluating any structure-from-motion or photogrammetric approach for sparse or dense 3D reconstruction. 
This dataset includes a total of 158 high-quality images captured with more than 80% endlap and spatial resolution higher than 1.5 cm, the 3D coordinates of 109 ground control points and checkpoints, 2D coordinates of more than 40K corresponding points among the images, a subset of 25 multi-view stereo images selected from an area of approximately 30 m × 40 m within the scene accompanied with a dense point cloud measured by a terrestrial laser scanner.}, } @article {pmid31187288, year = {2019}, author = {Mohammed, KI and Zaidan, AA and Zaidan, BB and Albahri, OS and Alsalem, MA and Albahri, AS and Hadi, A and Hashim, M}, title = {Real-Time Remote-Health Monitoring Systems: a Review on Patients Prioritisation for Multiple-Chronic Diseases, Taxonomy Analysis, Concerns and Solution Procedure.}, journal = {Journal of medical systems}, volume = {43}, number = {7}, pages = {223}, pmid = {31187288}, issn = {1573-689X}, support = {FRGS/2016-0066-109-02//UPSI/ ; }, mesh = {*Chronic Disease ; *Comorbidity ; Humans ; *Monitoring, Physiologic ; Telemedicine ; *Wireless Technology ; }, abstract = {Remotely monitoring a patient's condition is a serious issue and must be addressed. Remote health monitoring systems (RHMS) in telemedicine refers to resources, strategies, methods and installations that enable doctors or other medical professionals to work remotely to consult, diagnose and treat patients. The goal of RHMS is to provide timely medical services at remote areas through telecommunication technologies. Through major advancements in technology, particularly in wireless networking, cloud computing and data storage, RHMS is becoming a feasible aspect of modern medicine. RHMS for the prioritisation of patients with multiple chronic diseases (MCDs) plays an important role in sustainably providing high-quality healthcare services. Further investigations are required to highlight the limitations of the prioritisation of patients with MCDs over a telemedicine environment. 
This study introduces a comprehensive and inclusive review on the prioritisation of patients with MCDs in telemedicine applications. Furthermore, it presents the challenges and open issues regarding patient prioritisation in telemedicine. The findings of this study are as follows: (1) The limitations and problems of existing patients' prioritisation with MCDs are presented and emphasised. (2) Based on the analysis of the academic literature, an accurate solution for remote prioritisation in a large scale of patients with MCDs was not presented. (3) There is an essential need to produce a new multiple-criteria decision-making theory to address the current problems in the prioritisation of patients with MCDs.}, } @article {pmid31185495, year = {2019}, author = {Bourgey, M and Dali, R and Eveleigh, R and Chen, KC and Letourneau, L and Fillon, J and Michaud, M and Caron, M and Sandoval, J and Lefebvre, F and Leveque, G and Mercier, E and Bujold, D and Marquis, P and Van, PT and Anderson de Lima Morais, D and Tremblay, J and Shao, X and Henrion, E and Gonzalez, E and Quirion, PO and Caron, B and Bourque, G}, title = {GenPipes: an open-source framework for distributed and scalable genomic analyses.}, journal = {GigaScience}, volume = {8}, number = {6}, pages = {}, pmid = {31185495}, issn = {2047-217X}, support = {MOP-115090//CIHR/Canada ; }, mesh = {DNA Methylation ; Epigenomics/methods ; Genomics/*methods ; Humans ; Metagenomics/methods ; Sequence Analysis, DNA/methods ; Sequence Analysis, RNA/methods ; *Software ; }, abstract = {BACKGROUND: With the decreasing cost of sequencing and the rapid developments in genomics technologies and protocols, the need for validated bioinformatics software that enables efficient large-scale data processing is growing.

FINDINGS: Here we present GenPipes, a flexible Python-based framework that facilitates the development and deployment of multi-step workflows optimized for high-performance computing clusters and the cloud. GenPipes already implements 12 validated and scalable pipelines for various genomics applications, including RNA sequencing, chromatin immunoprecipitation sequencing, DNA sequencing, methylation sequencing, Hi-C, capture Hi-C, metagenomics, and Pacific Biosciences long-read assembly. The software is available under a GPLv3 open source license and is continuously updated to follow recent advances in genomics and bioinformatics. The framework has already been configured on several servers, and a Docker image is also available to facilitate additional installations.

CONCLUSIONS: GenPipes offers genomics researchers a simple method to analyze different types of data, customizable to their needs and resources, as well as the flexibility to create their own workflows.}, } @article {pmid31184076, year = {2019}, author = {Wang, J and Ma, R and Qu, Y}, title = {[Progress of Telerehabilitation Techniques in Stroke Patients with Lower Extremity Dysfunction].}, journal = {Zhongguo yi liao qi xie za zhi = Chinese journal of medical instrumentation}, volume = {43}, number = {3}, pages = {188-191}, doi = {10.3969/j.issn.1671-7104.2019.03.009}, pmid = {31184076}, issn = {1671-7104}, mesh = {China ; Humans ; *Leg/physiopathology ; *Stroke ; *Stroke Rehabilitation ; *Telerehabilitation/instrumentation ; }, abstract = {Stroke has the characteristics of high prevalence, high morbidity, and high mortality, which seriously affects life quality of patients and also creates a huge social burden. Telerehabilitation technology is on the basis of traditional rehabilitation equipment and it integrates with cloud computing and big data technologies. It provides a new way for rehabilitation by providing comprehensive rehabilitation technology and service based on the cloud platform. Therefore, it provides a solution for the situation that the rehabilitation medical resources and the rehabilitation talents in China are relatively insufficient. This article mainly discusses the telerehabilitation technologies of lower extremity motor dysfunction in patients with stroke, the problems and the future development direction.}, } @article {pmid31181691, year = {2019}, author = {Zheng, K and Zheng, K and Fang, F and Yao, H and Yi, Y and Zeng, D}, title = {Real-Time Massive Vector Field Data Processing in Edge Computing.}, journal = {Sensors (Basel, Switzerland)}, volume = {19}, number = {11}, pages = {}, pmid = {31181691}, issn = {1424-8220}, support = {No. 2017ZX05036-001-010//National Science and Technology Major Project/ ; No. 
2016YFB0502603//National Key R&D Program of China/ ; No. 2018B020207012//Science and Technology Planning Project of Guangdong Province, China./ ; No. 2018YFB1004600//National Key R&D Program of China/ ; }, abstract = {The spread of the sensors and industrial systems has fostered widespread real-time data processing applications. Massive vector field data (MVFD) are generated by vast distributed sensors and are characterized by high distribution, high velocity, and high volume. As a result, computing such kind of data on centralized cloud faces unprecedented challenges, especially on the processing delay due to the distance between the data source and the cloud. Taking advantages of data source proximity and vast distribution, edge computing is ideal for timely computing on MVFD. Therefore, we are motivated to propose an edge computing based MVFD processing framework. In particular, we notice that the high volume feature of MVFD results in high data transmission delay. To solve this problem, we invent Data Fluidization Schedule (DFS) in our framework to reduce the data block volume and the latency on Input/Output (I/O). We evaluated the efficiency of our framework in a practical application on massive wind field data processing for cyclone recognition. 
The high efficiency of our framework was verified by the fact that it significantly outperformed classical big data processing frameworks Spark and MapReduce.}, } @article {pmid31174350, year = {2019}, author = {Zhu, B and Susilo, W and Qin, J and Guo, F and Zhao, Z and Ma, J}, title = {A Secure and Efficient Data Sharing and Searching Scheme in Wireless Sensor Networks.}, journal = {Sensors (Basel, Switzerland)}, volume = {19}, number = {11}, pages = {}, pmid = {31174350}, issn = {1424-8220}, support = {61772311, 61272091//National Nature Science Foundation of China under Grant/ ; 2019-ZD-03//Open Project of the State Key Laboratory of Information Security, Institute of Information 402 Engineering, Chinese Academy of Sciences/ ; }, abstract = {Wireless sensor networks (WSN) generally utilize cloud computing to store and process sensing data in real time, namely, cloud-assisted WSN. However, the cloud-assisted WSN faces new security challenges, particularly outsourced data confidentiality. Data Encryption is a fundamental approach but it limits target data retrieval in massive encrypted data. Public key encryption with keyword search (PEKS) enables a data receiver to retrieve encrypted data containing some specific keyword in cloud-assisted WSN. However, the traditional PEKS schemes suffer from an inherent problem, namely, the keyword guessing attack (KGA). KGA includes off-line KGA and on-line KGA. To date, the existing literature on PEKS cannot simultaneously resist both off-line KGA and on-line KGA performed by an external adversary and an internal adversary. In this work, we propose a secure and efficient data sharing and searching scheme to address the aforementioned problem such that our scheme is secure against both off-line KGA and on-line KGA performed by external and internal adversaries. We would like to stress that our scheme simultaneously achieves document encryption/decryption and keyword search functions. 
We also prove our scheme achieves keyword security and document security. Furthermore, our scheme is more efficient than previous schemes by eliminating the pairing computation.}, } @article {pmid31146339, year = {2019}, author = {C da Silva, RA and S da Fonseca, NL}, title = {On the Location of Fog Nodes in Fog-Cloud Infrastructures.}, journal = {Sensors (Basel, Switzerland)}, volume = {19}, number = {11}, pages = {}, pmid = {31146339}, issn = {1424-8220}, support = {15/24494-8//Fundação de Amparo à Pesquisa do Estado de São Paulo/ ; 140464/2018-2//Conselho Nacional de Desenvolvimento Científico e Tecnológico/ ; 1545027//Coordenação de Aperfeiçoamento de Pessoal de Nível Superior/ ; }, abstract = {In the fog computing paradigm, fog nodes are placed on the network edge to meet end-user demands with low latency, providing the possibility of new applications. Although the role of the cloud remains unchanged, a new network infrastructure for fog nodes must be created. The design of such an infrastructure must consider user mobility, which causes variations in workload demand over time in different regions. Properly deciding on the location of fog nodes is important to reduce the costs associated with their deployment and maintenance. To meet these demands, this paper discusses the problem of locating fog nodes and proposes a solution which considers time-varying demands, with two classes of workload in terms of latency. The solution was modeled as a mixed-integer linear programming formulation with multiple criteria. An evaluation with real data showed that an improvement in end-user service can be obtained in conjunction with the minimization of the costs by deploying fewer servers in the infrastructure. 
Furthermore, results show that costs can be further reduced if a limited blocking of requests is tolerated.}, } @article {pmid31145694, year = {2019}, author = {Allmer, J}, title = {Towards an Internet of Science.}, journal = {Journal of integrative bioinformatics}, volume = {16}, number = {3}, pages = {}, pmid = {31145694}, issn = {1613-4516}, mesh = {*Computational Biology ; *Internet ; *Software ; *User-Computer Interface ; *Workflow ; }, abstract = {Big data and complex analysis workflows (pipelines) are common issues in data driven science such as bioinformatics. Large amounts of computational tools are available for data analysis. Additionally, many workflow management systems to piece together such tools into data analysis pipelines have been developed. For example, more than 50 computational tools for read mapping are available representing a large amount of duplicated effort. Furthermore, it is unclear whether these tools are correct and only a few have a user base large enough to have encountered and reported most of the potential problems. Bringing together many largely untested tools in a computational pipeline must lead to unpredictable results. Yet, this is the current state. While presently data analysis is performed on personal computers/workstations/clusters, the future will see development and analysis shift to the cloud. None of the workflow management systems is ready for this transition. 
This presents the opportunity to build a new system, which will overcome current duplications of effort, introduce proper testing, allow for development and analysis in public and private clouds, and include reporting features leading to interactive documents.}, } @article {pmid31141561, year = {2019}, author = {Ma, H and Guo, X and Ping, Y and Wang, B and Yang, Y and Zhang, Z and Zhou, J}, title = {PPCD: Privacy-preserving clinical decision with cloud support.}, journal = {PloS one}, volume = {14}, number = {5}, pages = {e0217349}, pmid = {31141561}, issn = {1932-6203}, mesh = {Algorithms ; Cloud Computing ; Computer Security/trends ; Confidentiality/*ethics/standards ; Decision Making ; *Decision Making, Computer-Assisted ; Disclosure ; Electronic Health Records ; Humans ; Machine Learning ; Medical Records ; Privacy ; }, abstract = {With the prosperity of machine learning and cloud computing, meaningful information can be mined from mass electronic medical data which help physicians make proper disease diagnosis for patients. However, using medical data and disease information of patients frequently raise privacy concerns. In this paper, based on single-layer perceptron, we propose a scheme of privacy-preserving clinical decision with cloud support (PPCD), which securely conducts disease model training and prediction for the patient. Each party learns nothing about the other's private information. In PPCD, a lightweight secure multiplication is presented and introduced to improve the model training. 
Security analysis and experimental results on real data confirm the high accuracy of disease prediction achieved by the proposed PPCD without the risk of privacy disclosure.}, } @article {pmid31137767, year = {2019}, author = {Cai, T and Chen, F and He, Q and Niu, D and Wang, J}, title = {The Matrix KV Storage System Based on NVM Devices.}, journal = {Micromachines}, volume = {10}, number = {5}, pages = {}, pmid = {31137767}, issn = {2072-666X}, support = {61806086//National Natural Science Foundation of China/ ; 2016M601737//China Postdoctoral Science Foundation/ ; }, abstract = {The storage device based on Nonvolatile Memory (NVM devices) has high read/write speed and embedded processor. It is a useful way to improve the efficiency of Key-Value (KV) application. However it still has some limitations such as limited capacity, poorer computing power compared with CPU, and complex I/O system software. Thus it is not an effective way to construct KV storage system with NVM devices directly. We analyze the characteristics of NVM devices and demands of KV application to design the matrix KV storage system based on NVM Devices. The group collaboration management based on Bloomfilter, intragroup optimization based on competition, embedded KV management based on B+-tree, and the new interface of KV storage system are presented. Then, the embedded processor in the NVM device and CPU can be comprehensively utilized to construct a matrix KV pair management system. It can improve the storage and management efficiency of massive KV pairs, and it can also support the efficient execution of KV applications. A prototype is implemented named MKVS (the matrix KV storage system based on NVM devices) to test with YCSB (Yahoo! Cloud System Benchmark) and to compare with the current in-memory KV store. 
The results show that MKVS can improve the throughput by 5.98 times, and reduce the 99.7% read latency and 77.2% write latency.}, } @article {pmid31137517, year = {2019}, author = {Fernández-Ahumada, LM and Ramírez-Faz, J and Torres-Romero, M and López-Luque, R}, title = {Proposal for the Design of Monitoring and Operating Irrigation Networks Based on IoT, Cloud Computing and Free Hardware Technologies.}, journal = {Sensors (Basel, Switzerland)}, volume = {19}, number = {10}, pages = {}, pmid = {31137517}, issn = {1424-8220}, abstract = {In recent decades, considerable efforts have been devoted to process automation in agriculture. Regarding irrigation systems, this demand has found several difficulties, including the lack of communication networks and the large distances to electricity supply points. With the recent implementation of LPWAN wireless communication networks (SIGFOX, LoraWan, and NBIoT), and the expanding market of electronic controllers based on free software and hardware (i.e., Arduino, Raspberry, ESP, etc.) with low energy requirements, new perspectives have appeared for the automation of agricultural irrigation networks. This paper presents a low-cost solution for automatic cloud-based irrigation. In this paper, the design of a node network based on the microcontroller ESP32-Lora with Internet connection through the SIGFOX network is proposed. 
The results obtained show the stability and robustness of the designed system.}, } @article {pmid31137231, year = {2019}, author = {Wang, X and Zhang, JB and Zhang, A and Ren, JC}, title = {TKRD: Trusted kernel rootkit detection for cybersecurity of VMs based on machine learning and memory forensic analysis.}, journal = {Mathematical biosciences and engineering : MBE}, volume = {16}, number = {4}, pages = {2650-2667}, doi = {10.3934/mbe.2019132}, pmid = {31137231}, issn = {1551-0018}, mesh = {Algorithms ; Area Under Curve ; Bayes Theorem ; Cloud Computing ; *Computer Security ; Computers ; Decision Making ; False Positive Reactions ; *Machine Learning ; *Support Vector Machine ; }, abstract = {The promotion of cloud computing makes the virtual machine (VM) increasingly a target of malware attacks in cybersecurity such as those by kernel rootkits. Memory forensic, which observes the malicious tracks from the memory aspect, is a useful way for malware detection. In this paper, we propose a novel TKRD method to automatically detect kernel rootkits in VMs from private cloud, by combining VM memory forensic analysis with bio-inspired machine learning technology. Malicious features are extracted from the memory dumps of the VM through memory forensic analysis method. Based on these features, various machine learning classifiers are trained including Decision tree, Rule based classifiers, Bayesian and Support vector machines (SVM). 
The experiment results show that the Random Forest classifier has the best performance which can effectively detect unknown kernel rootkits with an Accuracy of 0.986 and an AUC value (the area under the receiver operating characteristic curve) of 0.998.}, } @article {pmid31137190, year = {2019}, author = {Ren, YJ and Leng, Y and Cheng, YP and Wang, J}, title = {Secure data storage based on blockchain and coding in edge computing.}, journal = {Mathematical biosciences and engineering : MBE}, volume = {16}, number = {4}, pages = {1874-1892}, doi = {10.3934/mbe.2019091}, pmid = {31137190}, issn = {1551-0018}, mesh = {Algorithms ; *Blockchain ; Cloud Computing ; *Computer Security ; Computers ; Genome, Human ; Humans ; Informatics/*methods ; *Information Storage and Retrieval ; Internet ; Reproducibility of Results ; Software ; }, abstract = {Edge computing is an important tool for smart computing, which brings convenience to data processing as well as security problems. In particular, the security of data storage under edge computing has become an obstacle to its widespread use. To solve the problem, the mechanism combining blockchain with regeneration coding is proposed to improve the security and reliability of stored data under edge computing. Our contribution is as follows. 1) According to the three-tier edge computing architecture and data security storage requirements, we proposed hybrid storage architecture and model specifically adapted to edge computing. 2) Making full use of the data storage advantages of edge network devices and cloud storage servers, we build a global blockchain in the cloud service layer and local blockchain is built on the terminals of the Internet of things. Moreover, the regeneration coding is utilized to further improve the reliability of data storage in blockchains. 
3) Our scheme provides a mechanism for periodically validating hash values of data to ensure the integrity of data stored in global blockchain.}, } @article {pmid31133771, year = {2019}, author = {Pritikin, JN and Schmitt, JE and Neale, MC}, title = {Cloud computing for voxel-wise SEM analysis of MRI data.}, journal = {Structural equation modeling : a multidisciplinary journal}, volume = {26}, number = {3}, pages = {470-480}, pmid = {31133771}, issn = {1070-5511}, support = {K01 ES026840/ES/NIEHS NIH HHS/United States ; R01 DA018673/DA/NIDA NIH HHS/United States ; R25 DA026119/DA/NIDA NIH HHS/United States ; }, abstract = {As data collection costs fall and vast quantities of data are collected, data analysis time can become a bottleneck. For massively parallel analyses, cloud computing offers the short-term rental of ample processing power. Recent software innovations have reduced the effort needed to take advantage of cloud computing. To demonstrate, we replicate a voxel-wise examination of the genetic contributions to cortical development by age using evidence from 1,748 MRI scans. Specifically, we employ off-the-shelf Kubernetes software that permits us to re-run our analyses using almost the same computer code as was published in the original article. Large, well funded institutions may continue to maintain their own computing clusters. However, the modest cost of renting and ease of utilizing cloud computing services makes unprecedented compute power available to all researchers, whether or not affiliated with a research institution. 
We expect this to spur innovation in the sophisticated modeling of large datasets.}, } @article {pmid31127699, year = {2019}, author = {Bhagroo, S and French, SB and Mathews, JA and Nazareth, DP}, title = {Secondary monitor unit calculations for VMAT using parallelized Monte Carlo simulations.}, journal = {Journal of applied clinical medical physics}, volume = {20}, number = {6}, pages = {60-69}, pmid = {31127699}, issn = {1526-9914}, mesh = {Brain Neoplasms/*radiotherapy ; Head and Neck Neoplasms/*radiotherapy ; Humans ; Male ; *Monte Carlo Method ; Particle Accelerators/instrumentation ; *Phantoms, Imaging ; Prostatic Neoplasms/*radiotherapy ; Radiotherapy Dosage ; Radiotherapy Planning, Computer-Assisted/*methods ; Radiotherapy, Intensity-Modulated/*methods ; }, abstract = {We have developed a fast and accurate in-house Monte Carlo (MC) secondary monitor unit (MU) check method, based on the EGSnrc system, for independent verification of volumetric modulated arc therapy (VMAT) treatment planning system dose calculations, in accordance with TG-114 recommendations. For a VMAT treatment plan created for a Varian Trilogy linac, DICOM information was exported from Eclipse. An open-source platform was used to generate input files for dose calculations using the EGSnrc framework. The full VMAT plan simulation employed 10[7] histories, and was parallelized to run on a computer cluster. The resulting 3ddose matrices were converted to the DICOM format using CERR and imported into Eclipse. The method was evaluated using 35 clinical VMAT plans of various treatment sites. For each plan, the doses calculated with the MC approach at four three-dimensional reference points were compared to the corresponding Eclipse calculations, as well as calculations performed using the clinical software package, MUCheck. Each MC arc simulation of 10[7] particles required 13-25 min of total time, including processing and calculation. 
The average discrepancies in calculated dose values between the MC method and Eclipse were 2.03% (compared to 3.43% for MUCheck) for prostate cases, 2.45% (3.22% for MUCheck) for head and neck cases, 1.7% (5.51% for MUCheck) for brain cases, and 2.84% (5.64% for MUCheck) for miscellaneous cases. Of 276 comparisons, 201 showed greater agreement between the treatment planning system and MC vs MUCheck. The largest discrepancies between MC and MUCheck were found in regions of high dose gradients and heterogeneous densities. By parallelizing the calculations, point-dose accuracies of 2-7%, sufficient for clinical secondary checks, can be achieved in a reasonable amount of time. As computer clusters and/or cloud computing become more widespread, this method will be useful in most clinical setups.}, } @article {pmid31117186, year = {2019}, author = {Pustišek, M and Dolenc, D and Kos, A}, title = {LDAF: Low-Bandwidth Distributed Applications Framework in a Use Case of Blockchain-Enabled IoT Devices.}, journal = {Sensors (Basel, Switzerland)}, volume = {19}, number = {10}, pages = {}, pmid = {31117186}, issn = {1424-8220}, abstract = {In this paper, we present Low-Bandwidth Distributed Applications Framework (LDAF)-an application-aware gateway for communication-constrained Internet of things (IoT) devices. A modular approach facilitates connecting to existing cloud backend servers and managing message formats and APIs' native application logic to meet the communication constraints of resource-limited end devices. We investigated options for positioning the LDAF server in fog computing architectures. We demonstrated the approach in three use cases: (i) a simple domain name system (DNS) query from the device to a DNS server, (ii) a complex interaction of a blockchain-based IoT device with a blockchain network, and (iii) difference based patching of binary (system) files at the IoT end devices. 
In a blockchain smart meter use case we effectively enabled decentralized applications (DApp) for devices that without our solution could not participate in a blockchain network. Employing the more efficient binary content encoding, we reduced the periodic traffic from 16 kB/s to ~1.1 kB/s, i.e., 7% of the initial traffic. With additional optimization of the application protocol in the gateway and message filtering, the periodic traffic was reduced to ~1% of the initial traffic, without any tradeoffs in the application's functionality or security. Using a function of binary difference we managed to reduce the size of the communication traffic to the end device, at least when the binary patch was smaller than the patching file.}, } @article {pmid31112896, year = {2019}, author = {Richter, AN and Khoshgoftaar, TM}, title = {Efficient learning from big data for cancer risk modeling: A case study with melanoma.}, journal = {Computers in biology and medicine}, volume = {110}, number = {}, pages = {29-39}, doi = {10.1016/j.compbiomed.2019.04.039}, pmid = {31112896}, issn = {1879-0534}, mesh = {*Big Data ; *Electronic Health Records ; Humans ; *Machine Learning ; *Melanoma/epidemiology/metabolism/pathology ; *Models, Biological ; Predictive Value of Tests ; Risk Assessment ; Risk Factors ; }, abstract = {BACKGROUND: Building cancer risk models from real-world data requires overcoming challenges in data preprocessing, efficient representation, and computational performance. We present a case study of a cloud-based approach to learning from de-identified electronic health record data and demonstrate its effectiveness for melanoma risk prediction.

METHODS: We used a hybrid distributed and non-distributed approach to computing in the cloud: distributed processing with Apache Spark for data preprocessing and labeling, and non-distributed processing for machine learning model training with scikit-learn. Moreover, we explored the effects of sampling the training dataset to improve computational performance. Risk factors were evaluated using regression weights as well as tree SHAP values.

RESULTS: Among 4,061,172 patients who did not have melanoma through the 2016 calendar year, 10,129 were diagnosed with melanoma within one year. A gradient-boosted classifier achieved the best predictive performance with cross-validation (AUC = 0.799, Sensitivity = 0.753, Specificity = 0.688). Compared to a model built on the original data, a dataset two orders of magnitude smaller could achieve statistically similar or better performance with less than 1% of the training time and cost.

CONCLUSIONS: We produced a model that can effectively predict melanoma risk for a diverse dermatology population in the U.S. by using hybrid computing infrastructure and data sampling. For this de-identified clinical dataset, sampling approaches significantly shortened the time for model building while retaining predictive accuracy, allowing for more rapid machine learning model experimentation on familiar computing machinery. A large number of risk factors (>300) were required to produce the best model.}, } @article {pmid31110969, year = {2019}, author = {Gholamhosseini, L and Sadoughi, F and Safaei, A}, title = {Hospital Real-Time Location System (A Practical Approach in Healthcare): A Narrative Review Article.}, journal = {Iranian journal of public health}, volume = {48}, number = {4}, pages = {593-602}, pmid = {31110969}, issn = {2251-6085}, abstract = {BACKGROUND: The Hospital Real-time Location Systems (HRTLS), deal with monitoring the patients, medical staff and valuable medical equipment in emergency situations. Therefore, the study aimed to propose Hospital Real-Time Location Systems based on the novel technologies in Iran.

METHODS: In this narrative-review, the articles and official reports on HRTLS, were gathered and analyzed from related textbooks and indexing sites with the defined keywords in English or Persian. The search of databases such as IDTechEx, IEEE, PubMed Central, Science Direct, EMBASE/Excerpta Medica, Scopus, Web of Science, Elsevier journals, WHO publications and Google Scholar was performed to reconfirm the efficiency of HRTLS from 2006 to 2017.

RESULTS: Various technologies have been used in the current systems, which have led to the reduced error rate, costs and increased speed of providing the healthcare services. Applications of these systems include tracking of patient's, medical staff and valuable medical assets. Besides, achieving the patient & staff satisfaction is among other basic applications of these Systems. The accurate data exchange and processes control are considered as positive aspects of this technology.

CONCLUSION: HRTLS has great importance in healthcare systems and its efficiency in medical centers is reliable; hence, it seems necessary to determine the organization's requirements, apply novel technologies such as cloud computing and Internet of things, and integrate them to get access to maximum advantages in Iranian healthcare centers.}, } @article {pmid31100083, year = {2019}, author = {Bosma, TJ and Karagiannis, K and Santana-Quintero, L and Ilyushina, N and Zagorodnyaya, T and Petrovskaya, S and Laassri, M and Donnelly, RP and Rubin, S and Simonyan, V and Sauder, CJ}, title = {Identification and quantification of defective virus genomes in high throughput sequencing data using DVG-profiler, a novel post-sequence alignment processing algorithm.}, journal = {PloS one}, volume = {14}, number = {5}, pages = {e0216944}, pmid = {31100083}, issn = {1932-6203}, mesh = {*Algorithms ; Animals ; Chromosome Mapping/methods/*statistics & numerical data ; DNA Primers/chemical synthesis/metabolism ; Datasets as Topic ; Defective Viruses/classification/*genetics ; *Genome, Viral ; High-Throughput Nucleotide Sequencing/statistics & numerical data ; Humans ; Molecular Typing ; Mumps virus/classification/*genetics ; Parainfluenza Virus 5/classification/*genetics ; Real-Time Polymerase Chain Reaction ; Sendai virus/classification/*genetics ; Sensitivity and Specificity ; }, abstract = {Most viruses are known to spontaneously generate defective viral genomes (DVG) due to errors during replication. These DVGs are subgenomic and contain deletions that render them unable to complete a full replication cycle in the absence of a co-infecting, non-defective helper virus. DVGs, especially of the copyback type, frequently observed with paramyxoviruses, have been recognized to be important triggers of the antiviral innate immune response. DVGs have therefore gained interest for their potential to alter the attenuation and immunogenicity of vaccines. 
To investigate this potential, accurate identification and quantification of DVGs is essential. Conventional methods, such as RT-PCR, are labor intensive and will only detect primer sequence-specific species. High throughput sequencing (HTS) is much better suited for this undertaking. Here, we present an HTS-based algorithm called DVG-profiler to identify and quantify all DVG sequences in an HTS data set generated from a virus preparation. DVG-profiler identifies DVG breakpoints relative to a reference genome and reports the directionality of each segment from within the same read. The specificity and sensitivity of the algorithm was assessed using both in silico data sets as well as HTS data obtained from parainfluenza virus 5, Sendai virus and mumps virus preparations. HTS data from the latter were also compared with conventional RT-PCR data and with data obtained using an alternative algorithm. The data presented here demonstrate the high specificity, sensitivity, and robustness of DVG-profiler. This algorithm was implemented within an open source cloud-based computing environment for analyzing HTS data. DVG-profiler might prove valuable not only in basic virus research but also in monitoring live attenuated vaccines for DVG content and to assure vaccine lot to lot consistency.}, } @article {pmid31091903, year = {2019}, author = {Taddese, T and Kitabata, M and Okazaki, S}, title = {All-atom molecular dynamics study on the non-solvent induced phase separation: Thermodynamics of adding water to poly(vinylidene fluoride)/N-methyl-2-pyrrolidone solution.}, journal = {The Journal of chemical physics}, volume = {150}, number = {18}, pages = {184505}, doi = {10.1063/1.5094088}, pmid = {31091903}, issn = {1089-7690}, abstract = {The change in the thermodynamics when adding water in poly(vinylidene fluoride) (PVDF)/N-methyl-2-pyrrolidone (NMP) solution is studied from all atom molecular dynamics (MD) simulations. 
This is done by estimating the free energy of mixing of PVDF/NMP solution with increasing volume fraction of water (ϕw) using an appropriately chosen thermodynamic cycle and the Bennett acceptance ratio method. The MD calculations predict the thermodynamic phase separation point of water/NMP/PVDF to be at ϕw = 0.08, in close agreement with the experimental cloud point measurement (ϕw = 0.05). Examining the enthalpic and entropic components of the free energy of mixing reveals that at low concentrations of water, the enthalpy term has the most significant contribution to the miscibility of the ternary system, whereas at higher concentrations of water, the entropy term dominates. Finally, the free energy of mixing was compared with the Flory-Huggins (FH) free energy of mixing by computing the concentration-dependent interaction parameters from MD simulations. The FH model inadequately predicted the miscibility of the PVDF solution, mainly due to its negligence of the excess entropy of mixing.}, } @article {pmid31091838, year = {2019}, author = {Santos, J and Wauters, T and Volckaert, B and De Turck, F}, title = {Resource Provisioning in Fog Computing: From Theory to Practice [†].}, journal = {Sensors (Basel, Switzerland)}, volume = {19}, number = {10}, pages = {}, pmid = {31091838}, issn = {1424-8220}, support = {Intelligent DEnse And Longe range IoT networks (IDEAL-IoT) under Grant Agreement #S004017N//Fonds Wetenschappelijk Onderzoek/ ; Service-oriented management of a virtualised future internet//Fonds Wetenschappelijk Onderzoek/ ; }, abstract = {The Internet-of-Things (IoT) and Smart Cities continue to expand at enormous rates. Centralized Cloud architectures cannot sustain the requirements imposed by IoT services. Enormous traffic demands and low latency constraints are among the strictest requirements, making cloud solutions impractical. As an answer, Fog Computing has been introduced to tackle this trend. 
However, only theoretical foundations have been established and the acceptance of its concepts is still in its early stages. Intelligent allocation decisions would provide proper resource provisioning in Fog environments. In this article, a Fog architecture based on Kubernetes, an open source container orchestration platform, is proposed to solve this challenge. Additionally, a network-aware scheduling approach for container-based applications in Smart City deployments has been implemented as an extension to the default scheduling mechanism available in Kubernetes. Last but not least, an optimization formulation for the IoT service problem has been validated as a container-based application in Kubernetes showing the full applicability of theoretical approaches in practical service deployments. Evaluations have been performed to compare the proposed approaches with the Kubernetes standard scheduling feature. Results show that the proposed approaches achieve reductions of 70% in terms of network latency when compared to the default scheduling mechanism.}, } @article {pmid31083377, year = {2019}, author = {Cano Ortega, A and Sánchez Sutil, FJ and De la Casa Hernández, J}, title = {Power Factor Compensation Using Teaching Learning Based Optimization and Monitoring System by Cloud Data Logger.}, journal = {Sensors (Basel, Switzerland)}, volume = {19}, number = {9}, pages = {}, pmid = {31083377}, issn = {1424-8220}, abstract = {The main objective of this paper is to compensate power factor using teaching learning based optimization (TLBO), determine the capacitor bank optimization (CBO) algorithm, and monitor a system in real-time using cloud data logging (CDL). Implemented Power Factor Compensation and Monitoring System (PFCMS) calculates the optimal capacitor combination to improve power factor of the installation by measure of voltage, current, and active power. 
CBO algorithm determines the best solution of capacitor values to install, by applying TLBO in different phases of the algorithm. Electrical variables acquired by the sensors and the variables calculated are stored in CDL using Google Sheets (GS) to monitor and analyse the installation by means of a TLBO algorithm implemented in PFCMS, that optimizes the compensation power factor of installation and determining which capacitors are connected in real time. Moreover, the optimization of the power factor in facilities means economic and energy savings, as well as the improvement of the quality of the operation of the installation.}, } @article {pmid31077294, year = {2019}, author = {Lee, S and Johnson, J and Vitzthum, C and Kırlı, K and Alver, BH and Park, PJ}, title = {Tibanna: software for scalable execution of portable pipelines on the cloud.}, journal = {Bioinformatics (Oxford, England)}, volume = {35}, number = {21}, pages = {4424-4426}, pmid = {31077294}, issn = {1367-4811}, support = {U01 CA200059/CA/NCI NIH HHS/United States ; }, mesh = {Computational Biology ; Language ; *Software ; *Workflow ; }, abstract = {SUMMARY: We introduce Tibanna, an open-source software tool for automated execution of bioinformatics pipelines on Amazon Web Services (AWS). Tibanna accepts reproducible and portable pipeline standards including Common Workflow Language (CWL), Workflow Description Language (WDL) and Docker. It adopts a strategy of isolation and optimization of individual executions, combined with a serverless scheduling approach. Pipelines are executed and monitored using local commands or the Python Application Programming Interface (API) and cloud configuration is automatically handled. Tibanna is well suited for projects with a range of computational requirements, including those with large and widely fluctuating loads. Notably, it has been used to process terabytes of data for the 4D Nucleome (4DN) Network.

Source code is available on GitHub at https://github.com/4dn-dcic/tibanna.

SUPPLEMENTARY INFORMATION: Supplementary data are available at Bioinformatics online.}, } @article {pmid31073299, year = {2019}, author = {Cabral, A and da Silva Cândido, D and Monteiro, SM and Lemos, F and Saitovitch, D and Noronha, IL and Alves, LF and Geraldo, MV and Kalil, J and Cunha-Neto, E and Pinto Ferreira, LR and Coelho, V}, title = {Differential microRNA Profile in Operational Tolerance: A Potential Role in Favoring Cell Survival.}, journal = {Frontiers in immunology}, volume = {10}, number = {}, pages = {740}, pmid = {31073299}, issn = {1664-3224}, mesh = {Adult ; Cell Survival/*genetics ; Computational Biology/methods ; Down-Regulation/genetics ; Female ; Gene Expression Profiling/methods ; Humans ; Immune Tolerance/*genetics ; Male ; MicroRNAs/*genetics ; Middle Aged ; Real-Time Polymerase Chain Reaction/methods ; }, abstract = {Background: Operational tolerance (OT) is a state of graft functional stability that occurs after at least 1 year of immunosuppressant withdrawal. MicroRNAs (microRNA) are small non-coding RNAs that downregulate messenger RNA/protein expression of innumerous molecules and are critical for homeostasis. We investigated whether OT in kidney transplantation displays a differential microRNA profile, which would suggest that microRNAs participate in Operational Tolerance mechanisms, and may reveal potential molecular pathways. Methods: We first compared serum microRNA in OT (n = 8) with chronic rejection (CR) (n = 5) and healthy individuals (HI) (n = 5), using a 768-microRNA qPCR-panel. We used the Thermo Fisher Cloud computing platform to compare the levels of microRNAs in the OT group in relation to the other study groups. We performed validation experiments for miR-885-5p, by q-PCR, in a larger number of study subjects (OT = 8, CR = 12, HI = 12), as individual samples. Results: We detected a differential microRNA profile in OT vs. 
its opposing clinical outcome-CR-suggesting that microRNAs may integrate transplantation tolerance mechanisms. Some miRNAs were detected at higher levels in OT: miR-885-5p, miR-331-3p, miR-27a-5p vs. CR; others, we found at lower levels: miR-1233-3p, miR-572, miR-638, miR-1260a. Considering highly predicted/experimentally demonstrated targets of these miRNAs, bioinformatics analysis revealed that the granzyme B, and death receptor pathways are dominant, suggesting that cell death regulation integrates transplantation tolerance mechanisms. We confirmed higher miR-885-5p levels in OT vs. CR, and vs. HI, in a larger number of subjects. Conclusions: We propose that epigenetics mechanisms involving microRNAs may integrate human transplantation tolerance mechanisms, and regulate key members of the cell death/survival signaling. miR-885-5p could favor cell survival in OT by diminishing the levels of CRADD/RAIDD and CASP3. Nonetheless, given the nature of any complex phenomenon in humans, only cumulative data will help to determine whether this microRNA differential profile may be related to the cause or consequence of operational tolerance.}, } @article {pmid31071923, year = {2019}, author = {Li, G and Liu, Y and Wu, J and Lin, D and Zhao, S}, title = {Methods of Resource Scheduling Based on Optimized Fuzzy Clustering in Fog Computing.}, journal = {Sensors (Basel, Switzerland)}, volume = {19}, number = {9}, pages = {}, pmid = {31071923}, issn = {1424-8220}, support = {61771289//National Natural Science Foundation of China/ ; 61672321//National Natural Science Foundation of China/ ; }, abstract = {Cloud computing technology is widely used at present. However, cloud computing servers are far from terminal users, which may lead to high service request delays and low user satisfaction. As a new computing architecture, fog computing is an extension of cloud computing that can effectively solve the aforementioned problems. 
Resource scheduling is one of the key technologies in fog computing. We propose a resource scheduling method for fog computing in this paper. First, we standardize and normalize the resource attributes. Second, we combine the methods of fuzzy clustering with particle swarm optimization to divide the resources, and the scale of the resource search is reduced. Finally, we propose a new resource scheduling algorithm based on optimized fuzzy clustering. The experimental results show that our method can improve user satisfaction and the efficiency of resource scheduling.}, } @article {pmid31070892, year = {2019}, author = {Mayer, M and Baeumner, AJ}, title = {A Megatrend Challenging Analytical Chemistry: Biosensor and Chemosensor Concepts Ready for the Internet of Things.}, journal = {Chemical reviews}, volume = {119}, number = {13}, pages = {7996-8027}, doi = {10.1021/acs.chemrev.8b00719}, pmid = {31070892}, issn = {1520-6890}, mesh = {Animals ; Biosensing Techniques/instrumentation/methods/*trends ; Chemistry Techniques, Analytical/instrumentation/methods/*trends ; Humans ; Internet of Things/*trends ; Point-of-Care Systems/trends ; }, abstract = {The Internet of Things (IoT) is a megatrend that cuts across all scientific and engineering disciplines and establishes an integrating technical evolution to improve production efficiencies and daily human life. Linked machines and sensors use decision-making routines to work toward a common product or solution. Expanding this technical revolution into the value chain of complex areas such as agriculture, food production, and healthcare requires the implementation and connection of sophisticated (bio)analytical methods. Today, wearable sensors, monitors, and point-of-care diagnostic tests are part of our daily lives and improve patients' medical progression or athletes' monitoring capabilities that are already beyond imagination. 
Also, early contributions toward sensor networks and finally the IT revolution with wireless data collection and transmission via Bluetooth or smartphones have set the foundation to connect remote sensors and distributed analytical chemical services with centralized laboratories, cloud storage, and cloud computing. Here, we critically review those biosensor and chemosensor technologies and concepts used in an IoT setting or considered IoT-ready that were published in the period 2013-2018, while also pointing to those foundational concepts and ideas that arose over the last two decades. We focus on these sensors due to their unique ability to be remotely stationed and that easily function in networks and have made the greatest progress toward IoT integration. Finally, we highlight requirements and existing and future challenges and provide possible solutions important toward the vision of a seamless integration into a global analytical concept, which includes many more analytical techniques than sensors and includes foremost next-generation sequencing and separation principles coupled with MS detection.}, } @article {pmid31067808, year = {2019}, author = {Hao, B and Ma, M and Li, S and Li, Q and Hao, D and Huang, J and Ge, Z and Yang, H and Han, X}, title = {Land Use Change and Climate Variation in the Three Gorges Reservoir Catchment from 2000 to 2015 Based on the Google Earth Engine.}, journal = {Sensors (Basel, Switzerland)}, volume = {19}, number = {9}, pages = {}, pmid = {31067808}, issn = {1424-8220}, support = {41830648, 41771361//NSFC (National Natural Science Foundation of China) project/ ; SWU117035//Southwest University research funding/ ; [2017] 1231//Chongqing R&D Project of the high technology and major industries/ ; }, abstract = {Possible environmental change and ecosystem degradation have received increasing attention since the construction of Three Gorges Reservoir Catchment (TGRC) in China. 
The advanced Google Earth Engine (GEE) cloud-based platform and the large number of Geosciences and Remote Sensing datasets archived in GEE were used to analyze the land use and land cover change (LULCC) and climate variation in TGRC. GlobeLand30 data were used to evaluate the spatial land dynamics from 2000 to 2010 and Landsat 8 Operational Land Imager (OLI) images were applied for land use in 2015. The interannual variations in the Land Surface Temperature (LST) and seasonally integrated normalized difference vegetation index (SINDVI) were estimated using Moderate Resolution Imaging Spectroradiometer (MODIS) products. The climate factors including air temperature, precipitation and evapotranspiration were investigated based on the data from the Global Land Data Assimilation System (GLDAS). The results indicated that from 2000 to 2015, the cultivated land and grassland decreased by 2.05% and 6.02%, while the forest, wetland, artificial surface, shrub land and waterbody increased by 3.64%, 0.94%, 0.87%, 1.17% and 1.45%, respectively. The SINDVI increased by 3.209 in the period of 2000-2015, while the LST decreased by 0.253 °C from 2001 to 2015. The LST showed an increasing trend primarily in urbanized area, with a decreasing trend mainly in forest area. In particular, Chongqing City had the highest LST during the research period. A marked decrease in SINDVI occurred primarily in urbanized areas. Good vegetation areas were primarily located in the eastern part of the TGRC, such as Wuxi County, Wushan County, and Xingshan County. During the 2000-2015 period, the air temperature, precipitation and evapotranspiration rose by 0.0678 °C/a, 1.0844 mm/a, and 0.4105 mm/a, respectively. The climate change in the TGRC was influenced by LULCC, but the effect was limited. What is more, the climate change was affected by regional climate change in Southwest China. Marked changes in land use have occurred in the TGRC, and they have resulted in changes in the LST and SINDVI. 
There was a significantly negative relationship between LST and SINDVI in most parts of the TGRC, especially in expanding urban areas and growing forest areas. Our study highlighted the importance of environmental protection, particularly proper management of land use, for sustainable development in the catchment.}, } @article {pmid31067763, year = {2019}, author = {Pushpan, S and Velusamy, B}, title = {Fuzzy-Based Dynamic Time Slot Allocation for Wireless Body Area Networks.}, journal = {Sensors (Basel, Switzerland)}, volume = {19}, number = {9}, pages = {}, pmid = {31067763}, issn = {1424-8220}, abstract = {With the advancement in networking, information and communication technologies, wireless body area networks (WBANs) are becoming more popular in the field of medical and non-medical applications. Real-time patient monitoring applications generate periodic data in a short time period. In the case of life-critical applications, the data may be bursty. Hence the system needs a reliable energy efficient communication technique which has a limited delay. In such cases the fixed time slot assignment in medium access control standards results in low system performance. This paper deals with a dynamic time slot allocation scheme in a fog-assisted network for a real-time remote patient monitoring system. Fog computing is an extended version of the cloud computing paradigm, which is suitable for reliable, delay-sensitive life-critical applications. In addition, to enhance the performance of the network, an energy-efficient minimum cost parent selection algorithm has been proposed for routing data packets. The dynamic time slot allocation uses fuzzy logic with input variables as energy ratio, buffer ratio, and packet arrival rate. Dynamic slot allocation eliminates the time slot wastage, excess delay in the network and attributes a high level of reliability to the network with maximum channel utilization. 
The efficacy of the proposed scheme is proved in terms of packet delivery ratio, average end to end delay, and average energy consumption when compared with the conventional IEEE 802.15.4 standard and the tele-medicine protocol.}, } @article {pmid31064133, year = {2019}, author = {Martínez-Peláez, R and Toral-Cruz, H and Parra-Michel, JR and García, V and Mena, LJ and Félix, VG and Ochoa-Brust, A}, title = {An Enhanced Lightweight IoT-based Authentication Scheme in Cloud Computing Circumstances.}, journal = {Sensors (Basel, Switzerland)}, volume = {19}, number = {9}, pages = {}, pmid = {31064133}, issn = {1424-8220}, abstract = {With the rapid deployment of the Internet of Things and cloud computing, it is necessary to enhance authentication protocols to reduce attacks and security vulnerabilities which affect the correct performance of applications. In 2019 a new lightweight IoT-based authentication scheme in cloud computing circumstances was proposed. According to the authors, their protocol is secure and resists very well-known attacks. However, when we evaluated the protocol we found some security vulnerabilities and drawbacks, making the scheme insecure. Therefore, we propose a new version considering login, mutual authentication and key agreement phases to enhance the security. Moreover, we include a sub-phase called evidence of connection attempt which provides proof about the participation of the user and the server. The new scheme achieves the security requirements and resists very well-known attacks, improving previous works. 
In addition, the performance evaluation demonstrates that the new scheme requires less communication-cost than previous authentication protocols during the registration and login phases.}, } @article {pmid31056517, year = {2021}, author = {Zhang, Q and Bai, C and Yang, LT and Chen, Z and Li, P and Yu, H}, title = {A Unified Smart Chinese Medicine Framework for Healthcare and Medical Services.}, journal = {IEEE/ACM transactions on computational biology and bioinformatics}, volume = {18}, number = {3}, pages = {882-890}, doi = {10.1109/TCBB.2019.2914447}, pmid = {31056517}, issn = {1557-9964}, mesh = {*Cloud Computing ; Delivery of Health Care/*methods ; Humans ; Machine Learning ; Medical Informatics/*methods ; *Medicine, Chinese Traditional ; }, abstract = {Smart Chinese medicine has emerged to contribute to the evolution of healthcare and medical services by applying machine learning together with advanced computing techniques like cloud computing to computer-aided diagnosis and treatment in the health engineering and informatics. Specifically, smart Chinese medicine is considered to have the potential to treat difficult and complicated diseases such as diabetes and cancers. Unfortunately, smart Chinese medicine has made very limited progress in the past few years. In this paper, we present a unified smart Chinese medicine framework based on the edge-cloud computing system. The objective of the framework is to achieve computer-aided syndrome differentiation and prescription recommendation, and thus to provide pervasive, personalized, and patient-centralized services in healthcare and medicine. To accomplish this objective, we integrate deep learning and deep reinforcement learning into the traditional Chinese medicine. Furthermore, we propose a multi-modal deep computation model for syndrome recognition that is a crucial part of syndrome differentiation. 
Finally, we conduct experiments to validate the proposed model by comparing with the stacked auto-encoder and multi-modal deep learning model for syndrome recognition of hypertension and cold.}, } @article {pmid31049978, year = {2019}, author = {Hirschfeld, G and Thiele, C}, title = {Cloud-based simulation studies in R - A tutorial on using doRedis with Amazon spot fleets.}, journal = {Statistics in medicine}, volume = {38}, number = {20}, pages = {3947-3959}, doi = {10.1002/sim.8188}, pmid = {31049978}, issn = {1097-0258}, mesh = {*Cloud Computing ; Computer Simulation ; Humans ; Internet ; *Software ; }, abstract = {Simulation studies are helpful in testing novel statistical methods. From a computational perspective, they constitute embarrassingly parallel tasks. We describe parallelization techniques in the programming language R that can be used on Amazon's cloud-based infrastructure. After a short conceptual overview of the parallelization techniques in R, we provide a hands-on tutorial on how the doRedis package in conjunction with the Redis server can be used on Amazon Web Services, specifically running spot fleets. The tutorial proceeds in seven steps, ie, (1) starting up an EC2 instance, (2) installing a Redis server, (3) using doRedis with a local worker, (4) using doRedis with a remote worker, (5) setting up instances that automatically fetch tasks from a specific master, (6) using spot-fleets, and (7) shutting down the instances. As a basic example, we show how these techniques can be used to assess the effects of heteroscedasticity on the equal-variance t-test. 
Furthermore, we address several advanced issues, such as multiple conditions, cost-management, and chunking.}, } @article {pmid31046465, year = {2020}, author = {Samra, H and Li, A and Soh, B and Zain, MA}, title = {Utilisation of hospital information systems for medical research in Saudi Arabia: A mixed-method exploration of the views of healthcare and IT professionals involved in hospital database management systems.}, journal = {Health information management : journal of the Health Information Management Association of Australia}, volume = {49}, number = {2-3}, pages = {117-126}, doi = {10.1177/1833358319847120}, pmid = {31046465}, issn = {1833-3575}, mesh = {*Biomedical Research ; *Health Knowledge, Attitudes, Practice ; Health Personnel/*psychology ; *Hospital Information Systems ; Humans ; Interviews as Topic ; Qualitative Research ; Saudi Arabia ; Surveys and Questionnaires ; }, abstract = {BACKGROUND: Although in recent times the Saudi government has paid much attention to the adaptation of hospital information systems (HIS) and electronic medical records (EMR), the importance of utilising HIS to enhance medical research has been neglected.

OBJECTIVE: We aimed to (i) investigate the current state of medical research in Saudi Arabia, (ii) identify possible issues that hinder improvement of medical research and (iii) identify possible solutions to enhance the role of HIS in medical research in Saudi Arabia.

METHOD: We used a questionnaire and structured interview approach. Questionnaires were distributed to Saudi healthcare professionals. One hundred responses to our questionnaire were captured by the online Google Form designed specifically for our survey. Structured interviews with two IT professionals were conducted regarding technical aspects of their hospital data management systems.

RESULTS: Six themes contributing to the inefficacy of HIS in medical research in Saudi Arabia emerged from the data: incorrect datasets, difficult data collection and storage, poor data analytics, a lack of system interoperability across different HIS for universal access and negative perception of the usefulness of HIS for medical research.

CONCLUSION AND IMPLICATIONS: Our findings suggest (i) cloud-based HIS would support efficient, reliable and integrated data collection and storage across all hospitals in Saudi Arabia; (ii) EMR data sources should be seamlessly linked to avoid incomplete, fragmented or erroneous EMR in Saudi Arabia; and (iii) collaboration between all hospitals in Saudi Arabia to adopt a uniform standard to support interoperability and improve data exchange and integration is necessary.}, } @article {pmid31035372, year = {2019}, author = {Zhang, H and Zhang, Z and Zhang, L and Yang, Y and Kang, Q and Sun, D}, title = {Object Tracking for a Smart City Using IoT and Edge Computing.}, journal = {Sensors (Basel, Switzerland)}, volume = {19}, number = {9}, pages = {}, pmid = {31035372}, issn = {1424-8220}, support = {2016YFE0108100//National Key Research and Development Program of China/ ; 61872019//National Natural Science Foundation of China/ ; }, abstract = {As the Internet-of-Things (IoT) and edge computing have been major paradigms for distributed data collection, communication, and processing, smart city applications in the real world tend to adopt IoT and edge computing broadly. Today, more and more machine learning algorithms would be deployed into front-end sensors, devices, and edge data centres rather than centralised cloud data centres. However, front-end sensors and devices are usually not so capable as those computing units in huge data centres, and for this sake, in practice, engineers choose to compromise for limited capacity of embedded computing and limited memory, e.g., neural network models being pruned to fit embedded devices. Visual object tracking is one of many important elements of a smart city, and in the IoT and edge computing context, high requirements to computing power and memory space severely prevent massive and accurate tracking. 
In this paper, we report on our contribution to object tracking on lightweight computing including (1) using limited computing capacity and memory space to realise tracking; (2) proposing a new algorithm region proposal correlation filter fitting for most edge devices. Systematic evaluations show that (1) our techniques can fit most IoT devices; (2) our techniques can keep relatively high accuracy; and (3) the generated model size is much less than others.}, } @article {pmid31029080, year = {2019}, author = {Iacoangeli, A and Al Khleifat, A and Sproviero, W and Shatunov, A and Jones, AR and Morgan, SL and Pittman, A and Dobson, RJ and Newhouse, SJ and Al-Chalabi, A}, title = {DNAscan: personal computer compatible NGS analysis, annotation and visualisation.}, journal = {BMC bioinformatics}, volume = {20}, number = {1}, pages = {213}, pmid = {31029080}, issn = {1471-2105}, support = {AL-CHALABI/APR15/844-791/MNDA_/Motor Neurone Disease Association/United Kingdom ; MR/L501529/1/MRC_/Medical Research Council/United Kingdom ; MC_PC_17214/MRC_/Medical Research Council/United Kingdom ; 171/ALZS_/Alzheimer's Society/United Kingdom ; ALCHALABI-DOBSON/APR14/829-791/MNDA_/Motor Neurone Disease Association/United Kingdom ; //Wellcome Trust/United Kingdom ; G0600974/MRC_/Medical Research Council/United Kingdom ; MR/R024804/1/MRC_/Medical Research Council/United Kingdom ; }, mesh = {Amyotrophic Lateral Sclerosis/genetics/pathology ; Computational Biology/*methods ; DNA, Bacterial/chemistry/genetics/metabolism ; Databases, Factual ; HIV-1/genetics ; *High-Throughput Nucleotide Sequencing ; Humans ; INDEL Mutation ; Polymorphism, Single Nucleotide ; RNA, Viral/chemistry/genetics/metabolism ; *User-Computer Interface ; Whole Genome Sequencing ; }, abstract = {BACKGROUND: Next Generation Sequencing (NGS) is a commonly used technology for studying the genetic basis of biological processes and it underpins the aspirations of precision medicine. 
However, there are significant challenges when dealing with NGS data. Firstly, a huge number of bioinformatics tools for a wide range of uses exist, therefore it is challenging to design an analysis pipeline. Secondly, NGS analysis is computationally intensive, requiring expensive infrastructure, and many medical and research centres do not have adequate high performance computing facilities and cloud computing is not always an option due to privacy and ownership issues. Finally, the interpretation of the results is not trivial and most available pipelines lack the utilities to favour this crucial step.

RESULTS: We have therefore developed a fast and efficient bioinformatics pipeline that allows for the analysis of DNA sequencing data, while requiring little computational effort and memory usage. DNAscan can analyse a whole exome sequencing sample in 1 h and a 40x whole genome sequencing sample in 13 h, on a midrange computer. The pipeline can look for single nucleotide variants, small indels, structural variants, repeat expansions and viral genetic material (or any other organism). Its results are annotated using a customisable variety of databases and are available for an on-the-fly visualisation with a local deployment of the gene.iobio platform. DNAscan is implemented in Python. Its code and documentation are available on GitHub: https://github.com/KHP-Informatics/DNAscan . Instructions for an easy and fast deployment with Docker and Singularity are also provided on GitHub.

CONCLUSIONS: DNAscan is an extremely fast and computationally efficient pipeline for analysis, visualization and interpretation of NGS data. It is designed to provide a powerful and easy-to-use tool for applications in biomedical research and diagnostic medicine, at minimal computational cost. Its comprehensive approach will maximise the potential audience of users, bringing such analyses within the reach of non-specialist laboratories, and those from centres with limited funding available.}, } @article {pmid31012399, year = {2019}, author = {Ward, DV and Hoss, AG and Kolde, R and van Aggelen, HC and Loving, J and Smith, SA and Mack, DA and Kathirvel, R and Halperin, JA and Buell, DJ and Wong, BE and Ashworth, JL and Fortunato-Habib, MM and Xu, L and Barton, BA and Lazar, P and Carmona, JJ and Mathew, J and Salgo, IS and Gross, BD and Ellison, RT}, title = {Integration of genomic and clinical data augments surveillance of healthcare-acquired infections.}, journal = {Infection control and hospital epidemiology}, volume = {40}, number = {6}, pages = {649-655}, doi = {10.1017/ice.2019.75}, pmid = {31012399}, issn = {1559-6834}, mesh = {Adolescent ; Adult ; Aged ; Aged, 80 and over ; Child ; Child, Preschool ; Cluster Analysis ; Cross Infection/*epidemiology/microbiology/prevention & control ; Disease Outbreaks ; Female ; *Genome, Bacterial ; Humans ; Infant ; Infant, Newborn ; Infection Control/*methods ; Male ; Massachusetts ; Middle Aged ; Molecular Epidemiology/methods ; *Molecular Typing ; *Whole Genome Sequencing ; Young Adult ; }, abstract = {BACKGROUND: Determining infectious cross-transmission events in healthcare settings involves manual surveillance of case clusters by infection control personnel, followed by strain typing of clinical/environmental isolates suspected in said clusters. Recent advances in genomic sequencing and cloud computing now allow for the rapid molecular typing of infecting isolates.

OBJECTIVE: To facilitate rapid recognition of transmission clusters, we aimed to assess infection control surveillance using whole-genome sequencing (WGS) of microbial pathogens to identify cross-transmission events for epidemiologic review.

METHODS: Clinical isolates of Staphylococcus aureus, Enterococcus faecium, Pseudomonas aeruginosa, and Klebsiella pneumoniae were obtained prospectively at an academic medical center, from September 1, 2016, to September 30, 2017. Isolate genomes were sequenced, followed by single-nucleotide variant analysis; a cloud-computing platform was used for whole-genome sequence analysis and cluster identification.

RESULTS: Most strains of the 4 studied pathogens were unrelated, and 34 potential transmission clusters were present. The characteristics of the potential clusters were complex and likely not identifiable by traditional surveillance alone. Notably, only 1 cluster had been suspected by routine manual surveillance.

CONCLUSIONS: Our work supports the assertion that integration of genomic and clinical epidemiologic data can augment infection control surveillance for both the identification of cross-transmission events and the inclusion of missed and exclusion of misidentified outbreaks (ie, false alarms). The integration of clinical data is essential to prioritize suspect clusters for investigation, and for existing infections, a timely review of both the clinical and WGS results can hold promise to reduce HAIs. A richer understanding of cross-transmission events within healthcare settings will require the expansion of current surveillance approaches.}, } @article {pmid31009475, year = {2019}, author = {Dadi, AF and Desyibelew, HD}, title = {Undernutrition and its associated factors among pregnant mothers in Gondar town, Northwest Ethiopia.}, journal = {PloS one}, volume = {14}, number = {4}, pages = {e0215305}, pmid = {31009475}, issn = {1932-6203}, mesh = {Adolescent ; Adult ; Cross-Sectional Studies ; Ethiopia/epidemiology ; Female ; Humans ; Logistic Models ; Malnutrition/*diagnosis/epidemiology ; Middle Aged ; Mothers/*statistics & numerical data ; *Nutritional Status ; Pregnancy ; Prenatal Care/methods/*statistics & numerical data ; Risk Factors ; Young Adult ; }, abstract = {BACKGROUND: Regardless of significant gains and signs of progress in the last decades, maternal undernutrition remains a major public health concern in Ethiopia. Supporting the progress of interventions being taken in the country with evidence might be important to keep the sustainability of the government effort. We aimed at determining the extent of undernutrition and its associated factors among pregnant mothers in Gondar town, Northwest Ethiopia.

METHOD: A community-based cross-sectional study was conducted by including 940 selected pregnant mothers through a cluster sampling. A face-to-face interview was administered to pregnant mothers at a household level. We collected data using an Online Data collection kit (ODK) and the collected data was directly downloaded from the Google Cloud platform and finally imported to Stata 14 for further analysis. A multivariable logistic regression model was fitted to identify factors associated with undernutrition. A crude and adjusted odds ratio with their 95% confidence interval was calculated to declare the association and its significance. Model fitness was assured through the Hosmer and Lemeshow goodness of fit test and model classification accuracy.

RESULT: 14.4% (95%CI: 12.3-16.7) of pregnant mothers were undernourished. After adjusting for the main covariates; as the age of the pregnant mothers increases the odds of being undernourished decreases by 10% (AOR: 0.90; 95%CI: 0.87-0.95) and having a poor marital condition (AOR: 2.18; 95%CI: 1.03-4.59) increased the odds of undernutrition. The risk of undernutrition was also decreased by 43% among those pregnant mothers who consumed coffee sometimes (AOR: 0.57; 95%CI: 0.36-0.89) as compared to daily consumers.

CONCLUSION: A significant proportion of pregnant mother were undernourished. Integration of nutritional interventions with maternity health services would be highly important to improve the nutritional status of the mothers. It is also important to counsel pregnant mothers about a consequence of frequent coffee drinking during their pregnancy.}, } @article {pmid31004900, year = {2019}, author = {Yu, Z and Yao, Y and Yang, G and Wang, X and Vejre, H}, title = {Spatiotemporal patterns and characteristics of remotely sensed region heat islands during the rapid urbanization (1995-2015) of Southern China.}, journal = {The Science of the total environment}, volume = {674}, number = {}, pages = {242-254}, doi = {10.1016/j.scitotenv.2019.04.088}, pmid = {31004900}, issn = {1879-1026}, abstract = {Urban agglomeration has become the most salient feature of global urbanization in recent decades, while spatiotemporal patterns and evolution remain poorly understood in urban agglomerations, which limit the decision-makers to make more informed decisions to improve the regional environment. Here we selected one of the most rapidly urbanized regions in the world - Pearl River Delta Metropolitan Region (PRDR), located in southern China, as the case. Landsat images spanning from 1995 to 2015 were used to retrieve land surface temperature (LST). Four types of regional heat island (RHI) degree were defined for further analysis. Then multi-scale spatiotemporal patterns and characteristics of RHI were identified with the help of cloud-based computing, spatial and landscape analysis. We found that (1) traditional urban heat island (UHI) appears as an RHI on an urban agglomeration scale. In PRDR, we found RHI expended with increasing connectivity, especially in the estuary areas where isolated UHI gradually merged during the rapid urbanization. 
(2) The contribution of main cities in PRDR to RHI and the evolutionary trends and pattern, which is changed from a west-east to a southwest-northeast gradient, have been revealed. (3) Considering the scale effect and different RHI categories, we revealed that during the urbanization, the aggregation of the RHI is significant on a larger-scale, and the area of 4 °C ≤ Relative LST ≤ 8 °C is the stable and high-risk area, which provide scientific bases for the governance of the thermal environment on the regional scale. (4) The study also indicates the cooling effect of forests and water is better than that of grassland, while the cooling effect of grassland is uncertain. The methods and results of this study not only have implications on environmental planning and management in the PRDR but also provide useful insights into the thermal environment research and practice in other urban agglomerations.}, } @article {pmid30977338, year = {2019}, author = {Liang, H and Luo, Y}, title = {[Common mathematical-physical essence involved from chromatographic separation to intelligent medicine: irreversibility].}, journal = {Se pu = Chinese journal of chromatography}, volume = {37}, number = {4}, pages = {367-375}, doi = {10.3724/SP.J.1123.2018.12008}, pmid = {30977338}, issn = {1872-2059}, mesh = {*Chromatography ; *Mathematics ; *Medicine ; Thermodynamics ; }, abstract = {The chromatographic separation processes for many molecules in solute bands by stationary phase-mobile phase separation can be compared to ordering disease severity. The common features of chromatographic processes and machine-disease diagnosis-doctor's advice is the separation (classification) of components (individual disease states), both of which show irreversibility of time evolution; however, the former is caused by linear non-equilibrium thermodynamics and latter by nonlinear non-equilibrium thermodynamics (dissipative structure). 
When the scientific view is extended from drug detection and preparation to evidence-based medicine (EBM), discrete mathematics (axiomatic set theory and probability measure), and artificial intelligence (AI) cloud computing, the convection-diffusion equation and irreversibility in non-equilibrium thermodynamics form a common and core essence of mathematical physics that crosses fields of chromatographic separation and intelligent medicine. It is of profound scientific and practical significance to construct and develop a unified and all-encompassing mathematical framework by incorporating the generality and characteristics of the two subjects.}, } @article {pmid30976017, year = {2019}, author = {Singha, M and Dong, J and Zhang, G and Xiao, X}, title = {High resolution paddy rice maps in cloud-prone Bangladesh and Northeast India using Sentinel-1 data.}, journal = {Scientific data}, volume = {6}, number = {1}, pages = {26}, pmid = {30976017}, issn = {2052-4463}, mesh = {*Agriculture ; Bangladesh ; India ; *Oryza ; Satellite Imagery ; }, abstract = {Knowledge of where, when, and how much paddy rice is planted is crucial information for understating of regional food security, freshwater use, climate change, and transmission of avian influenza virus. We developed seasonal paddy rice maps at high resolution (10 m) for Bangladesh and Northeast India, typical cloud-prone regions in South Asia, using cloud-free Synthetic Aperture Radar (SAR) images from Sentinel-1 satellite, the Random Forest classifier, and the Google Earth Engine (GEE) cloud computing platform. The maps were provided for all the three distinct rice growing seasons of the region: Boro, Aus and Aman. The paddy rice maps were evaluated against the independent validation samples, and compared with the existing products from the International Rice Research Institute (IRRI) and the analysis of Moderate Resolution Imaging Spectroradiometer (MODIS) data. 
The generated paddy rice maps were spatially consistent with the compared maps and had a satisfactory accuracy over 90%. This study showed the potential of Sentinel-1 data and GEE on large scale paddy rice mapping in cloud-prone regions like tropical Asia.}, } @article {pmid30970678, year = {2019}, author = {Al-Dahhan, RR and Shi, Q and Lee, GM and Kifayat, K}, title = {Survey on Revocation in Ciphertext-Policy Attribute-Based Encryption.}, journal = {Sensors (Basel, Switzerland)}, volume = {19}, number = {7}, pages = {}, pmid = {30970678}, issn = {1424-8220}, abstract = {Recently, using advanced cryptographic techniques to process, store, and share data securely in an untrusted cloud environment has drawn widespread attention from academic researchers. In particular, Ciphertext-Policy Attribute-Based Encryption (CP-ABE) is a promising, advanced type of encryption technique that resolves an open challenge to regulate fine-grained access control of sensitive data according to attributes, particularly for Internet of Things (IoT) applications. However, although this technique provides several critical functions such as data confidentiality and expressiveness, it faces some hurdles including revocation issues and lack of managing a wide range of attributes. These two issues have been highlighted by many existing studies due to their complexity which is hard to address without high computational cost affecting the resource-limited IoT devices. In this paper, unlike other survey papers, existing single and multiauthority CP-ABE schemes are reviewed with the main focus on their ability to address the revocation issues, the techniques used to manage the revocation, and comparisons among them according to a number of secure cloud storage criteria. 
Therefore, this is the first review paper analysing the major issues of CP-ABE in the IoT paradigm and explaining the existing approaches to addressing these issues.}, } @article {pmid30970399, year = {2019}, author = {Huang, WJ and Yi, HL}, title = {[Advances in the application of cloud platform in diagnosis and treatment of OSA].}, journal = {Lin chuang er bi yan hou tou jing wai ke za zhi = Journal of clinical otorhinolaryngology, head, and neck surgery}, volume = {33}, number = {4}, pages = {310-312}, doi = {10.13201/j.issn.1001-1781.2019.04.006}, pmid = {30970399}, issn = {2096-7993}, mesh = {China ; *Cloud Computing ; Humans ; Internet ; Sleep ; *Sleep Apnea, Obstructive/diagnosis/therapy ; }, abstract = {Obstructive sleep apnea (OSA) has been a major public health problem due to a surge in the number of patients with OSA, which can create a heavy socio-economic burden. How to effectively carry out mass screening and prevention is an important link in China's health management and chronic disease prevention. The development of cloud computing and Internet of things technology has brought new directions and new solutions. For the sake of diagnosis and treatment of disease involving multiple disciplines, cross cooperation problems, we can use the Internet of things, a cloud platform and the database as the core architecture to establish the network which contains family-primary hospitals-tertiary hospitals-specialized subject and sleep medicine laboratory. With the help of handsets, we are able to realize the effectiveness and convenience of remote cooperation and eventually form the cloud platform diagnosis and treatment system of sleep-related breathing disease based on the large sample data.}, } @article {pmid30965629, year = {2019}, author = {Peralta, G and Garrido, P and Bilbao, J and Agüero, R and Crespo, PM}, title = {On the Combination of Multi-Cloud and Network Coding for Cost-Efficient Storage in Industrial Applications.}, journal = {Sensors (Basel, 
Switzerland)}, volume = {19}, number = {7}, pages = {}, pmid = {30965629}, issn = {1424-8220}, support = {KK-2018/00115//Eusko Jaurlaritza/ ; 825473//Horizon 2020/ ; TEC2016-75067-C4-3-R//Ministerio de Economía y Competitividad/ ; TEC2015-71329-C2-1-R//Ministerio de Economía y Competitividad/ ; TEC2015-69648-REDC//Ministerio de Economía y Competitividad/ ; }, abstract = {The adoption of both Cyber-Physical Systems (CPSs) and the Internet-of-Things (IoT) has enabled the evolution towards the so-called Industry 4.0. These technologies, together with cloud computing and artificial intelligence, foster new business opportunities. Besides, several industrial applications need immediate decision making and fog computing is emerging as a promising solution to address such requirement. In order to achieve a cost-efficient system, we propose taking advantage from spot instances, a new service offered by cloud providers, which provide resources at lower prices. The main downside of these instances is that they do not ensure service continuity and they might suffer from interruptions. An architecture that combines fog and multi-cloud deployments along with Network Coding (NC) techniques, guarantees the needed fault-tolerance for the cloud environment, and also reduces the required amount of redundant data to provide reliable services. In this paper we analyze how NC can actually help to reduce the storage cost and improve the resource efficiency for industrial applications, based on a multi-cloud infrastructure. The cost analysis has been carried out using both real AWS EC2 spot instance prices and, to complement them, prices obtained from a model based on a finite Markov chain, derived from real measurements. 
We have analyzed the overall system cost, depending on different parameters, showing that configurations that seek to minimize the storage yield a higher cost reduction, due to the strong impact of storage cost.}, } @article {pmid30956282, year = {2019}, author = {Capela, EV and Santos, JHPM and Boal-Palheiros, I and Coutinho, JAP and Ventura, SPM and Freire, MG}, title = {A simple approach for the determination and characterization of ternary phase diagrams of aqueous two-phase systems composed of water, polyethylene glycol and sodium carbonate.}, journal = {Chemical engineering education}, volume = {53}, number = {2}, pages = {112-120}, pmid = {30956282}, issn = {2165-6428}, support = {337753/ERC_/European Research Council/International ; }, abstract = {In this work, a simple experimental protocol to determine liquid-liquid phase diagrams of aqueous two-phase systems (ATPS) on a Chemical Engineering course is described. Throughout this laboratory set of experiments, the liquid-liquid ternary phase diagrams, tie-lines, tie-line lengths and critical points of ATPS will be determined. Ternary liquid-liquid phase diagrams composed of water, polyethylene glycol (PEG 200, 400 and 600 g·mol[-1]) and sodium carbonate (Na2CO3) were obtained by cloud-point titration method at room temperature. The respective tie-lines, tie-line lengths and critical points were also determined. Phase diagrams were represented both as conventional ternary phase diagrams and orthogonal phase diagrams. Through the analysis of the results obtained it was identified a higher ability to form ATPS with the increase of the polymer molecular weight. 
The interpretation of phase diagrams, particularly the most complex, the orthogonal ones, is not always easy to grasp by students, so this novel 3-hour-class educational approach could be potentially used to teach and help understanding 3-component liquid-liquid equilibrium and the formation of biphasic systems to undergraduate students, without requiring the use of volatile organic solvents.}, } @article {pmid30953288, year = {2019}, author = {Soorya, M and Issac, A and Dutta, MK}, title = {Automated Framework for Screening of Glaucoma Through Cloud Computing.}, journal = {Journal of medical systems}, volume = {43}, number = {5}, pages = {136}, pmid = {30953288}, issn = {1573-689X}, mesh = {*Cloud Computing ; Diagnosis, Computer-Assisted ; Fundus Oculi ; Glaucoma/*diagnosis/pathology ; Humans ; Image Processing, Computer-Assisted/*methods ; Mass Screening ; Retinal Vessels/pathology ; Telemedicine/*methods ; Time Factors ; }, abstract = {In recent times, the use of computer aided diagnosis for detection of Glaucoma from fundus images has been prevalent. In the proposed work, a cloud based system is proposed for automatic and real-time screening of Glaucoma with the use of automatic image processing techniques. The proposed system offers scalability to the developers and easy accessibility to the consumers. The proposed system is device and location independent. The input digital image is analyzed and a comprehensive diagnostic report is generated consisting of detailed analysis of indicative medical parameters like optic-cup-to-disc ratio, optic neuro-retinal rim, ISNT rules making the report informative and clinically significant. With recent advances in the field of communication technologies, the internet facilities are available that make the proposed system an efficient and economical method for initial screening and offer preventive and diagnostic steps in early disease intervention and management. 
The proposed system can perform an initial screening test in an average time of 6 s on high resolution fundus images. The proposed system has been tested on a fundus database and an average sensitivity of 93.7% has been achieved for Glaucoma cases. In places where there is scarcity of trained ophthalmologists and lack of awareness of such diseases, the cloud based system can be used as an effective diagnostic assistive tool.}, } @article {pmid30947420, year = {2019}, author = {Jia, XX and Song, YX and Wang, DS and Nie, DX and Wu, JZ}, title = {A collaborative secret sharing scheme based on the Chinese Remainder Theorem.}, journal = {Mathematical biosciences and engineering : MBE}, volume = {16}, number = {3}, pages = {1280-1299}, doi = {10.3934/mbe.2019062}, pmid = {30947420}, issn = {1551-0018}, mesh = {Algorithms ; *Cloud Computing ; *Cooperative Behavior ; *Game Theory ; Humans ; Internet ; Models, Economic ; }, abstract = {Secret sharing (SS) can be used as an important group key management technique for distributed cloud storage and cloud computing. In a traditional threshold SS scheme, a secret is shared among a number of participants and each participant receives one share. In many real-world applications, some participants are involved in multiple SS schemes with group collaboration supports thus have more privileges than the others. To address this issue, we could assign multiple shares to such participants. However, this is not a bandwidth efficient solution. Therefore, a more sophisticated mechanism is required. In this paper, we propose an efficient collaborative secret sharing (CSS) scheme specially tailored for multi-privilege participants in group collaboration. The CSS scheme between two or among more SS schemes is constructed by rearranging multi-privilege participants in each participant set and then formulated into several independent SS schemes with multi-privilege shares that precludes information leakage. 
Our scheme is based on the Chinese Remainder Theorem with lower recovery complexity and it allows each multi-privilege participant to keep only one share. It can be formally proved that our scheme achieves asymptotically perfect security. The experimental results demonstrate that it is efficient to achieve group collaboration, and it has computational advantages, compared with the existing works in the literature.}, } @article {pmid30943891, year = {2019}, author = {McCoy, MD and Shivakumar, V and Nimmagadda, S and Jafri, MS and Madhavan, S}, title = {SNP2SIM: a modular workflow for standardizing molecular simulation and functional analysis of protein variants.}, journal = {BMC bioinformatics}, volume = {20}, number = {1}, pages = {171}, pmid = {30943891}, issn = {1471-2105}, support = {R01 HL105239/HL/NHLBI NIH HHS/United States ; R01HL105239/HL/NHLBI NIH HHS/United States ; 3U41HG007822-02S1/HG/NHGRI NIH HHS/United States ; U41 HG007822/HG/NHGRI NIH HHS/United States ; U24 HG009650/HG/NHGRI NIH HHS/United States ; U41 HG009650/HG/NHGRI NIH HHS/United States ; }, mesh = {Humans ; Ligands ; Molecular Docking Simulation/*methods ; Molecular Dynamics Simulation ; Mutant Proteins/*chemistry ; Mutation, Missense ; Protein Conformation ; Software ; Workflow ; }, abstract = {BACKGROUND: Molecular simulations are used to provide insight into protein structure and dynamics, and have the potential to provide important context when predicting the impact of sequence variation on protein function. In addition to understanding molecular mechanisms and interactions on the atomic scale, translational applications of those approaches include drug screening, development of novel molecular therapies, and targeted treatment planning. Supporting the continued development of these applications, we have developed the SNP2SIM workflow that generates reproducible molecular dynamics and molecular docking simulations for downstream functional variant analysis. 
The Python workflow utilizes molecular dynamics software (NAMD (Phillips et al., J Comput Chem 26(16):1781-802, 2005), VMD (Humphrey et al., J Mol Graph 14(1):33-8, 27-8, 1996)) to generate variant specific scaffolds for simulated small molecule docking (AutoDock Vina (Trott and Olson, J Comput Chem 31(2):455-61, 2010)).

RESULTS: SNP2SIM is composed of three independent modules that can be used sequentially to generate the variant scaffolds of missense protein variants from the wildtype protein structure. The workflow first generates the mutant structure and configuration files required to execute molecular dynamics simulations of solvated protein variant structures. The resulting trajectories are clustered based on the structural diversity of residues involved in ligand binding to produce one or more variant scaffolds of the protein structure. Finally, these unique structural conformations are bound to small molecule ligand libraries to predict variant induced changes to drug binding relative to the wildtype protein structure.

CONCLUSIONS: SNP2SIM provides a platform to apply molecular simulation based functional analysis of sequence variation in the protein targets of small molecule therapies. In addition to simplifying the simulation of variant specific drug interactions, the workflow enables large scale computational mutagenesis by controlling the parameterization of molecular simulations across multiple users or distributed computing infrastructures. This enables the parallelization of the computationally intensive molecular simulations to be aggregated for downstream functional analysis, and facilitates comparing various simulation options, such as the specific residues used to define structural variant clusters. The Python scripts that implement the SNP2SIM workflow are available (SNP2SIM Repository. https://github.com/mccoymd/SNP2SIM , Accessed 2019 February), and individual SNP2SIM modules are available as apps on the Seven Bridges Cancer Genomics Cloud (Lau et al., Cancer Res 77(21):e3-e6, 2017; Cancer Genomics Cloud [ www.cancergenomicscloud.org ; Accessed 2018 November]).}, } @article {pmid30942867, year = {2019}, author = {Brown, SM and Chen, H and Hao, Y and Laungani, BP and Ali, TA and Dong, C and Lijeron, C and Kim, B and Wultsch, C and Pei, Z and Krampis, K}, title = {MGS-Fast: Metagenomic shotgun data fast annotation using microbial gene catalogs.}, journal = {GigaScience}, volume = {8}, number = {4}, pages = {}, pmid = {30942867}, issn = {2047-217X}, support = {UH3 CA140233/CA/NCI NIH HHS/United States ; UL1 TR000457/TR/NCATS NIH HHS/United States ; G12 MD007599/MD/NIMHD NIH HHS/United States ; R01 AI110372/AI/NIAID NIH HHS/United States ; U54 CA221705/CA/NCI NIH HHS/United States ; UL1 TR002384/TR/NCATS NIH HHS/United States ; R01 CA159036/CA/NCI NIH HHS/United States ; R21 DE025352/DE/NIDCR NIH HHS/United States ; U01 CA182370/CA/NCI NIH HHS/United States ; }, mesh = {Algorithms ; Cloud Computing ; Computational Biology/*methods ; Humans ; Metagenome ; 
Metagenomics/*methods ; Microbiology ; Microbiota ; Molecular Sequence Annotation ; Reproducibility of Results ; *Software ; Workflow ; }, abstract = {BACKGROUND: Current methods used for annotating metagenomics shotgun sequencing (MGS) data rely on a computationally intensive and low-stringency approach of mapping each read to a generic database of proteins or reference microbial genomes.

RESULTS: We developed MGS-Fast, an analysis approach for shotgun whole-genome metagenomic data utilizing Bowtie2 DNA-DNA alignment of reads that is an alternative to using the integrated catalog of reference genes database of well-annotated genes compiled from human microbiome data. This method is rapid and provides high-stringency matches (>90% DNA sequence identity) of the metagenomics reads to genes with annotated functions. We demonstrate the use of this method with data from a study of liver disease and synthetic reads, and Human Microbiome Project shotgun data, to detect differentially abundant Kyoto Encyclopedia of Genes and Genomes gene functions in these experiments. This rapid annotation method is freely available as a Galaxy workflow within a Docker image.

CONCLUSIONS: MGS-Fast can confidently transfer functional annotations from gene databases to metagenomic reads, with speed and accuracy.}, } @article {pmid30934704, year = {2019}, author = {Puliafito, C and Vallati, C and Mingozzi, E and Merlino, G and Longo, F and Puliafito, A}, title = {Container Migration in the Fog: A Performance Evaluation.}, journal = {Sensors (Basel, Switzerland)}, volume = {19}, number = {7}, pages = {}, pmid = {30934704}, issn = {1424-8220}, abstract = {The internet of things (IoT) is essential for the implementation of applications and services that require the ability to sense the surrounding environment through sensors and modify it through actuators. However, IoT devices usually have limited computing capabilities and hence are not always sufficient to directly host resource-intensive services. Fog computing, which extends and complements the cloud, can support the IoT with computing resources and services that are deployed close to where data are sensed and actions need to be performed. Virtualisation is an essential feature in the cloud as in the fog, and containers have been recently getting much popularity to encapsulate fog services. Besides, container migration among fog nodes may enable several emerging use cases in different IoT domains (e.g., smart transportation, smart industry). In this paper, we first report container migration use cases in the fog and discuss containerisation. We then provide a comprehensive overview of the state-of-the-art migration techniques for containers, i.e., cold, pre-copy, post-copy, and hybrid migrations. The main contribution of this work is the extensive performance evaluation of these techniques that we conducted over a real fog computing testbed. 
The obtained results shed light on container migration within fog computing environments by clarifying, in general, which migration technique might be the most appropriate under certain network and service conditions.}, } @article {pmid30922215, year = {2019}, author = {Kangeyan, D and Dunford, A and Iyer, S and Stewart, C and Hanna, M and Getz, G and Aryee, MJ}, title = {A (fire)cloud-based DNA methylation data preprocessing and quality control platform.}, journal = {BMC bioinformatics}, volume = {20}, number = {1}, pages = {160}, pmid = {30922215}, issn = {1471-2105}, support = {T32 CA009337/CA/NCI NIH HHS/United States ; Broad NCI Cloud Pilot Project//National Cancer Institute/ ; T32 CA 009337-37//National Cancer Institute/ ; SPARC Grant//Broad Institute of MIT & Harvard/ ; Merkin Institute Fellowship//Broad Institute of MIT & Harvard/ ; }, mesh = {*Cloud Computing ; *DNA Methylation ; Databases, Nucleic Acid ; Genome, Human ; Genomics ; Humans ; *Quality Control ; Reproducibility of Results ; Sequence Analysis, DNA ; Software ; Whole Genome Sequencing ; Workflow ; }, abstract = {BACKGROUND: Bisulfite sequencing allows base-pair resolution profiling of DNA methylation and has recently been adapted for use in single-cells. Analyzing these data, including making comparisons with existing data, remains challenging due to the scale of the data and differences in preprocessing methods between published datasets.

RESULTS: We present a set of preprocessing pipelines for bisulfite sequencing DNA methylation data that include a new R/Bioconductor package, scmeth, for a series of efficient QC analyses of large datasets. The pipelines go from raw data to CpG-level methylation estimates and can be run, with identical results, either on a single computer, in an HPC cluster or on Google Cloud Compute resources. These pipelines are designed to allow users to 1) ensure reproducibility of analyses, 2) achieve scalability to large whole genome datasets with 100 GB+ of raw data per sample and to single-cell datasets with thousands of cells, 3) enable integration and comparison between user-provided data and publicly available data, as all samples can be processed through the same pipeline, and 4) access to best-practice analysis pipelines. Pipelines are provided for whole genome bisulfite sequencing (WGBS), reduced representation bisulfite sequencing (RRBS) and hybrid selection (capture) bisulfite sequencing (HSBS).

CONCLUSIONS: The workflows produce data quality metrics, visualization tracks, and aggregated output for further downstream analysis. Optional use of cloud computing resources facilitates analysis of large datasets, and integration with existing methylome profiles. The workflow design principles are applicable to other genomic data types.}, } @article {pmid30922212, year = {2019}, author = {Cui, L and Feng, J and Zhang, Z and Yang, L}, title = {High throughput automatic muscle image segmentation using parallel framework.}, journal = {BMC bioinformatics}, volume = {20}, number = {1}, pages = {158}, pmid = {30922212}, issn = {1471-2105}, support = {81727802//National Natural Science Foundation of China/ ; 61701404//National Natural Science Foundation of China/ ; 2017YFB1002504//National Key R&D Program of China under grant/ ; }, mesh = {Algorithms ; Image Processing, Computer-Assisted/*methods ; Muscle, Skeletal/*anatomy & histology ; }, abstract = {BACKGROUND: Fast and accurate automatic segmentation of skeletal muscle cell image is crucial for the diagnosis of muscle related diseases, which extremely reduces the labor-intensive manual annotation. Recently, several methods have been presented for automatic muscle cell segmentation. However, most methods exhibit high model complexity and time cost, and they are not adaptive to large-scale images such as whole-slide scanned specimens.

METHODS: In this paper, we propose a novel distributed computing approach, which adopts both data and model parallel, for fast muscle cell segmentation. With a master-worker parallelism manner, the image data in the master is distributed onto multiple workers based on the Spark cloud computing platform. On each worker node, we first detect cell contours using a structured random forest (SRF) contour detector with fast parallel prediction and generate region candidates using a superpixel technique. Next, we propose a novel hierarchical tree based region selection algorithm for cell segmentation based on the conditional random field (CRF) algorithm. We divide the region selection algorithm into multiple sub-problems, which can be further parallelized using multi-core programming.

RESULTS: We test the performance of the proposed method on a large-scale haematoxylin and eosin (H &E) stained skeletal muscle image dataset. Compared with the standalone implementation, the proposed method achieves more than 10 times speed improvement on very large-scale muscle images containing hundreds to thousands of cells. Meanwhile, our proposed method produces high-quality segmentation results compared with several state-of-the-art methods.

CONCLUSIONS: This paper presents a parallel muscle image segmentation method with both data and model parallelism on multiple machines. The parallel strategy exhibits high compatibility to our muscle segmentation framework. The proposed method achieves high-throughput effective cell segmentation on large-scale muscle images.}, } @article {pmid30919611, year = {2019}, author = {Grimm, DJ}, title = {The Dark Data Quandary.}, journal = {The American University law review}, volume = {68}, number = {3}, pages = {761-821}, pmid = {30919611}, issn = {0003-1453}, mesh = {*Big Data ; Computer Security/*legislation & jurisprudence ; Data Collection/*legislation & jurisprudence ; Health Insurance Portability and Accountability Act/legislation & jurisprudence ; Humans ; Privacy/*legislation & jurisprudence ; United States ; United States Federal Trade Commission/legislation & jurisprudence ; }, abstract = {The digital universe remains a black box. Despite attaining high-technology capabilities like artificial intelligence and cognitive computing, "Big Data" analytics have failed to keep pace with surging data production. At the same time, the falling costs of cloud storage and distributed systems have made mass data storage cheaper and more accessible. These effects have produced a chasm between data that is stored and data that can be readily analyzed and understood. Enticed by the promise of extracting future value from rising data stockpiles, organizations now retain massive quantities of data that they cannot presently know or effectively manage. This rising sea of "dark data" now represents the vast majority of the digital universe. Dark data presents a quandary for organizations and the judicial system. For organizations, the inability to know the contents of retained dark data produces invisible risk under a spreading patchwork of digital privacy and data governance laws, most notably in the medical and consumer protection areas. 
For courts increasingly confronted with Big Data-derived evidence, dark data may shield critical information from judicial view while embedding subjective influences within seemingly objective methods. To avoid obscuring organizational risk and producing erroneous outcomes in the courtroom, decision-makers must achieve a new awareness of dark data’s presence and its ability to undermine Big Data’s vaunted advantages.}, } @article {pmid30919123, year = {2019}, author = {Essa, YM and Hemdan, EE and El-Mahalawy, A and Attiya, G and El-Sayed, A}, title = {IFHDS: Intelligent Framework for Securing Healthcare BigData.}, journal = {Journal of medical systems}, volume = {43}, number = {5}, pages = {124}, pmid = {30919123}, issn = {1573-689X}, mesh = {Algorithms ; *Big Data ; Cloud Computing/*standards ; Computer Security/*standards ; Confidentiality/*standards ; Electronic Health Records/standards ; Humans ; Information Storage and Retrieval/*methods/standards ; }, abstract = {Big data has become one of the most imperative technologies for collecting, handling and analysing enormous volumes of data in a high-performance environment. Enterprise healthcare organizations needs high compute power for the large volume of sensitive data, as well as large storage for storing both data and results, preferably in the cloud. However, security and privacy of patient data have become a critical issue that restricts many healthcare services from using cloud services to their optimal level. Therefore, this issue has limited healthcare organizations from migrating patient data to a cloud storage, because the cloud operators have chance to access sensitive data without the owner's permission. This paper proposes an intelligent security system called Intelligent Framework for Healthcare Data Security (IFHDS). IFHDS enables to secure and process large-scale data using column-based approach with less impact on the performance of data processing. 
The intelligent framework intends masking personal data and to encrypt sensitive data only. The proposed IFHDS splits sensitive data into multiple parts according to sensitivity level, where each part is stored separately over distributed cloud storage. Splitting data based on sensitivity level prevents cloud provider to break complete record of data if succeeds to decrypt part of data. The experimental results confirm that the proposed system secure the sensitive patient data with an acceptable computation time compared to recent security approaches.}, } @article {pmid30915546, year = {2020}, author = {Zhou, H and Sinsheimer, JS and Bates, DM and Chu, BB and German, CA and Ji, SS and Keys, KL and Kim, J and Ko, S and Mosher, GD and Papp, JC and Sobel, EM and Zhai, J and Zhou, JJ and Lange, K}, title = {OPENMENDEL: a cooperative programming project for statistical genetics.}, journal = {Human genetics}, volume = {139}, number = {1}, pages = {61-71}, pmid = {30915546}, issn = {1432-1203}, support = {R01 GM105785/GM/NIGMS NIH HHS/United States ; R01-HL135156/GF/NIH HHS/United States ; R01 HG006139/HG/NHGRI NIH HHS/United States ; R01 GM053275/GM/NIGMS NIH HHS/United States ; R01-GM105785/GM/NIGMS NIH HHS/United States ; R01-HG006139/HG/NHGRI NIH HHS/United States ; P30 ES006694/ES/NIEHS NIH HHS/United States ; T32-HG002536/HG/NHGRI NIH HHS/United States ; R01-GM53275/GM/NIGMS NIH HHS/United States ; T32 HG002536/HG/NHGRI NIH HHS/United States ; R01-HL135156//National Institute of Heart, Lung and Blood/ ; DMS-1052210//National Science Foundation/ ; R01 HL135156/HL/NHLBI NIH HHS/United States ; }, mesh = {Algorithms ; Computational Biology/*methods ; *Genome, Human ; *Genome-Wide Association Study ; Humans ; *Models, Statistical ; Polymorphism, Single Nucleotide ; *Programming Languages ; Software ; }, abstract = {Statistical methods for genome-wide association studies (GWAS) continue to improve. 
However, the increasing volume and variety of genetic and genomic data make computational speed and ease of data manipulation mandatory in future software. In our view, a collaborative effort of statistical geneticists is required to develop open source software targeted to genetic epidemiology. Our attempt to meet this need is called the OPENMENDEL project (https://openmendel.github.io). It aims to (1) enable interactive and reproducible analyses with informative intermediate results, (2) scale to big data analytics, (3) embrace parallel and distributed computing, (4) adapt to rapid hardware evolution, (5) allow cloud computing, (6) allow integration of varied genetic data types, and (7) foster easy communication between clinicians, geneticists, statisticians, and computer scientists. This article reviews and makes recommendations to the genetic epidemiology community in the context of the OPENMENDEL project.}, } @article {pmid30909657, year = {2019}, author = {Huang, L and Feng, X and Zhang, L and Qian, L and Wu, Y}, title = {Multi-Server Multi-User Multi-Task Computation Offloading for Mobile Edge Computing Networks.}, journal = {Sensors (Basel, Switzerland)}, volume = {19}, number = {6}, pages = {}, pmid = {30909657}, issn = {1424-8220}, support = {61502428, 61572440//National Natural Science Foundation of China/ ; LY19F020033,LR16F010003//Zhejiang Provincial Natural Science Foundation of China/ ; 2016YCGC011//Young Talent Cultivation Project of Zhejiang Association for Science and Technology/ ; }, abstract = {This paper studies mobile edge computing (MEC) networks where multiple wireless devices (WDs) offload their computation tasks to multiple edge servers and one cloud server. Considering different real-time computation tasks at different WDs, every task is decided to be processed locally at its WD or to be offloaded to and processed at one of the edge servers or the cloud server. 
In this paper, we investigate low-complexity computation offloading policies to guarantee quality of service of the MEC network and to minimize WDs' energy consumption. Specifically, both a linear programing relaxation-based (LR-based) algorithm and a distributed deep learning-based offloading (DDLO) algorithm are independently studied for MEC networks. We further propose a heterogeneous DDLO to achieve better convergence performance than DDLO. Extensive numerical results show that the DDLO algorithms guarantee better performance than the LR-based algorithm. Furthermore, the DDLO algorithm generates an offloading decision in less than 1 millisecond, which is several orders faster than the LR-based algorithm.}, } @article {pmid30900016, year = {2019}, author = {Nguyen, UNT and Pham, LTH and Dang, TD}, title = {An automatic water detection approach using Landsat 8 OLI and Google Earth Engine cloud computing to map lakes and reservoirs in New Zealand.}, journal = {Environmental monitoring and assessment}, volume = {191}, number = {4}, pages = {235}, pmid = {30900016}, issn = {1573-2959}, support = {UOWX1503//Ministry of Business, Innovation and Employment/ ; }, mesh = {*Cloud Computing ; Environmental Monitoring/*methods ; Humans ; Lakes/analysis ; New Zealand ; *Satellite Imagery ; Water ; *Water Supply ; }, abstract = {Monitoring water surface dynamics is essential for the management of lakes and reservoirs, especially those are intensively impacted by human exploitation and climatic variation. Although modern satellites have provided a superior solution over traditional methods in monitoring water surfaces, manually downloading and processing imagery associated with large study areas or long-time scales are time-consuming. 
The Google Earth Engine (GEE) platform provides a promising solution for this type of "big data" problems when it is combined with the automatic water extraction index (AWEI) to delineate multi-temporal water pixels from other forms of land use/land cover. The aim of this study is to assess the performance of a completely automatic water extraction framework by combining AWEI, GEE, and Landsat 8 OLI data over the period 2014-2018 in the case study of New Zealand. The overall accuracy (OA) of 0.85 proved the good performance of this combination. Therefore, the framework developed in this research can be used for lake and reservoir monitoring and assessment in the future. We also found that despite the temporal variability of climate during the period 2014-2018, the spatial areas of most of the lakes (3840) in the country remained the same at around 3742 km[2]. Image fusion or aerial photos can be employed to check the areal variation of the lakes at a finer scale.}, } @article {pmid30894349, year = {2019}, author = {Grundy, Q and Chiu, K and Held, F and Continella, A and Bero, L and Holz, R}, title = {Data sharing practices of medicines related apps and the mobile ecosystem: traffic, content, and network analysis.}, journal = {BMJ (Clinical research ed.)}, volume = {364}, number = {}, pages = {l920}, pmid = {30894349}, issn = {1756-1833}, support = {//CIHR/Canada ; }, mesh = {Computer Security/*standards ; Confidentiality/standards ; Health Promotion/methods ; Humans ; Information Dissemination/*methods ; Mobile Applications/*standards ; Smartphone ; Telemedicine/*standards ; }, abstract = {OBJECTIVES: To investigate whether and how user data are shared by top rated medicines related mobile applications (apps) and to characterise privacy risks to app users, both clinicians and consumers.

DESIGN: Traffic, content, and network analysis.

SETTING: Top rated medicines related apps for the Android mobile platform available in the Medical store category of Google Play in the United Kingdom, United States, Canada, and Australia.

PARTICIPANTS: 24 of 821 apps identified by an app store crawling program. Included apps pertained to medicines information, dispensing, administration, prescribing, or use, and were interactive.

INTERVENTIONS: Laboratory based traffic analysis of each app downloaded onto a smartphone, simulating real world use with four dummy scripts. The app's baseline traffic related to 28 different types of user data was observed. To identify privacy leaks, one source of user data was modified and deviations in the resulting traffic observed.

MAIN OUTCOME MEASURES: Identities and characterisation of entities directly receiving user data from sampled apps. Secondary content analysis of company websites and privacy policies identified data recipients' main activities; network analysis characterised their data sharing relations.

RESULTS: 19/24 (79%) of sampled apps shared user data. 55 unique entities, owned by 46 parent companies, received or processed app user data, including developers and parent companies (first parties) and service providers (third parties). 18 (33%) provided infrastructure related services such as cloud services. 37 (67%) provided services related to the collection and analysis of user data, including analytics or advertising, suggesting heightened privacy risks. Network analysis revealed that first and third parties received a median of 3 (interquartile range 1-6, range 1-24) unique transmissions of user data. Third parties advertised the ability to share user data with 216 "fourth parties"; within this network (n=237), entities had access to a median of 3 (interquartile range 1-11, range 1-140) unique transmissions of user data. Several companies occupied central positions within the network with the ability to aggregate and re-identify user data.

CONCLUSIONS: Sharing of user data is routine, yet far from transparent. Clinicians should be conscious of privacy risks in their own use of apps and, when recommending apps, explain the potential for loss of privacy as part of informed consent. Privacy regulation should emphasise the accountabilities of those who control and process user data. Developers should disclose all data sharing practices and allow users to choose precisely what data are shared and with whom.}, } @article {pmid30892238, year = {2020}, author = {Krestinskaya, O and James, AP and Chua, LO}, title = {Neuromemristive Circuits for Edge Computing: A Review.}, journal = {IEEE transactions on neural networks and learning systems}, volume = {31}, number = {1}, pages = {4-23}, doi = {10.1109/TNNLS.2019.2899262}, pmid = {30892238}, issn = {2162-2388}, abstract = {The volume, veracity, variability, and velocity of data produced from the ever increasing network of sensors connected to Internet pose challenges for power management, scalability, and sustainability of cloud computing infrastructure. Increasing the data processing capability of edge computing devices at lower power requirements can reduce several overheads for cloud computing solutions. This paper provides the review of neuromorphic CMOS-memristive architectures that can be integrated into edge computing devices. 
We discuss why the neuromorphic architectures are useful for edge devices and show the advantages, drawbacks, and open problems in the field of neuromemristive circuits for edge computing.}, } @article {pmid30891516, year = {2019}, author = {Saura, JR and Reyes-Menendez, A and Palos-Sanchez, P}, title = {Mapping multispectral Digital Images using a Cloud Computing software: applications from UAV images.}, journal = {Heliyon}, volume = {5}, number = {2}, pages = {e01277}, pmid = {30891516}, issn = {2405-8440}, abstract = {Due to technology development related to agricultural production, aircrafts such as the Unmanned Aerial Vehicle (UAV) and technologies such as Multispectral photogrammetry and Remote Sensing, have great potential in supporting some of the pressing problems faced by agricultural production in terms of analysis and testing of variables. This paper reports an experience related to the analysis of a vineyard with multispectral photogrammetry technology and UAVs and it demonstrates its great potential to analyze the Normalized Difference Vegetation Index (NDVI), the Near-Infrared Spectroscopy (NIRS) and the Digital Elevation Model (DEM) applied in the agriculture framework to collect information on the vegetative state of the crop, soil and plant moisture, and biomass density maps of. In addition, the collected information is analyzed with the PIX4D Cloud Computing technology software and its advantages over software that work with other data processing are highlighted. 
This research shows, therefore, the possibility that efficient plantations can be developed with the use of multispectral photogrammetry and the analysis of digital images from this process.}, } @article {pmid30890927, year = {2019}, author = {Kiar, G and Brown, ST and Glatard, T and Evans, AC}, title = {A Serverless Tool for Platform Agnostic Computational Experiment Management.}, journal = {Frontiers in neuroinformatics}, volume = {13}, number = {}, pages = {12}, pmid = {30890927}, issn = {1662-5196}, abstract = {Neuroscience has been carried into the domain of big data and high performance computing (HPC) on the backs of initiatives in data collection and an increasingly compute-intensive tools. While managing HPC experiments requires considerable technical acumen, platforms, and standards have been developed to ease this burden on scientists. While web-portals make resources widely accessible, data organizations such as the Brain Imaging Data Structure and tool description languages such as Boutiques provide researchers with a foothold to tackle these problems using their own datasets, pipelines, and environments. While these standards lower the barrier to adoption of HPC and cloud systems for neuroscience applications, they still require the consolidation of disparate domain-specific knowledge. We present Clowdr, a lightweight tool to launch experiments on HPC systems and clouds, record rich execution records, and enable the accessible sharing and re-launch of experimental summaries and results. 
Clowdr uniquely sits between web platforms and bare-metal applications for experiment management by preserving the flexibility of do-it-yourself solutions while providing a low barrier for developing, deploying and disseminating neuroscientific analysis.}, } @article {pmid30876108, year = {2019}, author = {Zhao, Y and Hu, L and Zhu, R and Yu, X and Li, Y and Wang, W and Zhang, J}, title = {Crosstalk-aware spectrum defragmentation by re-provisioning advance reservation requests in space division multiplexing enabled elastic optical networks with multi-core fiber.}, journal = {Optics express}, volume = {27}, number = {4}, pages = {5014-5032}, doi = {10.1364/OE.27.005014}, pmid = {30876108}, issn = {1094-4087}, abstract = {Space division multiplexing enabled elastic optical networks (SDM-EONs) with multi-core fiber (MCF) have become a promising candidate for future optical transport networks, due to their high capacity and flexibility. Meanwhile, driven by the development of cloud computing and data centers, more types of requests are allowed in the networks, i.e., the usual immediate reservation (IR) requests, which need to be served immediately, and advance reservation (AR) requests, which support initial-delay tolerance services. However, with the introduction of AR requests, spectrum fragments occur frequently in both spatial and time dimension as lightpaths are set up and torn down, and the issue of spectrum fragmentation could be much more serious in SDM-EONs than in simple EONs. To measure fragments status in both spatial and time dimension in SDM-EONs, we first design a metric, i.e., time-dimensional spectrum compactness (TSC). Then, based on TSC, we propose a crosstalk-aware AR requests re-provisioning algorithm with two strategies to optimize the fragments in SDM-EONs. 
The performance of the proposed algorithm is evaluated via software simulation in terms of spectrum compactness, blocking probability, spectrum utilization, average moving times, average re-provisioning latency and average start time delay. The results show that the proposed re-provisioning algorithm can effectively improve spectrum compactness and spectrum efficiency of the networks. We also evaluate the proposed re-provisioning algorithm in different TSC thresholds, and it turns out that the proposed re-provisioning algorithm in higher threshold performs better in terms of spectrum compactness and spectrum utilization.}, } @article {pmid30875885, year = {2019}, author = {Lamb, ZW and Agrawal, DP}, title = {Analysis of Mobile Edge Computing for Vehicular Networks.}, journal = {Sensors (Basel, Switzerland)}, volume = {19}, number = {6}, pages = {}, pmid = {30875885}, issn = {1424-8220}, abstract = {Vehicular ad-hoc Networks (VANETs) are an integral part of intelligent transportation systems (ITS) that facilitate communications between vehicles and the internet. More recently, VANET communications research has strayed from the antiquated DSRC standard and favored more modern cellular technologies, such as fifth generation (5G). The ability of cellular networks to serve highly mobile devices combined with the drastically increased capacity of 5G, would enable VANETs to accommodate large numbers of vehicles and support range of applications. The addition of thousands of new connected devices not only stresses the cellular networks, but also the computational and storage requirements supporting the applications and software of these devices. Autonomous vehicles, with numerous on-board sensors, are expected to generate large amounts of data that must be transmitted and processed. Realistically, on-board computing and storage resources of the vehicle cannot be expected to handle all data that will be generated over the vehicles lifetime. 
Cloud computing will be an essential technology in VANETs and will support the majority of computation and long-term data storage. However, the networking overhead and latency associated with remote cloud resources could prove detrimental to overall network performance. Edge computing seeks to reduce the overhead by placing computational resources nearer to the end users of the network. The geographical diversity and varied hardware configurations of resources in an edge-enabled network would require careful management to ensure efficient resource utilization. In this paper, we introduce an architecture which evaluates available resources in real-time and makes allocations to the most logical and feasible resource. We evaluate our approach mathematically with the use of a multi-criteria decision analysis algorithm and validate our results with experiments using a test-bed of cloud resources. Results demonstrate that an algorithmic ranking of physical resources matches very closely with experimental results and provides a means of delegating tasks to the best available resource.}, } @article {pmid30873973, year = {2019}, author = {Makowski, M and Kowalczyk, A and Bieda, M and Suszek, J and Ducin, I and Shimobaba, T and Nagahama, Y and Ito, T}, title = {Miniature holographic projector with cloud computing capability.}, journal = {Applied optics}, volume = {58}, number = {5}, pages = {A156-A160}, doi = {10.1364/AO.58.00A156}, pmid = {30873973}, issn = {1539-4522}, abstract = {A fully functional miniaturized projection head below 5 cm[3] is presented, using computer-generated holograms dynamically displayed on a liquid-crystal spatial light modulator. Spatial division of the modulator is used for color projection without color breakup, and specially designed, anti-reflection coated prisms ensure simple light paths with small losses. Real-time calculations are performed on a remote server with on-the-fly compression of holographic fringes. 
Cloud computing allows 1 W of local electrical power usage and apparent image brightness equivalent to 15-500 lm/W efficiency, depending on the displayed content. The properties of the projector allow future applications in handheld displays.}, } @article {pmid30871160, year = {2019}, author = {Akintoye, SB and Bagula, A}, title = {Improving Quality-of-Service in Cloud/Fog Computing through Efficient Resource Allocation.}, journal = {Sensors (Basel, Switzerland)}, volume = {19}, number = {6}, pages = {}, pmid = {30871160}, issn = {1424-8220}, support = {NRF FREESTANDING//National Research Foundation/ ; }, abstract = {Recently, a massive migration of enterprise applications to the cloud has been recorded in the IT world. One of the challenges of cloud computing is Quality-of-Service management, which includes the adoption of appropriate methods for allocating cloud-user applications to virtual resources, and virtual resources to the physical resources. The effective allocation of resources in cloud data centers is also one of the vital optimization problems in cloud computing, particularly when the cloud service infrastructures are built by lightweight computing devices. In this paper, we formulate and present the task allocation and virtual machine placement problems in a single cloud/fog computing environment, and propose a task allocation algorithmic solution and a Genetic Algorithm Based Virtual Machine Placement as solutions for the task allocation and virtual machine placement problem models. 
Finally, the experiments are carried out and the results show that the proposed solutions improve Quality-of-Service in the cloud/fog computing environment in terms of the allocation cost.}, } @article {pmid30871046, year = {2019}, author = {Wang, T and Wang, W and Liu, H and Li, T}, title = {Research on a Face Real-time Tracking Algorithm Based on Particle Filter Multi-Feature Fusion.}, journal = {Sensors (Basel, Switzerland)}, volume = {19}, number = {5}, pages = {}, pmid = {30871046}, issn = {1424-8220}, abstract = {With the revolutionary development of cloud computing and internet of things, the integration and utilization of "big data" resources is a hot topic of the artificial intelligence research. Face recognition technology information has the advantages of being non-replicable, non-stealing, simple and intuitive. Video face tracking in the context of big data has become an important research hotspot in the field of information security. In this paper, a multi-feature fusion adaptive adjustment target tracking window and an adaptive update template particle filter tracking framework algorithm are proposed. Firstly, the skin color and edge features of the face are extracted in the video sequence. The weighted color histogram are extracted which describes the face features. Then we use the integral histogram method to simplify the histogram calculation of the particles. Finally, according to the change of the average distance, the tracking window is adjusted to accurately track the tracking object. At the same time, the algorithm can adaptively update the tracking template which improves the accuracy and accuracy of the tracking. 
The experimental results show that the proposed method improves the tracking effect and has strong robustness in complex backgrounds such as skin color, illumination changes and face occlusion.}, } @article {pmid30865284, year = {2019}, author = {Gao, XR and Huang, H}, title = {PleioNet: a web-based visualization tool for exploring pleiotropy across complex traits.}, journal = {Bioinformatics (Oxford, England)}, volume = {35}, number = {20}, pages = {4179-4180}, pmid = {30865284}, issn = {1367-4811}, support = {R01 EY022651/EY/NEI NIH HHS/United States ; R01 EY027315/EY/NEI NIH HHS/United States ; RF1 AG060472/AG/NIA NIH HHS/United States ; }, mesh = {Internet ; *Multifactorial Inheritance ; *Software ; Web Browser ; }, abstract = {SUMMARY: Pleiotropy plays an important role in furthering our understanding of the shared genetic architecture of different human diseases and traits. However, exploring and visualizing pleiotropic information with currently publicly available tools is limiting and challenging. To aid researchers in constructing and digesting pleiotropic networks, we present PleioNet, a web-based visualization tool for exploring this information across human diseases and traits. This program provides an intuitive and interactive web interface that seamlessly integrates large database queries with visualizations that enable users to quickly explore complex high-dimensional pleiotropic information. PleioNet works on all modern computer and mobile web browsers, making pleiotropic information readily available to a broad range of researchers and clinicians with diverse technical backgrounds. We expect that PleioNet will be an important tool for studying the underlying pleiotropic connections among human diseases and traits.

PleioNet is hosted on Google cloud and freely available at http://www.pleionet.com/.}, } @article {pmid30864323, year = {2019}, author = {Srivastava, A and Adusumilli, R and Boyce, H and Garijo, D and Ratnakar, V and Mayani, R and Yu, T and Machiraju, R and Gil, Y and Mallick, P}, title = {Semantic workflows for benchmark challenges: Enhancing comparability, reusability and reproducibility.}, journal = {Pacific Symposium on Biocomputing. Pacific Symposium on Biocomputing}, volume = {24}, number = {}, pages = {208-219}, pmid = {30864323}, issn = {2335-6936}, support = {R01 GM117097/GM/NIGMS NIH HHS/United States ; T15 LM007033/LM/NLM NIH HHS/United States ; T32 LM012409/LM/NLM NIH HHS/United States ; }, mesh = {Algorithms ; Benchmarking/*methods ; Computational Biology/methods ; Gene Expression Profiling/statistics & numerical data ; Genomics ; Metadata ; Proteins/genetics/metabolism ; Reproducibility of Results ; *Semantics ; Sequence Analysis, RNA/statistics & numerical data ; *Workflow ; }, abstract = {Benchmark challenges, such as the Critical Assessment of Structure Prediction (CASP) and Dialogue for Reverse Engineering Assessments and Methods (DREAM) have been instrumental in driving the development of bioinformatics methods. Typically, challenges are posted, and then competitors perform a prediction based upon blinded test data. Challengers then submit their answers to a central server where they are scored. Recent efforts to automate these challenges have been enabled by systems in which challengers submit Docker containers, a unit of software that packages up code and all of its dependencies, to be run on the cloud. Despite their incredible value for providing an unbiased test-bed for the bioinformatics community, there remain opportunities to further enhance the potential impact of benchmark challenges. Specifically, current approaches only evaluate end-to-end performance; it is nearly impossible to directly compare methodologies or parameters. 
Furthermore, the scientific community cannot easily reuse challengers' approaches, due to lack of specifics, ambiguity in tools and parameters as well as problems in sharing and maintenance. Lastly, the intuition behind why particular steps are used is not captured, as the proposed workflows are not explicitly defined, making it cumbersome to understand the flow and utilization of data. Here we introduce an approach to overcome these limitations based upon the WINGS semantic workflow system. Specifically, WINGS enables researchers to submit complete semantic workflows as challenge submissions. By submitting entries as workflows, it then becomes possible to compare not just the results and performance of a challenger, but also the methodology employed. This is particularly important when dozens of challenge entries may use nearly identical tools, but with only subtle changes in parameters (and radical differences in results). WINGS uses a component driven workflow design and offers intelligent parameter and data selection by reasoning about data characteristics. This proves to be especially critical in bioinformatics workflows where using default or incorrect parameter values is prone to drastically altering results. Different challenge entries may be readily compared through the use of abstract workflows, which also facilitate reuse. WINGS is housed on a cloud based setup, which stores data, dependencies and workflows for easy sharing and utility. It also has the ability to scale workflow executions using distributed computing through the Pegasus workflow execution system. 
We demonstrate the application of this architecture to the DREAM proteogenomic challenge.}, } @article {pmid30862110, year = {2019}, author = {Obour Agyekum, KO and Xia, Q and Sifah, EB and Gao, J and Xia, H and Du, X and Guizani, M}, title = {A Secured Proxy-Based Data Sharing Module in IoT Environments Using Blockchain.}, journal = {Sensors (Basel, Switzerland)}, volume = {19}, number = {5}, pages = {}, pmid = {30862110}, issn = {1424-8220}, support = {2017HH0028; 2018HH0102;//Programs of International Science and Technology Cooperation and Exchange of Sichuan Province/ ; }, abstract = {Access and utilization of data are central to the cloud computing paradigm. With the advent of the Internet of Things (IoT), the tendency of data sharing on the cloud has seen enormous growth. With data sharing comes numerous security and privacy issues. In the process of ensuring data confidentiality and fine-grained access control to data in the cloud, several studies have proposed Attribute-Based Encryption (ABE) schemes, with Key Policy-ABE (KP-ABE) being the prominent one. Recent works have however suggested that the confidentiality of data is violated through collusion attacks between a revoked user and the cloud server. We present a secured and efficient Proxy Re-Encryption (PRE) scheme that incorporates an Inner-Product Encryption (IPE) scheme in which decryption of data is possible if the inner product of the private key, associated with a set of attributes specified by the data owner, and the associated ciphertext is equal to zero 0 . We utilize a blockchain network whose processing node acts as the proxy server and performs re-encryption on the data. In ensuring data confidentiality and preventing collusion attacks, the data are divided into two, with one part stored on the blockchain network and the other part stored on the cloud. 
Our approach also achieves fine-grained access control.}, } @article {pmid30862019, year = {2019}, author = {Vega-Barbas, M and Diaz-Olivares, JA and Lu, K and Forsman, M and Seoane, F and Abtahi, F}, title = {P-Ergonomics Platform: Toward Precise, Pervasive, and Personalized Ergonomics using Wearable Sensors and Edge Computing.}, journal = {Sensors (Basel, Switzerland)}, volume = {19}, number = {5}, pages = {}, pmid = {30862019}, issn = {1424-8220}, support = {18454//EIT Health/ ; }, mesh = {*Biosensing Techniques ; Ergonomics/methods ; Humans ; Musculoskeletal Diseases/*physiopathology ; Occupational Diseases/physiopathology ; *Wearable Electronic Devices ; }, abstract = {Preventive healthcare has attracted much attention recently. Improving people's lifestyles and promoting a healthy diet and wellbeing are important, but the importance of work-related diseases should not be undermined. Musculoskeletal disorders (MSDs) are among the most common work-related health problems. Ergonomists already assess MSD risk factors and suggest changes in workplaces. However, existing methods are mainly based on visual observations, which have a relatively low reliability and cover only part of the workday. These suggestions concern the overall workplace and the organization of work, but rarely includes individuals' work techniques. In this work, we propose a precise and pervasive ergonomic platform for continuous risk assessment. The system collects data from wearable sensors, which are synchronized and processed by a mobile computing layer, from which exposure statistics and risk assessments may be drawn, and finally, are stored at the server layer for further analyses at both individual and group levels. The platform also enables continuous feedback to the worker to support behavioral changes. 
The deployed cloud platform in Amazon Web Services instances showed sufficient system flexibility to affordably fulfill requirements of small to medium enterprises, while it is expandable for larger corporations. The system usability scale of 76.6 indicates an acceptable grade of usability.}, } @article {pmid30855564, year = {2019}, author = {Sigdel, D and Kyi, V and Zhang, A and Setty, SP and Liem, DA and Shi, Y and Wang, X and Shen, J and Wang, W and Han, J and Ping, P}, title = {Cloud-Based Phrase Mining and Analysis of User-Defined Phrase-Category Association in Biomedical Publications.}, journal = {Journal of visualized experiments : JoVE}, volume = {}, number = {144}, pages = {}, pmid = {30855564}, issn = {1940-087X}, support = {R35 HL135772/HL/NHLBI NIH HHS/United States ; U54 GM114833/GM/NIGMS NIH HHS/United States ; U54 GM114838/GM/NIGMS NIH HHS/United States ; }, mesh = {Algorithms ; *Biomedical Research ; *Cloud Computing ; Data Mining/*methods ; Databases, Factual ; Humans ; *Publications ; }, abstract = {The rapid accumulation of biomedical textual data has far exceeded the human capacity of manual curation and analysis, necessitating novel text-mining tools to extract biological insights from large volumes of scientific reports. The Context-aware Semantic Online Analytical Processing (CaseOLAP) pipeline, developed in 2016, successfully quantifies user-defined phrase-category relationships through the analysis of textual data. CaseOLAP has many biomedical applications. We have developed a protocol for a cloud-based environment supporting the end-to-end phrase-mining and analyses platform. Our protocol includes data preprocessing (e.g., downloading, extraction, and parsing text documents), indexing and searching with Elasticsearch, creating a functional document structure called Text-Cube, and quantifying phrase-category relationships using the core CaseOLAP algorithm. Our data preprocessing generates key-value mappings for all documents involved. 
The preprocessed data is indexed to carry out a search of documents including entities, which further facilitates the Text-Cube creation and CaseOLAP score calculation. The obtained raw CaseOLAP scores are interpreted using a series of integrative analyses, including dimensionality reduction, clustering, temporal, and geographical analyses. Additionally, the CaseOLAP scores are used to create a graphical database, which enables semantic mapping of the documents. CaseOLAP defines phrase-category relationships in an accurate (identifies relationships), consistent (highly reproducible), and efficient manner (processes 100,000 words/sec). Following this protocol, users can access a cloud-computing environment to support their own configurations and applications of CaseOLAP. This platform offers enhanced accessibility and empowers the biomedical community with phrase-mining tools for widespread biomedical research applications.}, } @article {pmid30849102, year = {2019}, author = {Shao, X and Xie, Z and Xin, Y and Yang, J}, title = {A deadline constrained scheduling algorithm for cloud computing system based on the driver of dynamic essential path.}, journal = {PloS one}, volume = {14}, number = {3}, pages = {e0213234}, pmid = {30849102}, issn = {1932-6203}, mesh = {*Algorithms ; *Cloud Computing ; Markov Chains ; }, abstract = {To solve the problem of the deadline-constrained task scheduling in the cloud computing system, this paper proposes a deadline-constrained scheduling algorithm for cloud computing based on the driver of dynamic essential path (Deadline-DDEP). According to the changes of the dynamic essential path of each task node in the scheduling process, the dynamic sub-deadline strategy is proposed. The strategy assigns different sub-deadline values to every task node to meet the constraint relations among task nodes and the user's defined deadline. 
The strategy fully considers the dynamic sub-deadline affected by the dynamic essential path of task node in the scheduling process. The paper proposed the quality assessment of optimization cost strategy to solve the problem of selecting server for each task node. Based on the sub-deadline urgency and the relative execution cost in the scheduling process, the strategy selects the server that not only meets the sub-deadline but also obtains much lower execution cost. In this way, the proposed algorithm will make the task graph complete within its deadline, and minimize its total execution cost. Finally, we demonstrate the proposed algorithm via the simulation experiments using Matlab tools. The experimental results show that, the proposed algorithm produces remarkable performance improvement rate on the total execution cost that ranges between 10.3% and 30.8% under meeting the deadline constraint. In view of the experimental results, the proposed algorithm provides better-quality scheduling solution that is suitable for scientific application task execution in the cloud computing environment than IC-PCP, DCCP and CD-PCP.}, } @article {pmid30836717, year = {2019}, author = {Liu, F and Huang, Z and Wang, L}, title = {Energy-Efficient Collaborative Task Computation Offloading in Cloud-Assisted Edge Computing for IoT Sensors.}, journal = {Sensors (Basel, Switzerland)}, volume = {19}, number = {5}, pages = {}, pmid = {30836717}, issn = {1424-8220}, support = {No. GDDST[2016]176//the Engineering and Technology Research Center of Guangdong Province for Logistics Supply Chain and Internet of Things/ ; No. 2013B090200055//the Provincial Science and Technology Project in Guangdong Province/ ; No. 610245048129//the Key Laboratory of Cloud Computing for Super-integration Cloud Computing in Guangdong Province/ ; No. 
GDDST[2013]1513-1-11//the Engineering and Technology Research Center of Guangdong Province for Big Data Intelligent Processing/ ; }, abstract = {As an emerging and promising computing paradigm in the Internet of things (IoT), edge computing can significantly reduce energy consumption and enhance computation capability for resource-constrained IoT devices. Computation offloading has recently received considerable attention in edge computing. Many existing studies have investigated the computation offloading problem with independent computing tasks. However, due to the inter-task dependency in various devices that commonly happens in IoT systems, achieving energy-efficient computation offloading decisions remains a challengeable problem. In this paper, a cloud-assisted edge computing framework with a three-tier network in an IoT environment is introduced. In this framework, we first formulated an energy consumption minimization problem as a mixed integer programming problem considering two constraints, the task-dependency requirement and the completion time deadline of the IoT service. To address this problem, we then proposed an Energy-efficient Collaborative Task Computation Offloading (ECTCO) algorithm based on a semidefinite relaxation and stochastic mapping approach to obtain strategies of tasks computation offloading for IoT sensors. 
Simulation results demonstrated that the cloud-assisted edge computing framework was feasible and the proposed ECTCO algorithm could effectively reduce the energy cost of IoT sensors.}, } @article {pmid30828438, year = {2019}, author = {Gopaulakrishnan, S and Pollack, S and Stubbs, BJ and Pagès, H and Readey, J and Davis, S and Waldron, L and Morgan, M and Carey, V}, title = {restfulSE: A semantically rich interface for cloud-scale genomics with Bioconductor.}, journal = {F1000Research}, volume = {8}, number = {}, pages = {21}, pmid = {30828438}, issn = {2046-1402}, support = {U01 CA214846/CA/NCI NIH HHS/United States ; U24 CA180996/CA/NCI NIH HHS/United States ; U41 HG004059/HG/NHGRI NIH HHS/United States ; }, mesh = {Genome ; *Genomics ; *Software ; }, abstract = {Bioconductor's SummarizedExperiment class unites numerical assay quantifications with sample- and experiment-level metadata. SummarizedExperiment is the standard Bioconductor class for assays that produce matrix-like data, used by over 200 packages. We describe the restfulSE package, a deployment of this data model that supports remote storage. We illustrate use of SummarizedExperiment with remote HDF5 and Google BigQuery back ends, with two applications in cancer genomics. 
Our intent is to allow the use of familiar and semantically meaningful programmatic idioms to query genomic data, while abstracting the remote interface from end users and developers.}, } @article {pmid30816928, year = {2019}, author = {Shi, L and Meng, X and Tseng, E and Mascagni, M and Wang, Z}, title = {SpaRC: scalable sequence clustering using Apache Spark.}, journal = {Bioinformatics (Oxford, England)}, volume = {35}, number = {5}, pages = {760-768}, doi = {10.1093/bioinformatics/bty733}, pmid = {30816928}, issn = {1367-4811}, mesh = {*Algorithms ; Cluster Analysis ; High-Throughput Nucleotide Sequencing ; Metagenomics ; Sequence Analysis, DNA ; *Software ; }, abstract = {MOTIVATION: Whole genome shotgun based next-generation transcriptomics and metagenomics studies often generate 100-1000 GB sequence data derived from tens of thousands of different genes or microbial species. Assembly of these data sets requires tradeoffs between scalability and accuracy. Current assembly methods optimized for scalability often sacrifice accuracy and vice versa. An ideal solution would both scale and produce optimal accuracy for individual genes or genomes.

RESULTS: Here we describe an Apache Spark-based scalable sequence clustering application, SparkReadClust (SpaRC), that partitions reads based on their molecule of origin to enable downstream assembly optimization. SpaRC produces high clustering performance on transcriptomes and metagenomes from both short and long read sequencing technologies. It achieves near-linear scalability with input data size and number of compute nodes. SpaRC can run on both cloud computing and HPC environments without modification while delivering similar performance. Our results demonstrate that SpaRC provides a scalable solution for clustering billions of reads from next-generation sequencing experiments, and Apache Spark represents a cost-effective solution with rapid development/deployment cycles for similar large-scale sequence data analysis problems.

https://bitbucket.org/berkeleylab/jgi-sparc.}, } @article {pmid30813277, year = {2019}, author = {Sanchez-Gonzalez, PL and Díaz-Gutiérrez, D and Leo, TJ and Núñez-Rivas, LR}, title = {Toward Digitalization of Maritime Transport?.}, journal = {Sensors (Basel, Switzerland)}, volume = {19}, number = {4}, pages = {}, pmid = {30813277}, issn = {1424-8220}, abstract = {Although maritime transport is the backbone of world commerce, its digitalization lags significantly behind when we consider some basic facts. This work verifies the state-of-the-art as it currently applies to eight digital domains: Autonomous vehicles and robotics; artificial intelligence; big data; virtual reality, augmented and mixed reality; internet of things; the cloud and edge computing; digital security; and 3D printing and additive engineering. It also provides insight into each of the three sectors into which this industry has been divided: Ship design and shipbuilding; shipping; and ports. The work, based on a systematic literature review, demonstrates that there are domains on which almost no formal study has been done thus far and concludes that there are major areas that require attention in terms of research. 
It also illustrates the increasing interest on the subject, arising from the necessity of raising the maritime transport industry to the same level of digitalization as other industries.}, } @article {pmid32647812, year = {2019}, author = {Morris, C and Andreetto, P and Banci, L and Bonvin, AMJJ and Chojnowski, G and Cano, LD and Carazo, JM and Conesa, P and Daenke, S and Damaskos, G and Giachetti, A and Haley, NEC and Hekkelman, ML and Heuser, P and Joosten, RP and Kouřil, D and Křenek, A and Kulhánek, T and Lamzin, VS and Nadzirin, N and Perrakis, A and Rosato, A and Sanderson, F and Segura, J and Schaarschmidt, J and Sobolev, E and Traldi, S and Trellet, ME and Velankar, S and Verlato, M and Winn, M}, title = {West-Life: A Virtual Research Environment for structural biology.}, journal = {Journal of structural biology: X}, volume = {1}, number = {}, pages = {100006}, pmid = {32647812}, issn = {2590-1524}, abstract = {The West-Life project (https://about.west-life.eu/) is a Horizon 2020 project funded by the European Commission to provide data processing and data management services for the international community of structural biologists, and in particular to support integrative experimental approaches within the field of structural biology. It has developed enhancements to existing web services for structure solution and analysis, created new pipelines to link these services into more complex higher-level workflows, and added new data management facilities. 
Through this work it has striven to make the benefits of European e-Infrastructures more accessible to life-science researchers in general and structural biologists in particular.}, } @article {pmid30798573, year = {2019}, author = {Jia, CY and Chen, LX}, title = {[New opportunity of management strategy of diagnosis and treatment of chronic wound: emerging technology].}, journal = {Zhonghua shao shang za zhi = Zhonghua shaoshang zazhi = Chinese journal of burns}, volume = {35}, number = {2}, pages = {86-89}, doi = {10.3760/cma.j.issn.1009-2587.2019.02.002}, pmid = {30798573}, issn = {1009-2587}, support = {81372051//General Program of National Natural Science Foundation of China/ ; Z151100004015199//Science and Technology Plan of Beijing Municipality/ ; }, mesh = {Artificial Intelligence ; Burns/*therapy ; Humans ; *Plastic Surgery Procedures/trends ; *Surgery, Plastic ; Technology/trends ; }, abstract = {Along with the development of society and the change of disease spectrum, chronic wound is gradually becoming the core of burn and plastic surgery field. Although there have been some progresses in the diagnosis and treatment technology, the management strategy of chronic wound is still in the traditional mode stage. The development of internet of things, cloud computing, big data, artificial intelligence, and other emerging technologies is changing with each passing day, and they have rapidly penetrated into the health care field. 
To explore the application prospect of emerging technology in the diagnosis and treatment management of chronic wound and to plan its strategy and mode in the diagnosis and treatment of chronic wound can further promote development of discipline of burns.}, } @article {pmid30794647, year = {2019}, author = {Shimahara, Y and Sugawara, K and Kojo, KH and Kawai, H and Yoshida, Y and Hasezawa, S and Kutsuna, N}, title = {IMACEL: A cloud-based bioimage analysis platform for morphological analysis and image classification.}, journal = {PloS one}, volume = {14}, number = {2}, pages = {e0212619}, pmid = {30794647}, issn = {1932-6203}, mesh = {*Cloud Computing ; *Image Processing, Computer-Assisted ; *Machine Learning ; *Software ; }, abstract = {Automated quantitative image analysis is essential for all fields of life science research. Although several software programs and algorithms have been developed for bioimage processing, an advanced knowledge of image processing techniques and high-performance computing resources are required to use them. Hence, we developed a cloud-based image analysis platform called IMACEL, which comprises morphological analysis and machine learning-based image classification. The unique click-based user interface of IMACEL's morphological analysis platform enables researchers with limited resources to evaluate particles rapidly and quantitatively without prior knowledge of image processing. Because all the image processing and machine learning algorithms are performed on high-performance virtual machines, users can access the same analytical environment from anywhere. A validation study of the morphological analysis and image classification of IMACEL was performed. 
The results indicate that this platform is an accessible and potentially powerful tool for the quantitative evaluation of bioimages that will lower the barriers to life science research.}, } @article {pmid30782166, year = {2019}, author = {Idoga, PE and Toycan, M and Nadiri, H and Çelebi, E}, title = {Assessing factors militating against the acceptance and successful implementation of a cloud based health center from the healthcare professionals' perspective: a survey of hospitals in Benue state, northcentral Nigeria.}, journal = {BMC medical informatics and decision making}, volume = {19}, number = {1}, pages = {34}, pmid = {30782166}, issn = {1472-6947}, mesh = {Adult ; *Attitude of Health Personnel ; *Cloud Computing ; Cross-Sectional Studies ; *Delivery of Health Care ; Female ; *Health Knowledge, Attitudes, Practice ; *Health Personnel ; Humans ; Male ; Middle Aged ; Nigeria ; }, abstract = {BACKGROUND: Cloud based health platforms (CBHP) have tremendous capacity to meet patient's health needs. The benefits inherent in CBHP position it to be relevant for efficient healthcare delivery. Nonetheless, studies have shown that the adoption of new technologies is sometimes a challenge especially in developing nations. This study, therefore, aim to examine, identify and evaluate the factors affecting healthcare professionals' intention to accept the cloud-based health center (CBHC) in developing countries. The research study focuses on hospitals in North-central of Nigeria.

METHODS: Using questionnaire adopted from related studies, a cross-sectional study was carried out of 300 healthcare professionals selected from medical health institutions in Benue State Nigeria. The study adopted the Unified Theory of Acceptance and use of Technology Extended (UTAUT2). Data analysis was carried out using SPSS (V20.0) and LISREL (V9.30) generally employed in Structural Equation Modeling to examine components and path model. The Socio technical design method was used to develop the CBHC.

RESULTS: Findings portray performance expectancy, cloud based health knowledge, IT infrastructure and social influence to have significant effects on the intentions of healthcare professionals to accept and use the CBHC. These findings agree with prior related studies.

CONCLUSIONS: Our findings impacts the body of knowledge in that it identifies important areas the studies can be useful, especially, to managers and healthcare policy makers in the planning/implementation of health cloud. Research findings from the theoretical acceptance model identifies the factors and barriers towards sustainable cloud based health center solutions to meet the healthcare needs of people in remote communities.}, } @article {pmid30781650, year = {2019}, author = {Kang, J and Eom, DS}, title = {Offloading and Transmission Strategies for IoT Edge Devices and Networks.}, journal = {Sensors (Basel, Switzerland)}, volume = {19}, number = {4}, pages = {}, pmid = {30781650}, issn = {1424-8220}, support = {Korea University Grant//Korea University/ ; }, abstract = {We present a machine and deep learning method to offload trained deep learning model and transmit packets efficiently on resource-constrained internet of things (IoT) edge devices and networks. Recently, the types of IoT devices have become diverse and the volume of data has been increasing, such as images, voice, and time-series sensory signals generated by various devices. However, transmitting large amounts of data to a server or cloud becomes expensive owing to limited bandwidth, and leads to latency for time-sensitive operations. Therefore, we propose a novel offloading and transmission policy considering energy-efficiency, execution time, and the number of generated packets for resource-constrained IoT edge devices that run a deep learning model and a reinforcement learning method to find an optimal contention window size for effective channel access using a contention-based medium access control (MAC) protocol. A Reinforcement learning is used to improve the performance of the applied MAC protocol. 
Our proposed method determines the offload and transmission strategies that are better to directly send fragmented packets of raw data or to send the extracted feature vector or the final output of deep learning networks, considering the operation performance and power consumption of the resource-constrained microprocessor, as well as the power consumption of the radio transceiver and latency for transmitting the all the generated packets. In the performance evaluation, we measured the performance parameters of ARM Cortex-M4 and Cortex-M7 processors for the network simulation. The evaluation results show that our proposed adaptive channel access and learning-based offload and transmission methods outperform conventional role-based channel access schemes. They transmit packets of raw data and are effective for IoT edge devices and network protocols.}, } @article {pmid30781639, year = {2019}, author = {Jang, I and Lee, D and Choi, J and Son, Y}, title = {An Approach to Share Self-Taught Knowledge between Home IoT Devices at the Edge.}, journal = {Sensors (Basel, Switzerland)}, volume = {19}, number = {4}, pages = {}, pmid = {30781639}, issn = {1424-8220}, abstract = {The traditional Internet of Things (IoT) paradigm has evolved towards intelligent IoT applications which exploit knowledge produced by IoT devices using artificial intelligence techniques. Knowledge sharing between IoT devices is a challenging issue in this trend. In this paper, we propose a Knowledge of Things (KoT) framework which enables sharing self-taught knowledge between IoT devices which require similar or identical knowledge without help from the cloud. The proposed KoT framework allows an IoT device to effectively produce, cumulate, and share its self-taught knowledge with other devices at the edge in the vicinity. This framework can alleviate behavioral repetition in users and computational redundancy in systems in intelligent IoT applications. 
To demonstrate the feasibility of the proposed concept, we examine a smart home case study and build a prototype of the KoT framework-based smart home system. Experimental results show that the proposed KoT framework reduces the response time to use intelligent IoT devices from a user's perspective and the power consumption for computation from a system's perspective.}, } @article {pmid30781622, year = {2019}, author = {Pérez-Torres, R and Torres-Huitzil, C and Galeana-Zapién, H}, title = {A Cognitive-Inspired Event-Based Control for Power-Aware Human Mobility Analysis in IoT Devices.}, journal = {Sensors (Basel, Switzerland)}, volume = {19}, number = {4}, pages = {}, pmid = {30781622}, issn = {1424-8220}, mesh = {Cognition/*physiology ; Humans ; Range of Motion, Articular/*physiology ; Smartphone ; }, abstract = {Mobile Edge Computing (MEC) relates to the deployment of decision-making processes at the network edge or mobile devices rather than in a centralized network entity like the cloud. This paradigm shift is acknowledged as one key pillar to enable autonomous operation and self-awareness in mobile devices in IoT. Under this paradigm, we focus on mobility-based services (MBSs), where mobile devices are expected to perform energy-efficient GPS data acquisition while also providing location accuracy. We rely on a fully on-device Cognitive Dynamic Systems (CDS) platform to propose and evaluate a cognitive controller aimed at both tackling the presence of uncertainties and exploiting the mobility information learned by such CDS toward energy-efficient and accurate location tracking via mobility-aware sampling policies. 
We performed a set of experiments and validated that the proposed control strategy outperformed similar approaches in terms of energy savings and spatio-temporal accuracy in LBS and MBS for smartphone devices.}, } @article {pmid30759810, year = {2019}, author = {Dong, C and Wen, W}, title = {Joint Optimization for Task Offloading in Edge Computing: An Evolutionary Game Approach.}, journal = {Sensors (Basel, Switzerland)}, volume = {19}, number = {3}, pages = {}, pmid = {30759810}, issn = {1424-8220}, support = {2017YFB1001603//National Key R\&D of China/ ; 2015B010108004//Guangdong science and technology plan/ ; }, abstract = {The mobile edge computing (MEC) paradigm provides a promising solution to solve the resource-insufficiency problem in mobile terminals by offloading computation-intensive and delay-sensitive tasks to nearby edge nodes. However, limited computation resources in edge nodes may not be sufficient to serve excessive offloading tasks exceeding the computation capacities of edge nodes. Therefore, multiple edge clouds with a complementary central cloud coordinated to serve users is the efficient architecture to satisfy users' Quality-of-Service (QoS) requirements while trying to minimize some network service providers' cost. We study a dynamic, decentralized resource-allocation strategy based on evolutionary game theory to deal with task offloading to multiple heterogeneous edge nodes and central clouds among multi-users. In our strategy, the resource competition among multi-users is modeled by the process of replicator dynamics. During the process, our strategy can achieve one evolutionary equilibrium, meeting users' QoS requirements under resource constraints of edge nodes. The stability and fairness of this strategy is also proved by mathematical analysis. 
Illustrative studies show the effectiveness of our proposed strategy, outperforming other alternative methods.}, } @article {pmid30744013, year = {2019}, author = {Arroyo, P and Herrero, JL and Suárez, JI and Lozano, J}, title = {Wireless Sensor Network Combined with Cloud Computing for Air Quality Monitoring.}, journal = {Sensors (Basel, Switzerland)}, volume = {19}, number = {3}, pages = {}, pmid = {30744013}, issn = {1424-8220}, support = {TEC2013-48147-C6-5-R//Spanish Ministry of Economy and Competitiveness/ ; IB16048//Junta de Extremadura/ ; }, abstract = {Low-cost air pollution wireless sensors are emerging in densely distributed networks that provide more spatial resolution than typical traditional systems for monitoring ambient air quality. This paper presents an air quality measurement system that is composed of a distributed sensor network connected to a cloud system forming a wireless sensor network (WSN). Sensor nodes are based on low-power ZigBee motes, and transmit field measurement data to the cloud through a gateway. An optimized cloud computing system has been implemented to store, monitor, process, and visualize the data received from the sensor network. Data processing and analysis is performed in the cloud by applying artificial intelligence techniques to optimize the detection of compounds and contaminants. This proposed system is a low-cost, low-size, and low-power consumption method that can greatly enhance the efficiency of air quality measurements, since a great number of nodes could be deployed and provide relevant information for air quality distribution in different areas. Finally, a laboratory case study demonstrates the applicability of the proposed system for the detection of some common volatile organic compounds, including: benzene, toluene, ethylbenzene, and xylene. Principal component analysis, a multilayer perceptron with backpropagation learning algorithm, and support vector machine have been applied for data processing. 
The results obtained suggest good performance in discriminating and quantifying the concentration of the volatile organic compounds.}, } @article {pmid30741239, year = {2019}, author = {Wang, SL and Chiang, IE and Kuo, A and Lin, HI}, title = {Design and Usability Evaluation of Mobile Cloud Healthcare System for Diabetes Prevention.}, journal = {Studies in health technology and informatics}, volume = {257}, number = {}, pages = {455-459}, pmid = {30741239}, issn = {1879-8365}, mesh = {Aged ; *Cloud Computing ; *Data Mining ; *Delivery of Health Care ; *Diabetes Mellitus/prevention & control ; Humans ; Middle Aged ; }, abstract = {In this study, a mobile cloud healthcare system was implemented to assist middle- and old-aged people with diabetes preventive healthcare. First of all, a prototype system was developed. It was a system relying on data mining computing technology and big data analytics. Besides, it was constructed under the environment architecture of VMware cloud computing. This mobile cloud healthcare system was developed via mobile devices. Its purpose was to set up a diabetes preventive healthcare service for users, and to further assess the usability of this mobile cloud care system.}, } @article {pmid30726283, year = {2019}, author = {Feng, D and Wu, Z and Zuo, D and Zhang, Z}, title = {A multiobjective migration algorithm as a resource consolidation strategy in cloud computing.}, journal = {PloS one}, volume = {14}, number = {2}, pages = {e0211729}, pmid = {30726283}, issn = {1932-6203}, abstract = {To flexibly meet users' demands in cloud computing, it is essential for providers to establish the efficient virtual mapping in datacenters. Accordingly, virtualization has become a key aspect of cloud computing. It is possible to consolidate resources based on the single objective of reducing energy consumption. However, it is challenging for the provider to consolidate resources efficiently based on a multiobjective optimization strategy. 
In this paper, we present a novel migration algorithm to consolidate resources adaptively using a two-level scheduling algorithm. First, we propose the grey relational analysis (GRA) and technique for order preference by similarity to the ideal solution (TOPSIS) policy to simultaneously determine the hotspots by the main selected factors, including the CPU and the memory. Second, a two-level hybrid heuristic algorithm is designed to consolidate resources in order to reduce costs and energy consumption, mainly depending on the PSO and ACO algorithms. The improved PSO can determine the migrating VMs quickly, and the proposed ACO can locate the positions. Extensive experiments demonstrate that the two-level scheduling algorithm performs the consolidation strategy efficiently during the dynamic allocation process.}, } @article {pmid30717464, year = {2019}, author = {Avgeris, M and Spatharakis, D and Dechouniotis, D and Kalatzis, N and Roussaki, I and Papavassiliou, S}, title = {Where There Is Fire There Is SMOKE: A Scalable Edge Computing Framework for Early Fire Detection.}, journal = {Sensors (Basel, Switzerland)}, volume = {19}, number = {3}, pages = {}, pmid = {30717464}, issn = {1424-8220}, abstract = {A Cyber-Physical Social System (CPSS) tightly integrates computer systems with the physical world and human activities. In this article, a three-level CPSS for early fire detection is presented to assist public authorities to promptly identify and act on emergency situations. At the bottom level, the system's architecture involves IoT nodes enabled with sensing and forest monitoring capabilities. Additionally, in this level, the crowd sensing paradigm is exploited to aggregate environmental information collected by end user devices present in the area of interest. Since the IoT nodes suffer from limited computational energy resources, an Edge Computing Infrastructure, at the middle level, facilitates the offloaded data processing regarding possible fire incidents. 
At the top level, a decision-making service deployed on Cloud nodes integrates data from various sources, including users' information on social media, and evaluates the situation criticality. In our work, a dynamic resource scaling mechanism for the Edge Computing Infrastructure is designed to address the demanding Quality of Service (QoS) requirements of this IoT-enabled time and mission critical application. The experimental results indicate that the vertical and horizontal scaling on the Edge Computing layer is beneficial for both the performance and the energy consumption of the IoT nodes.}, } @article {pmid30715927, year = {2019}, author = {Metzcar, J and Wang, Y and Heiland, R and Macklin, P}, title = {A Review of Cell-Based Computational Modeling in Cancer Biology.}, journal = {JCO clinical cancer informatics}, volume = {3}, number = {}, pages = {1-13}, pmid = {30715927}, issn = {2473-4276}, support = {R01 CA180149/CA/NCI NIH HHS/United States ; U01 CA232137/CA/NCI NIH HHS/United States ; U54 CA143907/CA/NCI NIH HHS/United States ; }, mesh = {Animals ; Cell Biology ; Cell Hypoxia ; Computer Simulation ; Epithelial-Mesenchymal Transition ; Humans ; *Models, Biological ; Neoplasm Invasiveness ; Neoplasms/metabolism/*pathology ; Neoplastic Stem Cells/metabolism/*pathology ; Systems Biology/*methods ; Tumor Microenvironment ; }, abstract = {Cancer biology involves complex, dynamic interactions between cancer cells and their tissue microenvironments. Single-cell effects are critical drivers of clinical progression. Chemical and mechanical communication between tumor and stromal cells can co-opt normal physiologic processes to promote growth and invasion. Cancer cell heterogeneity increases cancer's ability to test strategies to adapt to microenvironmental stresses. Hypoxia and treatment can select for cancer stem cells and drive invasion and resistance. 
Cell-based computational models (also known as discrete models, agent-based models, or individual-based models) simulate individual cells as they interact in virtual tissues, which allows us to explore how single-cell behaviors lead to the dynamics we observe and work to control in cancer systems. In this review, we introduce the broad range of techniques available for cell-based computational modeling. The approaches can range from highly detailed models of just a few cells and their morphologies to millions of simpler cells in three-dimensional tissues. Modeling individual cells allows us to directly translate biologic observations into simulation rules. In many cases, individual cell agents include molecular-scale models. Most models also simulate the transport of oxygen, drugs, and growth factors, which allow us to link cancer development to microenvironmental conditions. We illustrate these methods with examples drawn from cancer hypoxia, angiogenesis, invasion, stem cells, and immunosurveillance. An ecosystem of interoperable cell-based simulation tools is emerging at a time when cloud computing resources make software easier to access and supercomputing resources make large-scale simulation studies possible. 
As the field develops, we anticipate that high-throughput simulation studies will allow us to rapidly explore the space of biologic possibilities, prescreen new therapeutic strategies, and even re-engineer tumor and stromal cells to bring cancer systems under control.}, } @article {pmid30714638, year = {2019}, author = {Petegrosso, R and Li, Z and Srour, MA and Saad, Y and Zhang, W and Kuang, R}, title = {Scalable remote homology detection and fold recognition in massive protein networks.}, journal = {Proteins}, volume = {87}, number = {6}, pages = {478-491}, doi = {10.1002/prot.25669}, pmid = {30714638}, issn = {1097-0134}, support = {NIH R01CA225435/CA/NCI NIH HHS/United States ; }, mesh = {Algorithms ; Computational Biology ; Humans ; Proteins/*chemistry ; Sequence Analysis, Protein ; Software ; }, abstract = {The global connectivities in very large protein similarity networks contain traces of evolution among the proteins for detecting protein remote evolutionary relations or structural similarities. To investigate how well a protein network captures the evolutionary information, a key limitation is the intensive computation of pairwise sequence similarities needed to construct very large protein networks. In this article, we introduce label propagation on low-rank kernel approximation (LP-LOKA) for searching massively large protein networks. LP-LOKA propagates initial protein similarities in a low-rank graph by Nyström approximation without computing all pairwise similarities. With scalable parallel implementations based on distributed-memory using message-passing interface and Apache-Hadoop/Spark on cloud, LP-LOKA can search protein networks with one million proteins or more. In the experiments on Swiss-Prot/ADDA/CASP data, LP-LOKA significantly improved protein ranking over the widely used HMM-HMM or profile-sequence alignment methods utilizing large protein networks. 
It was observed that the larger the protein similarity network, the better the performance, especially on relatively small protein superfamilies and folds. The results suggest that computing massively large protein network is necessary to meet the growing need of annotating proteins from newly sequenced species and LP-LOKA is both scalable and accurate for searching massively large protein networks.}, } @article {pmid30697861, year = {2019}, author = {Saba, T and Khan, SU and Islam, N and Abbas, N and Rehman, A and Javaid, N and Anjum, A}, title = {Cloud-based decision support system for the detection and classification of malignant cells in breast cancer using breast cytology images.}, journal = {Microscopy research and technique}, volume = {82}, number = {6}, pages = {775-785}, doi = {10.1002/jemt.23222}, pmid = {30697861}, issn = {1097-0029}, support = {RG-CCIS-2017-06-02//Prince Sultan University Riyadh 11586 Saudi Arabia/ ; }, mesh = {Breast Neoplasms/*diagnosis/*pathology ; *Cloud Computing ; Cytological Techniques/*methods ; *Decision Support Techniques ; Female ; Humans ; Image Processing, Computer-Assisted/*methods ; Neoplasm Grading/methods ; Pakistan ; }, abstract = {The advancement of computer- and internet-based technologies has transformed the nature of services in healthcare by using mobile devices in conjunction with cloud computing. The classical phenomenon of patient-doctor diagnostics is extended to a more robust advanced concept of E-health, where remote online/offline treatment and diagnostics can be performed. In this article, we propose a framework which incorporates a cloud-based decision support system for the detection and classification of malignant cells in breast cancer, while using breast cytology images. In the proposed approach, shape-based features are used for the detection of tumor cells. 
Furthermore, these features are used for the classification of cells into malignant and benign categories using Naive Bayesian and Artificial Neural Network. Moreover, an important phase addressed in the proposed framework is the grading of the affected cells, which could help in grade level necessary medical procedures for patients during the diagnostic process. For demonstrating the effectiveness of the proposed approach, experiments are performed on real data sets comprising of patients data, which has been collected from the pathology department of Lady Reading Hospital of Pakistan. Moreover, a cross-validation technique has been performed for the evaluation of the classification accuracy, which shows performance accuracy of 98% as compared to physical methods used by a pathologist for the detection and classification of the malignant cell. Experimental results show that the proposed approach has significantly improved the detection and classification of the malignant cells in breast cytology images.}, } @article {pmid30691868, year = {2019}, author = {Grossman, RL}, title = {Data Lakes, Clouds, and Commons: A Review of Platforms for Analyzing and Sharing Genomic Data.}, journal = {Trends in genetics : TIG}, volume = {35}, number = {3}, pages = {223-234}, pmid = {30691868}, issn = {0168-9525}, support = {OT3 HL142481/HL/NHLBI NIH HHS/United States ; OT3 OD025460/OD/NIH HHS/United States ; U2C HD109731/HD/NICHD NIH HHS/United States ; HHSN261200800001C/RC/CCR NIH HHS/United States ; HHSN261200800001E/CA/NCI NIH HHS/United States ; }, mesh = {Big Data ; Biomedical Research/trends ; Cloud Computing/*trends ; Computational Biology/trends ; Genomics/*methods ; Humans ; Information Dissemination/*methods ; *Software ; }, abstract = {Data commons collate data with cloud computing infrastructure and commonly used software services, tools, and applications to create biomedical resources for the large-scale management, analysis, harmonization, and sharing of biomedical 
data. Over the past few years, data commons have been used to analyze, harmonize, and share large-scale genomics datasets. Data ecosystems can be built by interoperating multiple data commons. It can be quite labor intensive to curate, import, and analyze the data in a data commons. Data lakes provide an alternative to data commons and simply provide access to data, with the data curation and analysis deferred until later and delegated to those that access the data. We review software platforms for managing, analyzing, and sharing genomic data, with an emphasis on data commons, but also cover data ecosystems and data lakes.}, } @article {pmid30682611, year = {2019}, author = {Rumson, AG and Hallett, SH}, title = {Innovations in the use of data facilitating insurance as a resilience mechanism for coastal flood risk.}, journal = {The Science of the total environment}, volume = {661}, number = {}, pages = {598-612}, doi = {10.1016/j.scitotenv.2019.01.114}, pmid = {30682611}, issn = {1879-1026}, abstract = {Insurance plays a crucial role in human efforts to adapt to environmental hazards. Effective insurance can serve as both a measure to distribute, and a method to communicate risk. In order for insurance to fulfil these roles successfully, policy pricing and cover choices must be risk-based and founded on accurate information. This is reliant on a robust evidence base forming the foundation of policy choices. This paper focuses on the evidence available to insurers and emergent innovation in the use of data. The main risk considered is coastal flooding, for which the insurance sector offers an option for potential adaptation, capable of increasing resilience. However, inadequate supply and analysis of data have been highlighted as factors preventing insurance from fulfilling this role. Research was undertaken to evaluate how data are currently, and could potentially, be used within risk evaluations for the insurance industry. 
This comprised 50 interviews with those working and associated with the London insurance market. The research reveals new opportunities, which could facilitate improvements in risk-reflective pricing of policies. These relate to a new generation of data collection techniques and analytics, such as those associated with satellite-derived data, IoT (Internet of Things) sensors, cloud computing, and Big Data solutions. Such technologies present opportunities to reduce moral hazard through basing predictions and pricing of risk on large empirical datasets. The value of insurers' claims data is also revealed, and is shown to have the potential to refine, calibrate, and validate models and methods. The adoption of such data-driven techniques could enable insurers to re-evaluate risk ratings, and in some instances, extend coverage to locations and developments, previously rated as too high a risk to insure. Conversely, other areas may be revealed more vulnerable, which could generate negative impacts for residents in these regions, such as increased premiums. 
However, the enhanced risk awareness generated, by new technology, data and data analytics, could positively alter future planning, development and investment decisions.}, } @article {pmid30678631, year = {2019}, author = {Maraver, P and Armañanzas, R and Gillette, TA and Ascoli, GA}, title = {PaperBot: open-source web-based search and metadata organization of scientific literature.}, journal = {BMC bioinformatics}, volume = {20}, number = {1}, pages = {50}, pmid = {30678631}, issn = {1471-2105}, support = {R01 NS086082/NS/NINDS NIH HHS/United States ; U01 MH114829/MH/NIMH NIH HHS/United States ; R01NS086082//National Institutes of Health/ ; U01MH114829//National Institutes of Health/ ; R01NS39600//National Institutes of Health/ ; R01 NS039600/NS/NINDS NIH HHS/United States ; }, mesh = {Biomedical Research ; *Databases, Bibliographic ; Information Storage and Retrieval ; *Internet ; *Metadata ; *Publications ; Software ; User-Computer Interface ; }, abstract = {BACKGROUND: The biomedical literature is expanding at ever-increasing rates, and it has become extremely challenging for researchers to keep abreast of new data and discoveries even in their own domains of expertise. We introduce PaperBot, a configurable, modular, open-source crawler to automatically find and efficiently index peer-reviewed publications based on periodic full-text searches across publisher web portals.

RESULTS: PaperBot may operate stand-alone or it can be easily integrated with other software platforms and knowledge bases. Without user interactions, PaperBot retrieves and stores the bibliographic information (full reference, corresponding email contact, and full-text keyword hits) based on pre-set search logic from a wide range of sources including Elsevier, Wiley, Springer, PubMed/PubMedCentral, Nature, and Google Scholar. Although different publishing sites require different search configurations, the common interface of PaperBot unifies the process from the user perspective. Once saved, all information becomes web accessible allowing efficient triage of articles based on their actual relevance and seamless annotation of suitable metadata content. The platform allows the agile reconfiguration of all key details, such as the selection of search portals, keywords, and metadata dimensions. The tool also provides a one-click option for adding articles manually via digital object identifier or PubMed ID. The microservice architecture of PaperBot implements these capabilities as a loosely coupled collection of distinct modules devised to work separately, as a whole, or to be integrated with or replaced by additional software. All metadata is stored in a schema-less NoSQL database designed to scale efficiently in clusters by minimizing the impedance mismatch between relational model and in-memory data structures.

CONCLUSIONS: As a testbed, we deployed PaperBot to help identify and manage peer-reviewed articles pertaining to digital reconstructions of neuronal morphology in support of the NeuroMorpho.Org data repository. PaperBot enabled the custom definition of both general and neuroscience-specific metadata dimensions, such as animal species, brain region, neuron type, and digital tracing system. Since deployment, PaperBot helped NeuroMorpho.Org more than quintuple the yearly volume of processed information while maintaining a stable personnel workforce.}, } @article {pmid30662564, year = {2019}, author = {Hwang, DK and Hsu, CC and Chang, KJ and Chao, D and Sun, CH and Jheng, YC and Yarmishyn, AA and Wu, JC and Tsai, CY and Wang, ML and Peng, CH and Chien, KH and Kao, CL and Lin, TC and Woung, LC and Chen, SJ and Chiou, SH}, title = {Artificial intelligence-based decision-making for age-related macular degeneration.}, journal = {Theranostics}, volume = {9}, number = {1}, pages = {232-245}, pmid = {30662564}, issn = {1838-7640}, mesh = {*Artificial Intelligence ; *Decision Making ; Diagnostic Tests, Routine/*methods ; Humans ; Image Processing, Computer-Assisted/*methods ; Macular Degeneration/*diagnosis ; Software ; Telemedicine/methods ; Tomography, Optical Coherence/*methods ; }, abstract = {Artificial intelligence (AI) based on convolutional neural networks (CNNs) has a great potential to enhance medical workflow and improve health care quality. Of particular interest is practical implementation of such AI-based software as a cloud-based tool aimed for telemedicine, the practice of providing medical care from a distance using electronic interfaces. Methods: In this study, we used a dataset of labeled 35,900 optical coherence tomography (OCT) images obtained from age-related macular degeneration (AMD) patients and used them to train three types of CNNs to perform AMD diagnosis. 
Results: Here, we present an AI- and cloud-based telemedicine interaction tool for diagnosis and proposed treatment of AMD. Through deep learning process based on the analysis of preprocessed optical coherence tomography (OCT) imaging data, our AI-based system achieved the same image discrimination rate as that of retinal specialists in our hospital. The AI platform's detection accuracy was generally higher than 90% and was significantly superior (p < 0.001) to that of medical students (69.4% and 68.9%) and equal (p = 0.99) to that of retinal specialists (92.73% and 91.90%). Furthermore, it provided appropriate treatment recommendations comparable to those of retinal specialists. Conclusions: We therefore developed a website for realistic cloud computing based on this AI platform, available at https://www.ym.edu.tw/~AI-OCT/. Patients can upload their OCT images to the website to verify whether they have AMD and require treatment. Using an AI-based cloud service represents a real solution for medical imaging diagnostics and telemedicine.}, } @article {pmid30653364, year = {2020}, author = {Moonian, O and Jodheea-Jutton, A and Khedo, KK and Baichoo, S and Nagowah, SD and Nagowah, L and Mungloo-Dilmohamud, Z and Cheerkoot-Jalim, S}, title = {Recent advances in computational tools and resources for the self-management of type 2 diabetes.}, journal = {Informatics for health & social care}, volume = {45}, number = {1}, pages = {77-95}, doi = {10.1080/17538157.2018.1559168}, pmid = {30653364}, issn = {1753-8165}, mesh = {Blood Glucose Self-Monitoring/methods ; Diabetes Mellitus, Type 2/*therapy ; *Health Behavior ; Humans ; Internet ; Mobile Applications ; Self-Management/*methods ; Social Media ; Telemedicine ; Video Games ; }, abstract = {Background: While healthcare systems are investing resources on type 2 diabetes patients, self-management is becoming the new trend for these patients. 
Due to the pervasiveness of computing devices, a number of computerized systems are emerging to support the self-management of patients. Objective: The primary objective of this review is to identify and categorize the computational tools that exist for the self-management of type 2 diabetes, and to identify challenges that need to be addressed. Results: The tools have been categorized into web applications, mobile applications, games and ubiquitous diabetes management systems. We provide a detailed description of the salient features of each category along with a comparison of the various tools, listing their challenges and practical implications. A list of platforms that can be used to develop new tools for the self-management of type 2 diabetes, namely mobile applications development, sensor development, cloud computing, social media, and machine learning and predictive analysis platforms, are also provided. Discussions: This paper identifies a number of challenges in the existing categories of computational tools and consequently presents possible avenues for future research. 
Failure to address these issues will negatively impact on the adoption rate of the self-management tools and applications.}, } @article {pmid30653336, year = {2019}, author = {Kim, B and Ali, T and Dong, C and Lijeron, C and Mazumder, R and Wultsch, C and Krampis, K}, title = {miCloud: A Plug-n-Play, Extensible, On-Premises Bioinformatics Cloud for Seamless Execution of Complex Next-Generation Sequencing Data Analysis Pipelines.}, journal = {Journal of computational biology : a journal of computational molecular cell biology}, volume = {26}, number = {3}, pages = {280-284}, pmid = {30653336}, issn = {1557-8666}, support = {G12 MD007599/MD/NIMHD NIH HHS/United States ; UL1 TR000457/TR/NCATS NIH HHS/United States ; UL1 TR002384/TR/NCATS NIH HHS/United States ; }, mesh = {Animals ; *Cloud Computing ; Genomics/*methods ; Humans ; RNA-Seq/*methods ; *Software ; }, abstract = {The availability of low-cost small-factor sequencers, such as the Illumina MiSeq, MiniSeq, or iSeq, have paved the way for democratizing genomics sequencing, providing researchers in minority universities with access to the technology that was previously only affordable by institutions with large core facilities. However, these instruments are not bundled with software for performing bioinformatics data analysis, and the data analysis can be the main bottleneck for independent laboratories or even small clinical facilities that consider adopting genomic sequencing for medical applications. To address this issue, we have developed miCloud, a bioinformatics platform that enables genomic data analysis through a fully featured data analysis cloud, which seamlessly integrates with genome sequencers over the local network. The miCloud can be easily deployed without any prior bioinformatics expertise on any computing environment, from a laboratory computer workstation to a university computer cluster. 
Our platform not only provides access to a set of preconfigured RNA-Seq and ChIP-Seq bioinformatics pipelines, but also enables users to develop or install new preconfigured tools from the large selection available on open-source online Docker container repositories. The miCloud built-in analysis pipelines are also integrated with the Visual Omics Explorer framework (Kim et al., 2016), which provides rich interactive visualizations and publication-ready graphics from the next-generation sequencing data. Ultimately, the miCloud demonstrates a bioinformatics approach that can be adopted in the field for standardizing genomic data analysis, similarly to the way molecular biology sample preparation kits have standardized laboratory operations.}, } @article {pmid30640626, year = {2019}, author = {Wang, C and Qin, Y and Jin, H and Kim, I and Granados Vergara, JD and Dong, C and Jiang, Y and Zhou, Q and Li, J and He, Z and Zou, Z and Zheng, LR and Wu, X and Wang, Y}, title = {A Low Power Cardiovascular Healthcare System With Cross-Layer Optimization From Sensing Patch to Cloud Platform.}, journal = {IEEE transactions on biomedical circuits and systems}, volume = {13}, number = {2}, pages = {314-329}, doi = {10.1109/TBCAS.2019.2892334}, pmid = {30640626}, issn = {1940-9990}, mesh = {Algorithms ; Arrhythmias, Cardiac/diagnosis ; Cardiovascular System/*anatomy & histology ; *Cloud Computing ; Data Compression ; *Delivery of Health Care ; *Electric Power Supplies ; Electrocardiography ; Humans ; Posture ; Running ; Signal Processing, Computer-Assisted ; Wavelet Analysis ; }, abstract = {Nowadays, cardiovascular disease is still one of the primary diseases that limit life expectation of humans. To address this challenge, this work reports an Internet of Medical Things (IoMT)-based cardiovascular healthcare system with cross-layer optimization from sensing patch to cloud platform. 
A wearable ECG patch with a custom System-on-Chip (SoC) features a miniaturized footprint, low power consumption, and embedded signal processing capability. The patch also integrates wireless connectivity with mobile devices and cloud platform for optimizing the complete system. On the big picture, a "wearable patch-mobile-cloud" hybrid computing framework is proposed with cross-layer optimization for performance-power trade-off in embedded-computing. The measurement results demonstrate that the on-patch compression ratio of the raw ECG signal can reach 12.07 yielding a percentage root mean square variation of 2.29%. In the test with the MIT-BIH database, the average improvement of signal to noise ratio and mean square error are 12.63 dB and 94.47%, respectively. The average accuracy of disease prediction operation executed in cloud platform is 97%.}, } @article {pmid30639324, year = {2019}, author = {Chen, H and Huffman, JE and Brody, JA and Wang, C and Lee, S and Li, Z and Gogarten, SM and Sofer, T and Bielak, LF and Bis, JC and Blangero, J and Bowler, RP and Cade, BE and Cho, MH and Correa, A and Curran, JE and de Vries, PS and Glahn, DC and Guo, X and Johnson, AD and Kardia, S and Kooperberg, C and Lewis, JP and Liu, X and Mathias, RA and Mitchell, BD and O'Connell, JR and Peyser, PA and Post, WS and Reiner, AP and Rich, SS and Rotter, JI and Silverman, EK and Smith, JA and Vasan, RS and Wilson, JG and Yanek, LR and , and , and Redline, S and Smith, NL and Boerwinkle, E and Borecki, IB and Cupples, LA and Laurie, CC and Morrison, AC and Rice, KM and Lin, X}, title = {Efficient Variant Set Mixed Model Association Tests for Continuous and Binary Traits in Large-Scale Whole-Genome Sequencing Studies.}, journal = {American journal of human genetics}, volume = {104}, number = {2}, pages = {260-274}, pmid = {30639324}, issn = {1537-6605}, support = {R35 CA197449/CA/NCI NIH HHS/United States ; R01 HL131136/HL/NHLBI NIH HHS/United States ; U19 CA203654/CA/NCI NIH 
HHS/United States ; R01 HL139553/HL/NHLBI NIH HHS/United States ; R01 HL137922/HL/NHLBI NIH HHS/United States ; R01 HL119443/HL/NHLBI NIH HHS/United States ; R35 HL135818/HL/NHLBI NIH HHS/United States ; U01 HL137162/HL/NHLBI NIH HHS/United States ; P01 CA134294/CA/NCI NIH HHS/United States ; U01 HG009088/HG/NHGRI NIH HHS/United States ; P20 GM121334/GM/NIGMS NIH HHS/United States ; R00 HL130593/HL/NHLBI NIH HHS/United States ; U54 GM115428/GM/NIGMS NIH HHS/United States ; K01 HL135405/HL/NHLBI NIH HHS/United States ; U01 HL120393/HL/NHLBI NIH HHS/United States ; R01 HL113338/HL/NHLBI NIH HHS/United States ; }, mesh = {Chromosomes, Human, Pair 4/genetics ; Cloud Computing ; Female ; Fibrinogen/analysis/genetics ; *Genetic Association Studies ; Genetics, Population ; Humans ; Male ; *Models, Genetic ; National Heart, Lung, and Blood Institute (U.S.) ; Precision Medicine ; Research Design ; Time Factors ; United States ; *Whole Genome Sequencing ; }, abstract = {With advances in whole-genome sequencing (WGS) technology, more advanced statistical methods for testing genetic association with rare variants are being developed. Methods in which variants are grouped for analysis are also known as variant-set, gene-based, and aggregate unit tests. The burden test and sequence kernel association test (SKAT) are two widely used variant-set tests, which were originally developed for samples of unrelated individuals and later have been extended to family data with known pedigree structures. However, computationally efficient and powerful variant-set tests are needed to make analyses tractable in large-scale WGS studies with complex study samples. In this paper, we propose the variant-set mixed model association tests (SMMAT) for continuous and binary traits using the generalized linear mixed model framework. 
These tests can be applied to large-scale WGS studies involving samples with population structure and relatedness, such as in the National Heart, Lung, and Blood Institute's Trans-Omics for Precision Medicine (TOPMed) program. SMMATs share the same null model for different variant sets, and a virtue of this null model, which includes covariates only, is that it needs to be fit only once for all tests in each genome-wide analysis. Simulation studies show that all the proposed SMMATs correctly control type I error rates for both continuous and binary traits in the presence of population structure and relatedness. We also illustrate our tests in a real data example of analysis of plasma fibrinogen levels in the TOPMed program (n = 23,763), using the Analysis Commons, a cloud-based computing platform.}, } @article {pmid30634978, year = {2019}, author = {Adapa, SR and Taylor, RA and Wang, C and Thomson-Luque, R and Johnson, LR and Jiang, RHY}, title = {Plasmodium vivax readiness to transmit: implication for malaria eradication.}, journal = {BMC systems biology}, volume = {13}, number = {1}, pages = {5}, pmid = {30634978}, issn = {1752-0509}, support = {R01 AI117017/NIAID NIH HHS/National Institute of Allergy and Infectious Diseases Extramural Activities/United States ; R35 CA197731/NCI NIH HHS/National Cancer Institute/United States ; }, mesh = {Blood/parasitology ; Gametogenesis ; Gene Expression Profiling ; Humans ; Malaria, Vivax/epidemiology/*transmission ; Models, Biological ; Plasmodium vivax/genetics/*physiology ; RNA, Messenger/genetics ; }, abstract = {BACKGROUND: The lack of a continuous long-term in vitro culture system for Plasmodium vivax severely limits our knowledge of pathophysiology of the most widespread malaria parasite. To gain direct understanding of P. vivax human infections, we used Next Generation Sequencing data mining to unravel parasite in vivo expression profiles for P. vivax, and P. falciparum as comparison.

RESULTS: We performed cloud and local computing to extract parasite transcriptomes from publicly available raw data of human blood samples. We developed a Poisson Modelling (PM) method to confidently identify parasite derived transcripts in mixed RNAseq signals of infected host tissues. We successfully retrieved and reconstructed parasite transcriptomes from infected patient blood as early as the first blood stage cycle; and the same methodology did not recover any significant signal from controls. Surprisingly, these first generation blood parasites already show strong signature of transmission, which indicates the commitment from asexual-to-sexual stages. Further, we place the results within the context of P. vivax's complex life cycle, by developing mathematical models for P. vivax and P. falciparum and using sensitivity analysis assess the relative epidemiological impact of possible early stage transmission.

CONCLUSION: The study uncovers the earliest onset of P. vivax blood pathogenesis and highlights the challenges of P. vivax eradication programs.}, } @article {pmid30626151, year = {2019}, author = {Socarrás Bertiz, CA and Fernández Lozano, JJ and Gomez-Ruiz, JA and García-Cerezo, A}, title = {Integration of a Mobile Node into a Hybrid Wireless Sensor Network for Urban Environments.}, journal = {Sensors (Basel, Switzerland)}, volume = {19}, number = {1}, pages = {}, pmid = {30626151}, issn = {1424-8220}, support = {DPI2015-65186-R//Ministerio de Economía, Industria y Competitividad, Gobierno de España/ ; }, abstract = {Robots, or in general, intelligent vehicles, require large amounts of data to adapt their behavior to the environment and achieve their goals. When their missions take place in large areas, using additional information to that gathered by the onboard sensors frequently offers a more efficient solution of the problem. The emergence of Cyber-Physical Systems and Cloud computing allows this approach, but integration of sensory information, and its effective availability for the robots or vehicles is challenging. This paper addresses the development and implementation of a modular mobile node of a Wireless Sensor Network (WSN), designed to be mounted onboard vehicles, and capable of using different sensors according to mission needs. The mobile node is integrated with an existing static network, transforming it into a Hybrid Wireless Sensor Network (H-WSN), and adding flexibility and range to it. The integration is achieved without the need for multi-hop routing. A database holds the data acquired by both mobile and static nodes, allowing access in real-time to the gathered information. A Human-Machine Interface (HMI) presents this information to users. 
Finally, the system is tested in real urban scenarios in a use-case of measurement of gas levels.}, } @article {pmid30624962, year = {2019}, author = {Solar, R and Sepulveda, V and Inostrosa-Psijas, A and Rojas, O and Gil-Costa, V and Marin, M}, title = {A Service-Oriented Platform for Approximate Bayesian Computation in Population Genetics.}, journal = {Journal of computational biology : a journal of computational molecular cell biology}, volume = {26}, number = {3}, pages = {266-279}, doi = {10.1089/cmb.2018.0217}, pmid = {30624962}, issn = {1557-8666}, mesh = {Animals ; Bayes Theorem ; Cloud Computing ; Genetics, Population/*methods ; *Software ; }, abstract = {Approximate Bayesian computation (ABC) is a useful technique developed for solving Bayesian inference without explicitly requiring a likelihood function. In population genetics, it is widely used to extract part of the information about the evolutionary history of genetic data. The ABC compares the summary statistics computed on simulated and observed data sets. Typically, a forward-in-time approach is used to simulate the genetic material of a population starting from an initial ancestral population and following the evolution of the individuals by advancing generation by generation under various demographic and genetic forces. This approach is computationally expensive and requires a large number of computations making the use of high-performance computing crucial for decreasing the overall response times. In this work, we propose a fully distributed web service-oriented platform for ABC that is based on forward-in-time simulations. Our proposal is based on a client-server approach. The client enables users to define simulation scenarios. The server enables efficient and scalable population simulations and can be deployed on a distributed cluster of processors or even in the cloud. 
It is composed of four services: a workload generator, a simulation controller, a simulation results analyzer, and a result builder. The server performs multithread simulations by executing a simulation kernel encapsulated in a proposed libgdrift library. We present and evaluate three different libgdrift library approaches whose algorithms aim to reduce execution times and memory consumption.}, } @article {pmid30623258, year = {2019}, author = {Palanikkumar, D and Priya, S}, title = {Brain Storm Optimization Graph Theory (BSOGT) and Energy Resource Aware Virtual Network Mapping (ERVNM) for Medical Image System in Cloud.}, journal = {Journal of medical systems}, volume = {43}, number = {2}, pages = {37}, pmid = {30623258}, issn = {1573-689X}, mesh = {*Algorithms ; *Cloud Computing ; Health Information Exchange ; Health Information Management/*organization & administration ; Humans ; Information Storage and Retrieval ; User-Computer Interface ; }, abstract = {With the development of Internet and the make use of Internet for medical information, the demand for huge scale and reliable managing medical information has brought out the huge scale Internet data centers. This work that has been presented here highlights the structural lay out and formulation of the medical information model. The aim of presenting this to aid medical departments as well as workers to exchange information and integrate available resources that help facilitate the analysis to be conducted on the given information. Software here comprises of medical information and offers a comprehensive service structure that benefits medical data centers. VNM or Virtual Network Mapping (VNM) essentially relates to substrate network that involves the installation and structuring of on demand virtual machines. These however are subjective to certain limitations that are applicable in relation to latency, capacity as well as bandwidth. 
Data centers need to dynamically handle cloud workloads effectively and efficiently. Simultaneously, the mapping of virtual and physical networks with several providers consumes more time along with energy. In order to resolve this issue, VNM has been mapped by making use of Graph Theory (GT) matching, a well-studied database topic. (i) Brain Storm Optimization Graph Theory (BSOGT) is introduced for modeling a virtual network request in the form of a GT with different resource constraints, and the substrate networks here is considered being a graph. For this graph the nodes and edges comprise of attributes that indicate their constraints. (ii) The algorithm that has been recently introduced executes graph decomposition into several topology patterns. Thereafter the BSOGT is executed to solve any issues that pertain to mapping. (iii) The model that has been presented here, ERVNM and the BSOGT are used with a specific mapping energy computation function. (iv) Issues pertaining to these are categorized as being those related to virtual network mapping as the ACGT and optimal solution are drawn by using effective integer linear programming. ACGT, pragmatic approach, as well as the precise and two-stage algorithms performance is evaluated by means of cloud Simulator environment. 
The results obtained from simulation indicate that the BSOGT algorithm attains the objectives of cloud service providers with respect to Acceptance ratio, mapping percentage, processing time as well as Convergence Time.}, } @article {pmid30621340, year = {2019}, author = {Mishima, M and Uchiyama, H and Thomas, D and Taniguchi, RI and Roberto, R and Lima, JP and Teichrieb, V}, title = {Incremental 3D Cuboid Modeling with Drift Compensation.}, journal = {Sensors (Basel, Switzerland)}, volume = {19}, number = {1}, pages = {}, pmid = {30621340}, issn = {1424-8220}, abstract = {This paper presents a framework of incremental 3D cuboid modeling by using the mapping results of an RGB-D camera based simultaneous localization and mapping (SLAM) system. This framework is useful in accurately creating cuboid CAD models from a point cloud in an online manner. While performing the RGB-D SLAM, planes are incrementally reconstructed from a point cloud in each frame to create a plane map. Then, cuboids are detected in the plane map by analyzing the positional relationships between the planes, such as orthogonality, convexity, and proximity. Finally, the position, pose, and size of a cuboid are determined by computing the intersection of three perpendicular planes. To suppress the false detection of the cuboids, the cuboid shapes are incrementally updated with sequential measurements to check the uncertainty of the cuboids. In addition, the drift error of the SLAM is compensated by the registration of the cuboids. As an application of our framework, an augmented reality-based interactive cuboid modeling system was developed. 
In the evaluation at cluttered environments, the precision and recall of the cuboid detection were investigated, compared with a batch-based cuboid detection method, so that the advantages of our proposed method were clarified.}, } @article {pmid30621295, year = {2019}, author = {Mrozek, D and Dąbek, T and Małysiak-Mrozek, B}, title = {Scalable Extraction of Big Macromolecular Data in Azure Data Lake Environment.}, journal = {Molecules (Basel, Switzerland)}, volume = {24}, number = {1}, pages = {}, pmid = {30621295}, issn = {1420-3049}, support = {Microsoft Azure for Research Award//Microsoft Research/ ; grant No 02/020/RGH18/0148//habilitation grant of the Rector of the Silesian University of Technology, Gliwice, Poland/ ; BK/213/RAU2/2018//Statutory Research funds of Institute of Informatics, Silesian University of Technology, Gliwice, Poland/ ; }, mesh = {*Cloud Computing ; Computational Biology/methods ; *Databases, Protein ; Macromolecular Substances/*chemistry ; Nucleic Acid Conformation ; Proteins/*chemistry ; Software ; }, abstract = {Calculation of structural features of proteins, nucleic acids, and nucleic acid-protein complexes on the basis of their geometries and studying various interactions within these macromolecules, for which high-resolution structures are stored in Protein Data Bank (PDB), require parsing and extraction of suitable data stored in text files. To perform these operations on large scale in the face of the growing amount of macromolecular data in public repositories, we propose to perform them in the distributed environment of Azure Data Lake and scale the calculations on the Cloud. In this paper, we present dedicated data extractors for PDB files that can be used in various types of calculations performed over protein and nucleic acids structures in the Azure Data Lake. 
Results of our tests show that the Cloud storage space occupied by the macromolecular data can be successfully reduced by using compression of PDB files without significant loss of data processing efficiency. Moreover, our experiments show that the performed calculations can be significantly accelerated when using large sequential files for storing macromolecular data and by parallelizing the calculations and data extractions that precede them. Finally, the paper shows how all the calculations can be performed in a declarative way in U-SQL scripts for Data Lake Analytics.}, } @article {pmid30617339, year = {2019}, author = {Topol, EJ}, title = {High-performance medicine: the convergence of human and artificial intelligence.}, journal = {Nature medicine}, volume = {25}, number = {1}, pages = {44-56}, pmid = {30617339}, issn = {1546-170X}, support = {UL1TR002550/NH/NIH HHS/United States ; }, mesh = {Algorithms ; *Artificial Intelligence ; Data Analysis ; Deep Learning ; Humans ; *Medicine ; Physicians ; }, abstract = {The use of artificial intelligence, and the deep-learning subtype in particular, has been enabled by the use of labeled big data, along with markedly enhanced computing power and cloud storage, across all sectors. In medicine, this is beginning to have an impact at three levels: for clinicians, predominantly via rapid, accurate image interpretation; for health systems, by improving workflow and the potential for reducing medical errors; and for patients, by enabling them to process their own data to promote health. The current limitations, including bias, privacy and security, and lack of transparency, along with the future directions of these applications will be discussed in this article. 
Over time, marked improvements in accuracy, productivity, and workflow will likely be actualized, but whether that will be used to improve the patient-doctor relationship or facilitate its erosion remains to be seen.}, } @article {pmid30595595, year = {2018}, author = {Ensmenger, N}, title = {The Environmental History of Computing.}, journal = {Technology and culture}, volume = {59}, number = {4S}, pages = {S7-S33}, doi = {10.1353/tech.2018.0148}, pmid = {30595595}, issn = {0040-165X}, abstract = {From Charles Babbage's Difference Engine (a product of an increasingly global British maritime empire) to Herman Hollerith's tabulating machine (designed to solve the problem of "seeing like a state" in the newly trans-continental American Republic) to the emergence of the modern petrochemical industry, information technologies have always been closely associated with the human desire to understand and manipulate their physical environment. More recently, humankind has started to realize the environmental impacts of information technology, including not only the toxic byproducts associated with their production, but also the polluting effects of the massive amounts of energy and water required by data centers at Google and Facebook (whose physicality is conveniently and deliberately camouflaged behind the disembodied, ethereal "cloud"). 
This paper grounds the history of information technology in the material world by focusing on the relationship between "computing power" and more traditional processes of resource extraction, exchange, management, and consumption.}, } @article {pmid30588671, year = {2019}, author = {Li, Y and Liu, P and Li, Y and Fan, H and Su, P and Peng, SL and Park, DC and Rodrigue, KM and Jiang, H and Faria, AV and Ceritoglu, C and Miller, M and Mori, S and Lu, H}, title = {ASL-MRICloud: An online tool for the processing of ASL MRI data.}, journal = {NMR in biomedicine}, volume = {32}, number = {2}, pages = {e4051}, pmid = {30588671}, issn = {1099-1492}, support = {R37 AG006265/GF/NIH HHS/United States ; R01 AG006265/AG/NIA NIH HHS/United States ; R01 NS086888/GF/NIH HHS/United States ; R01 NS106711/NS/NINDS NIH HHS/United States ; R43 NS078917/GF/NIH HHS/United States ; R01 MH084021/MH/NIMH NIH HHS/United States ; R01 AG042753/GF/NIH HHS/United States ; R37 AG006265/AG/NIA NIH HHS/United States ; R01 NS084957/NS/NINDS NIH HHS/United States ; R01 AG047972/GF/NIH HHS/United States ; P41 EB015909/GF/NIH HHS/United States ; R21 NS085634/NS/NINDS NIH HHS/United States ; R21 NS085634/GF/NIH HHS/United States ; R01 NS086888/NS/NINDS NIH HHS/United States ; R01 AG047972/AG/NIA NIH HHS/United States ; RF1 AG006265/AG/NIA NIH HHS/United States ; R21 NS095342/GF/NIH HHS/United States ; R01 MH084021/GF/NIH HHS/United States ; R43 NS078917/NS/NINDS NIH HHS/United States ; R01 NS106711/GF/NIH HHS/United States ; P41 EB015909/EB/NIBIB NIH HHS/United States ; R21 NS095342/NS/NINDS NIH HHS/United States ; R01 NS084957/GF/NIH HHS/United States ; R01 AG042753/AG/NIA NIH HHS/United States ; }, mesh = {Adult ; Aged ; Aged, 80 and over ; Arteries/*physiology ; Cerebrovascular Circulation/physiology ; *Cloud Computing ; Female ; Humans ; *Magnetic Resonance Imaging ; Male ; Middle Aged ; Perfusion ; *Spin Labels ; Young Adult ; }, abstract = {Arterial spin labeling (ASL) MRI is increasingly used in 
research and clinical settings. The purpose of this work is to develop a cloud-based tool for ASL data processing, referred to as ASL-MRICloud, which may be useful to the MRI community. In contrast to existing ASL toolboxes, which are based on software installation on the user's local computer, ASL-MRICloud uses a web browser for data upload and results download, and the computation is performed on the remote server. As such, this tool is independent of the user's operating system, software version, and CPU speed. The ASL-MRICloud tool was implemented to be compatible with data acquired by scanners from all major MRI manufacturers, is capable of processing several common forms of ASL, including pseudo-continuous ASL and pulsed ASL, and can process single-delay and multi-delay ASL data. The outputs of ASL-MRICloud include absolute and relative values of cerebral blood flow, arterial transit time, voxel-wise masks indicating regions with potential hyper-perfusion and hypo-perfusion, and an image quality index. The ASL tool is also integrated with a T1 -based brain segmentation and normalization tool in MRICloud to allow generation of parametric maps in standard brain space as well as region-of-interest values. 
The tool was tested on a large data set containing 309 ASL scans as well as on publicly available ASL data from the Alzheimer's Disease Neuroimaging Initiative (ADNI) study.}, } @article {pmid30563267, year = {2018}, author = {Oueida, S and Kotb, Y and Aloqaily, M and Jararweh, Y and Baker, T}, title = {An Edge Computing Based Smart Healthcare Framework for Resource Management.}, journal = {Sensors (Basel, Switzerland)}, volume = {18}, number = {12}, pages = {}, pmid = {30563267}, issn = {1424-8220}, mesh = {Automation ; Computer Simulation ; *Delivery of Health Care ; *Health Resources ; Humans ; *Internet ; Models, Theoretical ; Reproducibility of Results ; Robotics ; }, abstract = {The revolution in information technologies, and the spread of the Internet of Things (IoT) and smart city industrial systems, have fostered widespread use of smart systems. As a complex, 24/7 service, healthcare requires efficient and reliable follow-up on daily operations, service and resources. Cloud and edge computing are essential for smart and efficient healthcare systems in smart cities. Emergency departments (ED) are real-time systems with complex dynamic behavior, and they require tailored techniques to model, simulate and optimize system resources and service flow. ED issues are mainly due to resource shortage and resource assignment efficiency. In this paper, we propose a resource preservation net (RPN) framework using Petri net, integrated with custom cloud and edge computing suitable for ED systems. The proposed framework is designed to model non-consumable resources and is theoretically described and validated. RPN is applicable to a real-life scenario where key performance indicators such as patient length of stay (LoS), resource utilization rate and average patient waiting time are modeled and optimized. As the system must be reliable, efficient and secure, the use of cloud and edge computing is critical. 
The proposed framework is simulated, which highlights significant improvements in LoS, resource utilization and patient waiting time.}, } @article {pmid30560812, year = {2019}, author = {Baldassano, S and Zhao, X and Brinkmann, B and Kremen, V and Bernabei, J and Cook, M and Denison, T and Worrell, G and Litt, B}, title = {Cloud computing for seizure detection in implanted neural devices.}, journal = {Journal of neural engineering}, volume = {16}, number = {2}, pages = {026016}, pmid = {30560812}, issn = {1741-2552}, support = {U24 NS063930/NS/NINDS NIH HHS/United States ; UH2 NS095495/NS/NINDS NIH HHS/United States ; T32 NS091006/NS/NINDS NIH HHS/United States ; R01 NS099348/NS/NINDS NIH HHS/United States ; K01 ES025436/ES/NIEHS NIH HHS/United States ; UH3 NS095495/NS/NINDS NIH HHS/United States ; R01 NS063039/NS/NINDS NIH HHS/United States ; R01 NS092882/NS/NINDS NIH HHS/United States ; }, mesh = {Algorithms ; *Cloud Computing ; Electric Stimulation Therapy ; *Electrodes, Implanted ; Electroencephalography/instrumentation/methods ; Humans ; Linear Models ; Machine Learning ; ROC Curve ; Seizures/*diagnosis/therapy ; Telemetry ; }, abstract = {OBJECTIVE: Closed-loop implantable neural stimulators are an exciting treatment option for patients with medically refractory epilepsy, with a number of new devices in or nearing clinical trials. These devices must accurately detect a variety of seizure types in order to reliably deliver therapeutic stimulation. While effective, broadly-applicable seizure detection algorithms have recently been published, these methods are too computationally intensive to be directly deployed in an implantable device. We demonstrate a strategy that couples devices to cloud computing resources in order to implement complex seizure detection methods on an implantable device platform.

APPROACH: We use a sensitive gating algorithm capable of running on-board a device to identify potential seizure epochs and transmit these epochs to a cloud-based analysis platform. A precise seizure detection algorithm is then applied to the candidate epochs, leveraging cloud computing resources for accurate seizure event detection. This seizure detection strategy was developed and tested on eleven human implanted device recordings generated using the NeuroVista Seizure Advisory System.

MAIN RESULTS: The gating algorithm achieved high-sensitivity detection using a small feature set as input to a linear classifier, compatible with the computational capability of next-generation implantable devices. The cloud-based precision algorithm successfully identified all seizures transmitted by the gating algorithm while significantly reducing the false positive rate. Across all subjects, this joint approach detected 99% of seizures with a false positive rate of 0.03 h[-1].

SIGNIFICANCE: We present a novel framework for implementing computationally intensive algorithms on human data recorded from an implanted device. By using telemetry to intelligently access cloud-based computational resources, the next generation of neuro-implantable devices will leverage sophisticated algorithms with potential to greatly improve device performance and patient outcomes.}, } @article {pmid30540457, year = {2019}, author = {Kainrad, T and Hunold, S and Seidel, T and Langer, T}, title = {LigandScout Remote: A New User-Friendly Interface for HPC and Cloud Resources.}, journal = {Journal of chemical information and modeling}, volume = {59}, number = {1}, pages = {31-37}, doi = {10.1021/acs.jcim.8b00716}, pmid = {30540457}, issn = {1549-960X}, mesh = {Computational Biology ; *Computing Methodologies ; Information Storage and Retrieval ; *Software ; *User-Computer Interface ; }, abstract = {High-performance computing (HPC) clusters play a major role in scientific research. However, working with these clusters is often cumbersome, especially for researchers without a formal background in computer science. It requires preparation and transfer of the input data, manual gathering of results, and command-line expertise. Current approaches for improving accessibility to remote HPC clusters are focused on providing web-based graphical front-ends that allow jobs to be submitted to the distributed resource management system running on the cluster. This comes with significant usability benefits over command-line usage but does not circumvent the need for manual handling of the input and output files. With LigandScout Remote, we propose a different solution. Our software enables the seamless integration of HPC resources into the LigandScout desktop application that scientists use also in their day-to-day work. By handling necessary data conversion and network communication transparently to the user, this approach completely evades any HPC usability barriers. 
We show that the developed software combines the usability of local graphical desktop applications with the performance of HPC clusters.}, } @article {pmid30535405, year = {2019}, author = {Peters, K and Bradbury, J and Bergmann, S and Capuccini, M and Cascante, M and de Atauri, P and Ebbels, TMD and Foguet, C and Glen, R and Gonzalez-Beltran, A and Günther, UL and Handakas, E and Hankemeier, T and Haug, K and Herman, S and Holub, P and Izzo, M and Jacob, D and Johnson, D and Jourdan, F and Kale, N and Karaman, I and Khalili, B and Emami Khonsari, P and Kultima, K and Lampa, S and Larsson, A and Ludwig, C and Moreno, P and Neumann, S and Novella, JA and O'Donovan, C and Pearce, JTM and Peluso, A and Piras, ME and Pireddu, L and Reed, MAC and Rocca-Serra, P and Roger, P and Rosato, A and Rueedi, R and Ruttkies, C and Sadawi, N and Salek, RM and Sansone, SA and Selivanov, V and Spjuth, O and Schober, D and Thévenot, EA and Tomasoni, M and van Rijswijk, M and van Vliet, M and Viant, MR and Weber, RJM and Zanetti, G and Steinbeck, C}, title = {PhenoMeNal: processing and analysis of metabolomics data in the cloud.}, journal = {GigaScience}, volume = {8}, number = {2}, pages = {}, pmid = {30535405}, issn = {2047-217X}, support = {BB/H024921/1/BB_/Biotechnology and Biological Sciences Research Council/United Kingdom ; BB/I000771/1/BB_/Biotechnology and Biological Sciences Research Council/United Kingdom ; MR/M009157/1/MRC_/Medical Research Council/United Kingdom ; }, mesh = {Cloud Computing ; Humans ; Metabolomics/*methods ; *Software ; Workflow ; }, abstract = {BACKGROUND: Metabolomics is the comprehensive study of a multitude of small molecules to gain insight into an organism's metabolism. The research field is dynamic and expanding with applications across biomedical, biotechnological, and many other applied biological domains. Its computationally intensive nature has driven requirements for open data formats, data repositories, and data analysis tools. 
However, the rapid progress has resulted in a mosaic of independent, and sometimes incompatible, analysis methods that are difficult to connect into a useful and complete data analysis solution.

FINDINGS: PhenoMeNal (Phenome and Metabolome aNalysis) is an advanced and complete solution to set up Infrastructure-as-a-Service (IaaS) that brings workflow-oriented, interoperable metabolomics data analysis platforms into the cloud. PhenoMeNal seamlessly integrates a wide array of existing open-source tools that are tested and packaged as Docker containers through the project's continuous integration process and deployed based on a kubernetes orchestration framework. It also provides a number of standardized, automated, and published analysis workflows in the user interfaces Galaxy, Jupyter, Luigi, and Pachyderm.

CONCLUSIONS: PhenoMeNal constitutes a keystone solution in cloud e-infrastructures available for metabolomics. PhenoMeNal is a unique and complete solution for setting up cloud e-infrastructures through easy-to-use web interfaces that can be scaled to any custom public and private cloud environment. By harmonizing and automating software installation and configuration and through ready-to-use scientific workflow user interfaces, PhenoMeNal has succeeded in providing scientists with workflow-driven, reproducible, and shareable metabolomics data analysis platforms that are interfaced through standard data formats, representative datasets, versioned, and have been tested for reproducibility and interoperability. The elastic implementation of PhenoMeNal further allows easy adaptation of the infrastructure to other application areas and 'omics research domains.}, } @article {pmid30500772, year = {2019}, author = {Shaikh, MO and Zhu, PY and Wang, CC and Du, YC and Chuang, CH}, title = {Electrochemical immunosensor utilizing electrodeposited Au nanocrystals and dielectrophoretically trapped PS/Ag/ab-HSA nanoprobes for detection of microalbuminuria at point of care.}, journal = {Biosensors & bioelectronics}, volume = {126}, number = {}, pages = {572-580}, doi = {10.1016/j.bios.2018.11.035}, pmid = {30500772}, issn = {1873-4235}, mesh = {Antibodies, Immobilized/chemistry/immunology ; *Biosensing Techniques ; Carbon/chemistry ; Gold/chemistry ; Humans ; *Immunoassay ; Immunoconjugates/*chemistry ; Limit of Detection ; Metal Nanoparticles/chemistry ; Nanoparticles/*chemistry ; Nanoshells/chemistry ; Polystyrenes/chemistry ; }, abstract = {In this study, we have fabricated a simple disposable electrochemical immunosensor for the point of care testing of microalbuminuria, a well-known clinical biomarker for the onset of chronic kidney disease. 
The immunosensor is fabricated by screen-printing carbon interdigitated microelectrodes on a flexible plastic substrate and utilizes electrochemical impedance spectroscopy to enable direct and label free immunosensing by analyzing interfacial changes on the electrode surface. To improve conductivity and biocompatibility of the screen-printed electrodes, we have modified it with gold nanoparticles, which are electrodeposited using linear sweep voltammetry. To enable efficient immobilization of HSA antibodies, we have developed novel PS/Ag/ab-HSA nanoprobes (polystyrene nanoparticle core with silver nanoshells covalently conjugated to HSA antibodies), and these nanoprobes are trapped on the electrode surface using dielectrophoresis. Each immunosensor has two sensing sites corresponding to test and control to improve specificity by performing differential analysis. Immunosensing results show that the normalized impedance response is linearly dependent on albumin concentration in the clinically relevant range with good repeatability. We have also developed a portable impedance readout module that can analyze the data obtained from the immunosensor and transmit it wirelessly for cloud computing. Consequently, the developed immunosensing platform can be extended to the detection of a range of immunoreactions and shows promise for point of diagnosis and public healthcare monitoring.}, } @article {pmid30496194, year = {2018}, author = {Sun, J and Wang, X and Wang, S and Ren, L}, title = {A searchable personal health records framework with fine-grained access control in cloud-fog computing.}, journal = {PloS one}, volume = {13}, number = {11}, pages = {e0207543}, pmid = {30496194}, issn = {1932-6203}, mesh = {*Cloud Computing ; *Computer Security ; *Data Mining ; *Electronic Health Records ; Humans ; *Mobile Applications ; }, abstract = {Fog computing can extend cloud computing to the edge of the network so as to reduce latency and network congestion. 
However, existing encryption schemes were rarely used in fog environment, resulting in high computational and storage overhead. Aiming at the demands of local information for terminal device and the shortcomings of cloud computing framework in supporting mobile applications, by taking the hospital scene as an example, a searchable personal health records framework with fine-grained access control in cloud-fog computing is proposed. The proposed framework combines the attribute-based encryption (ABE) technology and search encryption (SE) technology to implement keyword search function and fine-grained access control ability. When keyword index and trapdoor match are successful, the cloud server provider only returns relevant search results to the user, thus achieving a more accurate search. At the same time, the scheme is multi-authority, and the key leakage problem is solved by dividing the user secret key distribution task. Moreover, in the proposed scheme, we securely outsource part of the encryption and decryption operations to the fog node. It is effective both in local resources and in resource-constrained mobile devices. Based on the decisional q-parallel bilinear Diffie-Hellman exponent (q-DBDHE) assumption and decisional bilinear Diffie-Hellman (DBDH) assumption, our scheme is proven to be secure. 
Simulation experiments show that our scheme is efficient in the cloud-fog environment.}, } @article {pmid30486253, year = {2018}, author = {Nguyen, VC and Dinh, NT and Kim, Y}, title = {A Distributed NFV-Enabled Edge Cloud Architecture for ICN-Based Disaster Management Services.}, journal = {Sensors (Basel, Switzerland)}, volume = {18}, number = {12}, pages = {}, pmid = {30486253}, issn = {1424-8220}, support = {12221-14-1001//MSIP(Ministry of Science, ICT and Future Planning) / IITP(Institute for Information \& Communications Technology Promotion)/ ; }, abstract = {In disaster management services, the dynamic binding between roles and individuals for creating response teams across multiple organizations to act during a disaster recovery time period is an important task. Existing studies have shown that IP-based or traditional telephony solutions are not well-suited to deal with such group communication. Research has also shown the advantages of leveraging information centric networking (ICN) in providing essential communication in disaster management services. However, present studies use a centralized networking architecture for disaster management, in which disaster information is gathered and processed at a centralized management center before incident responses are made and warning messages are sent out. The centralized design can be inefficient in terms of scalability and communication. The reason is that when the network is very large (i.e., country level), the management for disaster services becomes very complicated, with a large number of organizations and offices. Disaster data are required to be transmitted over a long path before reaching the central management center. As a result, the transmission overhead and delay are high. Especially when the network is fragmented and network connectivity from a disaster-affected region to the central management center is disconnected, the service may be corrupted. 
In this paper, we designed and implemented a distributed edge cloud architecture based on ICN and network function virtualization (NFV) to address the above issues. In the proposed architecture, disaster management functions with predefined disaster templates were implemented at edge clouds closed to local regions to reduce the communication overhead and increase the service availability. The real implementation and performance evaluation showed that the proposed architecture achieves a significant improvement in terms of average bandwidth utilization, disaster notification delivery latency, routing convergence time, and successful request ratio compared to the existing approaches.}, } @article {pmid30484337, year = {2019}, author = {Banegas-Luna, AJ and Imbernón, B and Llanes Castro, A and Pérez-Garrido, A and Cerón-Carrasco, JP and Gesing, S and Merelli, I and D'Agostino, D and Pérez-Sánchez, H}, title = {Advances in distributed computing with modern drug discovery.}, journal = {Expert opinion on drug discovery}, volume = {14}, number = {1}, pages = {9-22}, doi = {10.1080/17460441.2019.1552936}, pmid = {30484337}, issn = {1746-045X}, mesh = {Algorithms ; Animals ; Computational Chemistry/*methods ; *Computer Simulation ; Computing Methodologies ; Drug Discovery/*methods ; Humans ; Software ; Time Factors ; }, abstract = {Computational chemistry dramatically accelerates the drug discovery process and high-performance computing (HPC) can be used to speed up the most expensive calculations. Supporting a local HPC infrastructure is both costly and time-consuming, and, therefore, many research groups are moving from in-house solutions to remote-distributed computing platforms. Areas covered: The authors focus on the use of distributed technologies, solutions, and infrastructures to gain access to HPC capabilities, software tools, and datasets to run the complex simulations required in computational drug discovery (CDD). 
Expert opinion: The use of computational tools can decrease the time to market of new drugs. HPC has a crucial role in handling the complex algorithms and large volumes of data required to achieve specificity and avoid undesirable side-effects. Distributed computing environments have clear advantages over in-house solutions in terms of cost and sustainability. The use of infrastructures relying on virtualization reduces set-up costs. Distributed computing resources can be difficult to access, although web-based solutions are becoming increasingly available. There is a trade-off between cost-effectiveness and accessibility in using on-demand computing resources rather than free/academic resources. Graphics processing unit computing, with its outstanding parallel computing power, is becoming increasingly important.}, } @article {pmid30483400, year = {2018}, author = {Srivastava, M and Suvarna, S and Srivastava, A and Bharathiraja, S}, title = {Automated emergency paramedical response system.}, journal = {Health information science and systems}, volume = {6}, number = {1}, pages = {22}, pmid = {30483400}, issn = {2047-2501}, abstract = {With the evolution of technology, the fields of medicine and science have also witnessed numerous advancements. In medical emergencies, a few minutes can be the difference between life and death. The obstacles encountered while providing medical assistance can be eliminated by ensuring quicker care and accessible systems. To this effect, the proposed end-to-end system-automated emergency paramedical response system (AEPRS) is semi-autonomous and utilizes aerial distribution by drones, for providing medical supplies on site in cases of paramedical emergencies as well as for patients with a standing history of diseases. Security of confidential medical information is a major area of concern for patients. 
Confidentiality has been achieved by using decentralised distributed computing to ensure security for the users without involving third-party institutions. AEPRS focuses not only on urban areas but also on semi-urban and rural areas. In urban areas where access to internet is widely available, a healthcare chatbot caters to the individual users and provides a diagnosis based on the symptoms provided by the patients. In semi-urban and rural areas, community hospitals have the option of providing specialised healthcare in spite of the absence of a specialised doctor. Additionally, object recognition and face recognition by using the concept of edge AI enables deep neural networks to run on the edge, without the need for GPU or internet connectivity to connect to the cloud. AEPRS is an airborne emergency medical supply delivery system. It uses the data entered by the user to deduce the best possible solution, in case of an alerted emergency situation and responds to the user accordingly.}, } @article {pmid30462158, year = {2020}, author = {Alnasir, JJ and Shanahan, HP}, title = {The application of Hadoop in structural bioinformatics.}, journal = {Briefings in bioinformatics}, volume = {21}, number = {1}, pages = {96-105}, doi = {10.1093/bib/bby106}, pmid = {30462158}, issn = {1477-4054}, abstract = {The paper reviews the use of the Hadoop platform in structural bioinformatics applications. For structural bioinformatics, Hadoop provides a new framework to analyse large fractions of the Protein Data Bank that is key for high-throughput studies of, for example, protein-ligand docking, clustering of protein-ligand complexes and structural alignment. Specifically we review in the literature a number of implementations using Hadoop of high-throughput analyses and their scalability. We find that these deployments for the most part use known executables called from MapReduce rather than rewriting the algorithms. 
The scalability exhibits a variable behaviour in comparison with other batch schedulers, particularly as direct comparisons on the same platform are generally not available. Direct comparisons of Hadoop with batch schedulers are absent in the literature but we note there is some evidence that Message Passing Interface implementations scale better than Hadoop. A significant barrier to the use of the Hadoop ecosystem is the difficulty of the interface and configuration of a resource to use Hadoop. This will improve over time as interfaces to Hadoop, e.g. Spark improve, usage of cloud platforms (e.g. Azure and Amazon Web Services (AWS)) increases and standardised approaches such as Workflow Languages (i.e. Workflow Definition Language, Common Workflow Language and Nextflow) are taken up.}, } @article {pmid30459848, year = {2018}, author = {Bouzaglo, D and Chasida, I and Ezra Tsur, E}, title = {Distributed retrieval engine for the development of cloud-deployed biological databases.}, journal = {BioData mining}, volume = {11}, number = {}, pages = {26}, pmid = {30459848}, issn = {1756-0381}, abstract = {The integration of cloud resources with federated data retrieval has the potential of improving the maintenance, accessibility and performance of specialized databases in the biomedical field. However, such an integrative approach requires technical expertise in cloud computing, usage of a data retrieval engine and development of a unified data-model, which can encapsulate the heterogeneity of biological data. Here, a framework for the development of cloud-based biological specialized databases is proposed. It is powered by a distributed biodata retrieval system, able to interface with different data formats, as well as provides an integrated way for data exploration. The proposed framework was implemented using Java as the development environment, and MongoDB as the database manager. Syntactic analysis was based on BSON, jsoup, Apache Commons and w3c.dom open libraries. 
The framework is available at: http://nbel-lab.com and is distributed under the Creative Commons agreement.}, } @article {pmid30454054, year = {2018}, author = {Xu, B and Li, C and Zhuang, H and Wang, J and Wang, Q and Wang, C and Zhou, X}, title = {Distributed gene clinical decision support system based on cloud computing.}, journal = {BMC medical genomics}, volume = {11}, number = {Suppl 5}, pages = {100}, pmid = {30454054}, issn = {1755-8794}, mesh = {Algorithms ; Cloud Computing ; *Decision Support Systems, Clinical ; Genetic Variation ; Genotype ; High-Throughput Nucleotide Sequencing ; Humans ; Sequence Analysis, DNA ; *Software ; }, abstract = {BACKGROUND: The clinical decision support system can effectively break the limitations of doctors' knowledge and reduce the possibility of misdiagnosis to enhance health care. The traditional genetic data storage and analysis methods based on a stand-alone environment are unable to meet the computational requirements arising from rapid genetic data growth, owing to their limited scalability.

METHODS: In this paper, we propose a distributed gene clinical decision support system, which is named GCDSS. And a prototype is implemented based on cloud computing technology. At the same time, we present CloudBWA which is a novel distributed read mapping algorithm leveraging batch processing strategy to map reads on Apache Spark.

RESULTS: Experiments show that the distributed gene clinical decision support system GCDSS and the distributed read mapping algorithm CloudBWA have outstanding performance and excellent scalability. Compared with state-of-the-art distributed algorithms, CloudBWA achieves up to 2.63 times speedup over SparkBWA. Compared with stand-alone algorithms, CloudBWA with 16 cores achieves up to 11.59 times speedup over BWA-MEM with 1 core.

CONCLUSIONS: GCDSS is a distributed gene clinical decision support system based on cloud computing techniques. In particular, we incorporated a distributed genetic data analysis pipeline framework in the proposed GCDSS system. To boost the data processing of GCDSS, we propose CloudBWA, which is a novel distributed read mapping algorithm to leverage batch processing technique in mapping stage using Apache Spark platform.}, } @article {pmid30445782, year = {2018}, author = {Dinh, NT and Kim, Y}, title = {An Efficient Availability Guaranteed Deployment Scheme for IoT Service Chains over Fog-Core Cloud Networks.}, journal = {Sensors (Basel, Switzerland)}, volume = {18}, number = {11}, pages = {}, pmid = {30445782}, issn = {1424-8220}, support = {12221-14-1001//the IT R&D program of MSIP(Ministry of Science, ICT and 425 Future Planning) / IITP(Institute for Information & Communications Technology Promotion)/ ; }, abstract = {High availability is one of the important requirements of many end-to-end services in the Internet of Things (IoT). This is a critical issue in network function virtualization (NFV) and NFV-enabled service function chaining (SFC) due to hard- and soft-ware failures. Thus, merely mapping primary VNFs is not enough to ensure high availability, especially for SFCs deployed over fog - core cloud networks due to resource limitations of fogs. As a result, additional protection schemes, like VNF redundancy deployments, are required to improve the availability of SFCs to meet predefined requirements. With limited resources of fog instances, a cost-efficient protection scheme is required. This paper proposes a cost-efficient availability guaranteed deployment scheme for IoT services over fog-core cloud networks based on measuring the improvement potential of VNFs for improving the availability of SFCs. In addition, various techniques for redundancy placement for VNFs at the fog layer are also presented. 
Obtained analysis and simulation results show that the proposed scheme achieves a significant improvement in terms of the cost efficiency and scalability compared to the state-of-the-art approaches.}, } @article {pmid30445723, year = {2018}, author = {Din, IU and Kim, BS and Hassan, S and Guizani, M and Atiquzzaman, M and Rodrigues, JJPC}, title = {Information-Centric Network-Based Vehicular Communications: Overview and Research Opportunities.}, journal = {Sensors (Basel, Switzerland)}, volume = {18}, number = {11}, pages = {}, pmid = {30445723}, issn = {1424-8220}, support = {2018R1A2B6002399//National Research Foundation of Korea/ ; }, abstract = {Information Centric Network (ICN) is expected to be the favorable deployable future Internet paradigm. ICN intends to replace the current IP-based model with the name-based content-centric model, as it aims at providing better security, scalability, and content distribution. However, it is a challenging task to conceive how ICN can be linked with the other most emerging paradigm, i.e., Vehicular Ad hoc Network (VANET). In this article, we present an overview of the ICN-based VANET approach in line with its contributions and research challenges. In addition, the connectivity issues of the vehicular ICN model are presented with some other emerging paradigms, such as Software Defined Network (SDN), Cloud, and Edge computing. 
Moreover, some ICN-based VANET research opportunities, in terms of security, mobility, routing, naming, caching, and fifth generation (5G) communications, are also covered at the end of the paper.}, } @article {pmid30443828, year = {2019}, author = {Ali, M and Zafar, J and Zafar, H and O'Halloran, M and Sharif, F}, title = {Multiband ultra-thin flexible on-body transceivers for wearable health informatics.}, journal = {Australasian physical & engineering sciences in medicine}, volume = {42}, number = {1}, pages = {53-63}, doi = {10.1007/s13246-018-0711-2}, pmid = {30443828}, issn = {1879-5447}, mesh = {Absorption, Radiation ; Bone and Bones/physiology ; *Human Body ; Humans ; *Medical Informatics ; *Pliability ; *Wearable Electronic Devices ; }, abstract = {Substantial concentration has been associated to the monitoring of vital signs and human activity using wireless body area networks. However, one of the key technical challenges is to characterize an optimized transceiver geometry for desired isolation/bandwidth and specific absorption rate (SAR) characteristics, independent of transceiver chip on-body location. A microwave performance evaluation of monopole wearable transceiver was completed and results presented. A novel on-body antenna transceiver was designed, simulated and fabricated using an ultra-thin substrate RO 3010 (h = 250 µm) that ensures compactness and enhanced flexibility. The designed transceiver was evolved using very high value of dielectric constant using CST® Studio Suit and FEKO® numerical platforms. The on-body characterization for both fatty and bone tissues was experimentally verified for a bandwidth of 200 MHz. The fabricated configuration and real-time testing provides very promising microwave radiation parameters with a gain of 2.69 dBi, S11 < - 13 dB at an operational frequency of 2.46 GHz. Multi-banding was achieved by introducing fractals in the design of the printed monopole. 
SAR calculations for feet, head and arm at microwave power levels ranging from 100 to 800 mW are incorporated. Furthermore, the real time data acquisition using developed transceiver and its experimental verification is illustrated.}, } @article {pmid30439700, year = {2020}, author = {Ahlbrandt, J and Lablans, M and Glocker, K and Stahl-Toyota, S and Maier-Hein, K and Maier-Hein, L and Ückert, F}, title = {Modern Information Technology for Cancer Research: What's in IT for Me? An Overview of Technologies and Approaches.}, journal = {Oncology}, volume = {98}, number = {6}, pages = {363-369}, doi = {10.1159/000493638}, pmid = {30439700}, issn = {1423-0232}, mesh = {Biomedical Research/methods ; Humans ; Information Technology ; Machine Learning ; Medical Oncology/*methods ; Neoplasms/*diagnosis/*therapy ; Reproducibility of Results ; }, abstract = {Information technology (IT) can enhance or change many scenarios in cancer research for the better. In this paper, we introduce several examples, starting with clinical data reuse and collaboration including data sharing in research networks. Key challenges are semantic interoperability and data access (including data privacy). We deal with gathering and analyzing genomic information, where cloud computing, uncertainties and reproducibility challenge researchers. Also, new sources for additional phenotypical data are shown in patient-reported outcome and machine learning in imaging. Last, we focus on therapy assistance, introducing tools used in molecular tumor boards and techniques for computer-assisted surgery. We discuss the need for metadata to aggregate and analyze data sets reliably. 
We conclude with an outlook towards a learning health care system in oncology, which connects bench and bedside by employing modern IT solutions.}, } @article {pmid30424534, year = {2018}, author = {Jia, B and Zhou, T and Li, W and Liu, Z and Zhang, J}, title = {A Blockchain-Based Location Privacy Protection Incentive Mechanism in Crowd Sensing Networks.}, journal = {Sensors (Basel, Switzerland)}, volume = {18}, number = {11}, pages = {}, pmid = {30424534}, issn = {1424-8220}, support = {41761086, 61461037, 61761035, 61661041//National Natural Science Foundation of China/ ; No.2016YFB0502102//National Science and Technology Major Project of the Ministry of Science and Technology of China/ ; 2017JQ09//Natural Science Foundation of Inner Mongolia 209 Autonomous Region of China/ ; CYYC5016//the "Grassland Elite" Project of the Inner Mongolia 210 Autonomous Region/ ; }, abstract = {Crowd sensing is a perception mode that recruits mobile device users to complete tasks such as data collection and cloud computing. For the cloud computing platform, crowd sensing can not only enable users to collaborate to complete large-scale awareness tasks but also provide users for types, social attributes, and other information for the cloud platform. In order to improve the effectiveness of crowd sensing, many incentive mechanisms have been proposed. Common incentives are monetary reward, entertainment & gamification, social relation, and virtual credit. However, there are rare incentives based on privacy protection basically. In this paper, we proposed a mixed incentive mechanism which combined privacy protection and virtual credit called a blockchain-based location privacy protection incentive mechanism in crowd sensing networks. Its network structure can be divided into three parts which are intelligence crowd sensing networks, confusion mechanism, and blockchain. 
We conducted the experiments in the campus environment and the results show that the incentive mechanism proposed in this paper has an efficacious effect in stimulating user participation.}, } @article {pmid30424383, year = {2018}, author = {Lin, WY and Verma, VK and Lee, MY and Lai, CS}, title = {Activity Monitoring with a Wrist-Worn, Accelerometer-Based Device.}, journal = {Micromachines}, volume = {9}, number = {9}, pages = {}, pmid = {30424383}, issn = {2072-666X}, support = {CIRPG5E0012, CIRPD5E0012//Chang-Gung Medical Research Project/ ; }, abstract = {This study condenses a huge amount of raw data measured from a MEMS accelerometer-based, wrist-worn device on different levels of physical activities (PAs) for subjects wearing the device 24 h a day continuously. In this study, we have employed the device to build up assessment models for quantifying activities, to develop an algorithm for sleep duration detection and to assess the regularity of activity of daily living (ADL) quantitatively. A new parameter, the activity index (AI), has been proposed to represent the quantity of activities and can be used to categorize different PAs into 5 levels, namely, rest/sleep, sedentary, light, moderate, and vigorous activity states. Another new parameter, the regularity index (RI), was calculated to represent the degree of regularity for ADL. The methods proposed in this study have been used to monitor a subject's daily PA status and to assess sleep quality, along with the quantitative assessment of the regularity of activity of daily living (ADL) with the 24-h continuously recorded data over several months to develop activity-based evaluation models for different medical-care applications. 
This work provides simple models for activity monitoring based on the accelerometer-based, wrist-worn device without trying to identify the details of types of activity and that are suitable for further applications combined with cloud computing services.}, } @article {pmid30423831, year = {2018}, author = {Suárez-Albela, M and Fraga-Lamas, P and Fernández-Caramés, TM}, title = {A Practical Evaluation on RSA and ECC-Based Cipher Suites for IoT High-Security Energy-Efficient Fog and Mist Computing Devices.}, journal = {Sensors (Basel, Switzerland)}, volume = {18}, number = {11}, pages = {}, pmid = {30423831}, issn = {1424-8220}, support = {ED431C 2016-045, ED341D R2016/012, ED431G/01//Xunta de Galicia and ERDF funds of the EU (AEI/FEDER, UE)/ ; TEC2013-47141-C4-1-R, TEC2015-69648-REDC, TEC2016-75067-C4-1-R//Agencia Estatal de Investigación of Spain and ERDF funds of the EU (AEI/FEDER, UE)/ ; }, abstract = {The latest Internet of Things (IoT) edge-centric architectures allow for unburdening higher layers from part of their computational and data processing requirements. In the specific case of fog computing systems, they reduce greatly the requirements of cloud-centric systems by processing in fog gateways part of the data generated by end devices, thus providing services that were previously offered by a remote cloud. Thanks to recent advances in System-on-Chip (SoC) energy efficiency, it is currently possible to create IoT end devices with enough computational power to process the data generated by their sensors and actuators while providing complex services, which in recent years derived into the development of the mist computing paradigm. To allow mist computing nodes to provide the previously mentioned benefits and guarantee the same level of security as in other architectures, end-to-end standard security mechanisms need to be implemented. 
In this paper, a high-security energy-efficient fog and mist computing architecture and a testbed are presented and evaluated. The testbed makes use of Transport Layer Security (TLS) 1.2 Elliptic Curve Cryptography (ECC) and Rivest-Shamir-Adleman (RSA) cipher suites (that comply with the yet to come TLS 1.3 standard requirements), which are evaluated and compared in terms of energy consumption and data throughput for a fog gateway and two mist end devices. The obtained results allow a conclusion that ECC outperforms RSA in both energy consumption and data throughput for all the tested security levels. Moreover, the importance of selecting a proper ECC curve is demonstrated, showing that, for the tested devices, some curves present worse energy consumption and data throughput than other curves that provide a higher security level. As a result, this article not only presents a novel mist computing testbed, but also provides guidelines for future researchers to find out efficient and secure implementations for advanced IoT devices.}, } @article {pmid30417014, year = {2018}, author = {Al-Absi, AA and Al-Sammarraie, NA and Shaher Yafooz, WM and Kang, DK}, title = {Parallel MapReduce: Maximizing Cloud Resource Utilization and Performance Improvement Using Parallel Execution Strategies.}, journal = {BioMed research international}, volume = {2018}, number = {}, pages = {7501042}, pmid = {30417014}, issn = {2314-6141}, mesh = {Algorithms ; Cloud Computing ; Computational Biology/*methods ; Models, Theoretical ; }, abstract = {MapReduce is the preferred cloud computing framework used in large data analysis and application processing. MapReduce frameworks currently in place suffer performance degradation due to the adoption of sequential processing approaches with little modification and thus exhibit underutilization of cloud resources. To overcome this drawback and reduce costs, we introduce a Parallel MapReduce (PMR) framework in this paper. 
We design a novel parallel execution strategy of Map and Reduce worker nodes. Our strategy enables further performance improvement and efficient utilization of cloud resources through execution of Map and Reduce functions to utilize multicore environments available with computing nodes. We explain in detail makespan modeling and working principle of the PMR framework in the paper. Performance of PMR is compared with Hadoop through experiments considering three biomedical applications. Experiments conducted for BLAST, CAP3, and DeepBind biomedical applications report makespan time reduction of 38.92%, 18.00%, and 34.62% considering the PMR framework against Hadoop framework. Experiments' results prove that the PMR cloud computing platform proposed is robust, cost-effective, and scalable, which sufficiently supports diverse applications on public and private cloud platforms. Consequently, overall presentation and results indicate that there is good matching between theoretical makespan modeling presented and experimental values investigated.}, } @article {pmid30414571, year = {2019}, author = {Homocianu, D and Homocianu, M}, title = {GiPlot: An interactive cloud-based tool for visualizing and interpreting large spectral data sets.}, journal = {Spectrochimica acta. Part A, Molecular and biomolecular spectroscopy}, volume = {209}, number = {}, pages = {234-240}, doi = {10.1016/j.saa.2018.10.046}, pmid = {30414571}, issn = {1873-3557}, abstract = {Latest advances in technology and the growing amount of experimental and business data have increased the number of users accessing on-line tools dedicated to quickly visualize and analyse large data sets. This paper describes the development and functionality of a new interactive cloud computing based plotting tool (GiPlot - Google-based Interactive Plot) easy-to-use for universal data. 
It has interactive features that facilitate data share and interpretation, and selection of specific data suitable for further uses and detailed studies. It also allows quick and step-by-step visualizations of the impact of various experimental conditions on spectral data sets. For a detailed illustration of the features of this interactive plotting tool, we have used mainly spectral data for a given solute dissolved in mixed solvents and for changes in the absorption and/or fluorescence properties of a solute solution in the presence of different chemical stimuli. The most important features and functionalities of this new tool have also been summarized and suggestively highlighted through a short collection of video tutorials containing many examples, developed by the authors of this paper as a support for both the tool and this paper.}, } @article {pmid30410714, year = {2018}, author = {Park, J and Lee, DH}, title = {Privacy Preserving k-Nearest Neighbor for Medical Diagnosis in e-Health Cloud.}, journal = {Journal of healthcare engineering}, volume = {2018}, number = {}, pages = {4073103}, pmid = {30410714}, issn = {2040-2295}, mesh = {Algorithms ; Cloud Computing/*standards ; Computer Security/*standards ; Confidentiality/*standards ; *Electronic Health Records ; Humans ; *Telemedicine ; }, abstract = {Cloud computing is highly suitable for medical diagnosis in e-health services where strong computing ability is required. However, in spite of the huge benefits of adopting the cloud computing, the medical diagnosis field is not yet ready to adopt the cloud computing because it contains sensitive data and hence using the cloud computing might cause a great concern in privacy infringement. For instance, a compromised e-health cloud server might expose the medical dataset outsourced from multiple medical data owners or infringe on the privacy of a patient inquirer by leaking his/her symptom or diagnosis result. 
In this paper, we propose a medical diagnosis system using e-health cloud servers in a privacy preserving manner when medical datasets are owned by multiple data owners. The proposed system is the first one that achieves the privacy of medical dataset, symptoms, and diagnosis results and hides the data access pattern even from e-health cloud servers performing computations using the data while it is still robust against collusion of the entities. As a building block of the proposed diagnosis system, we design a novel privacy preserving protocol for finding the k data with the highest similarity (PE-FTK) to a given symptom. The protocol reduces the average running time by 35% compared to that of a previous work in the literature. Moreover, the result of the previous work is probabilistic, i.e., the result can contain some error, while the result of our PE-FTK is deterministic, i.e., the result is correct without any error probability.}, } @article {pmid30407623, year = {2019}, author = {Berg, SJ and Grosso, NR and Sherrier, MP and Mudrick, K and Ohr, M and Hwang, HT and Park, YJ and Callaghan, MV and Frey, SK and Sudicky, EA}, title = {Natural Stimuli Calibration with Fining Direction Regularization in an Integrated Hydrologic Model.}, journal = {Ground water}, volume = {57}, number = {1}, pages = {21-35}, doi = {10.1111/gwat.12842}, pmid = {30407623}, issn = {1745-6584}, mesh = {Calibration ; *Groundwater ; Hydrology ; Models, Theoretical ; Rivers ; Water Movements ; }, abstract = {The interaction between surface water and groundwater during flood events is a complex process that has traditionally been described using simplified analytical solutions, or abstracted numerical models. To make the problem tractable, it is common to idealize the flood event, simplify river channel geometry, and ignore bank soil heterogeneity, often resulting in a model that only loosely represents the site, thus limiting its applicability to any specific river cross-section. 
In this study, we calibrate a site-specific fully-integrated surface and subsurface HydroGeoSphere model using flood events for a cross-section along the South River near Waynesboro, VA. The calibration approach presented in this study demonstrates the incorporation of fining direction regularization with a highly parameterized inversion driven by natural stimuli, to develop several realistic realizations of hydraulic conductivity fields that reflect the depositional history of the system. Specifically, we calibrate a model with 365 unique material zones to multiple flood events recorded in a dense well network while incorporating possible fining sequences consistent with the depositional history of the riverbank. Over 25,000 individual simulations were completed using calibration software and a cloud platform specifically designed for highly parallelized computing environments. The results of this study demonstrate the use of fining direction regularization during model calibration to generate multiple calibrated model realizations that account for the depositional environment of the system.}, } @article {pmid30404242, year = {2018}, author = {Sun, H and He, R and Zhang, Y and Wang, R and Ip, WH and Yung, KL}, title = {eTPM: A Trusted Cloud Platform Enclave TPM Scheme Based on Intel SGX Technology.}, journal = {Sensors (Basel, Switzerland)}, volume = {18}, number = {11}, pages = {}, pmid = {30404242}, issn = {1424-8220}, support = {61572517//the National Science Foundation of China/ ; JCYJ20170302145623566//the Science and Technology Plan Projects of Shenzhen/ ; H-ZG3K//the grants from the Department of Industrial and Systems Engineering, the Hong Kong Polytechnic University, China/ ; }, abstract = {Today cloud computing is widely used in various industries. While benefiting from the services provided by the cloud, users are also faced with some security issues, such as information leakage and data tampering. 
Utilizing trusted computing technology to enhance the security mechanism, defined as trusted cloud, has become a hot research topic in cloud security. Currently, virtual TPM (vTPM) is commonly used in a trusted cloud to protect the integrity of the cloud environment. However, the existing vTPM scheme lacks protections of vTPM itself at a runtime environment. This paper proposed a novel scheme, which designed a new trusted cloud platform security component, 'enclave TPM (eTPM)' to protect cloud and employed Intel SGX to enhance the security of eTPM. The eTPM is a software component that emulates TPM functions which build trust and security in cloud and runs in 'enclave', an isolation memory zone introduced by SGX. eTPM can ensure its security at runtime, and protect the integrity of Virtual Machines (VM) according to user-specific policies. Finally, a prototype for the eTPM scheme was implemented, and experiment manifested its effectiveness, security, and availability.}, } @article {pmid30396355, year = {2018}, author = {Kim, JD and Lee, SY and Kim, YS and Song, HJ and Park, CY}, title = {A study of polymerase chain reaction device control via cloud using Firebase Cloud Messaging protocol.}, journal = {Biomedical engineering online}, volume = {17}, number = {Suppl 2}, pages = {153}, pmid = {30396355}, issn = {1475-925X}, mesh = {*Cloud Computing ; Computer Security ; Polymerase Chain Reaction/*instrumentation ; Time Factors ; }, abstract = {BACKGROUND: In this paper, we propose a system for data monitoring and control of polymerase chain reaction (PCR) externally. PCR is a technique for amplifying a desired DNA molecule by repeatedly synthesizing a specific part of DNA sequence. Currently, commercially available systems are standalone systems or operate PCR devices through a computer in the vicinity of devices for control purposes. 
These systems are limited in the number of devices that the host system can monitor at the same time, and there are limitations in controlling devices or accessing experimental data externally. Therefore, we propose a system to control the PCR device via the cloud for the convenience of the user and to overcome the limitation of the place.

METHODS: The cloud system used in this study is Google's Firebase. At this time, we use Firebase Cloud Messaging (FCM) protocol to send and receive data. In this paper, we have experimented on the possibility of data transmission and reception using FCM between device, cloud and user. Since the PCR chips used in the research are generally operated at about 10°/s, and the temperature can be controlled within 0.5°, the processing period of the control process should be made much smaller than 1/20 s (50 ms).

RESULTS: As a result of experiments, the time of the data round-trip using FCM was measured at 150 ms on the average. Therefore, the data exchange time using FCM is three times slower than the reference time of 50 ms.

CONCLUSIONS: Since the data round-trip time using FCM is measured to be three times slower than the reference time of 50 ms, it is impossible for the user to control the device such as the PCR device used in this study through the cloud. However, it is possible for the user to monitor the status of the PCR device from the outside in real time.}, } @article {pmid30395457, year = {2018}, author = {Li, J and Lu, Y and Xu, Y and Liu, C and Tu, Y and Ye, S and Liu, H and Xie, Y and Qian, H and Zhu, X}, title = {AIR-Chem: Authentic Intelligent Robotics for Chemistry.}, journal = {The journal of physical chemistry. A}, volume = {122}, number = {46}, pages = {9142-9148}, doi = {10.1021/acs.jpca.8b10680}, pmid = {30395457}, issn = {1520-5215}, abstract = {The new era with prosperous artificial intelligence (AI) and robotics technology is reshaping the materials discovery process in a more radical fashion. Here we present authentic intelligent robotics for chemistry (AIR-Chem), integrated with technological innovations in the AI and robotics fields, functionalized with modules including gradient descent-based optimization frameworks, multiple external field modulations, a real-time computer vision (CV) system, and automated guided vehicle (AGV) parts. AIR-Chem is portable and remotely controllable by cloud computing. AIR-Chem can learn the parametric procedures for given targets and carry on laboratory operations in standalone mode, with high reproducibility, precision, and availability for knowledge regeneration. Moreover, an improved nucleation theory of size focusing on inorganic perovskite quantum dots (IPQDs) is theoretically proposed and experimentally testified to by AIR-Chem. 
This work aims to boost the process of an unmanned chemistry laboratory from the synthesis of chemical materials to the analysis of physical chemical properties, and it provides a vivid demonstration for future chemistry reshaped by AI and robotics technology.}, } @article {pmid30384983, year = {2019}, author = {Hutchison, J and Mackenzie, C and Madin, B and Happold, J and Leslie, E and Zalcman, E and Meyer, A and Cameron, A}, title = {New approaches to aquatic and terrestrial animal surveillance: The potential for people and technology to transform epidemiology.}, journal = {Preventive veterinary medicine}, volume = {167}, number = {}, pages = {169-173}, doi = {10.1016/j.prevetmed.2018.10.009}, pmid = {30384983}, issn = {1873-1716}, mesh = {Animal Diseases/*epidemiology ; Animals ; Chile ; Humans ; Indonesia ; *Information Technology ; Population Surveillance ; }, abstract = {Epidemiology provides insights about causes of diseases and how to control them, and is powered by surveillance information. Animal health surveillance systems typically have been designed to meet high-level government informational needs, and any incentives for those who generate data (such as animal owners and animal health workers) to report surveillance information are sometimes outweighed by the negative consequences of reporting; underreporting is a serious constraint. This problem can persist even when modern advances in information and communications technology (ICT) are incorporated into the structure and operation of surveillance systems, although some problems typical of paper-based systems (including timeliness of reporting and response, accuracy of data entry, and level of detail recorded) are reduced. On occasions, however, additional problems including sustainability arise. 
We describe two examples of a philosophical approach and ICT platform for the development of powerful and sustainable health information systems that are people-centred and do not exhibit these typical problems. iSIKHNAS is Indonesia's integrated animal health information system, and PIISAC is a sustainable secure research platform based on full production data from participating commercial Chilean aquaculture companies. Epidemiologists working with these systems are faced with interesting new challenges, including the need to develop skills in extracting appropriate surveillance outcomes from large volumes of continually-streaming data.}, } @article {pmid30368838, year = {2019}, author = {Wijesooriya, K and Liyanage, NK and Kaluarachchi, M and Sawkey, D}, title = {Part II: Verification of the TrueBeam head shielding model in Varian VirtuaLinac via out-of-field doses.}, journal = {Medical physics}, volume = {46}, number = {2}, pages = {877-884}, doi = {10.1002/mp.13263}, pmid = {30368838}, issn = {2473-4209}, support = {R01 CA093626/CA/NCI NIH HHS/United States ; R01 CA234281/CA/NCI NIH HHS/United States ; }, mesh = {Computer Simulation ; Head/*radiation effects ; Humans ; Monte Carlo Method ; Particle Accelerators/*instrumentation ; *Phantoms, Imaging ; *Photons ; *Radiation Dosage ; Radiometry/methods ; Software ; }, abstract = {PURPOSE: A good Monte Carlo model with an accurate head shielding model is important in estimating the long-term risks of unwanted radiation exposure during radiation therapy. The aim of this paper was to validate the Monte Carlo simulation of a TrueBeam linear accelerator (linac) head shielding model. We approach this by evaluating the accuracy of out-of-field dose predictions at extended distances which are comprised of scatter from within the patient and treatment head leakage and thus reflect the accuracy of the head shielding model. 
We quantify the out-of-field dose of a TrueBeam linac for low-energy photons, 6X and 6X-FFF beams, and compare measurements to Monte Carlo simulations using Varian VirtuaLinac that include a realistic head shielding model, for a variety of jaw sizes and angles up to a distance of 100 cm from the isocenter, in both positive and negative directions. Given the high value and utility of the VirtuaLinac model, it is critical that this model is validated thoroughly and the results be available to the medical physics community.

MATERIALS AND METHOD: Simulations were done using VirtuaLinac, the GEANT4-based Monte Carlo model of the TrueBeam treatment head from Varian Medical Systems, and an in-house GEANT4-based code. VirtuaLinac included a detailed model of the treatment head shielding and was run on the Amazon Web Services cloud to generate spherical phase space files surrounding the treatment head. These phase space files were imported into the in-house code, which modeled the measurement setup with a solid water buildup, the carbon fiber couch, and the gantry stand. For each jaw size (2 × 2 cm[2] , 4 × 4 cm[2] , 10 × 10 cm[2] , and 20 × 20 cm[2]) and angular setting (0°, 90°, 45°, 135°), the dose was calculated at intervals of 5 cm along each measurement direction.

RESULTS: For the 10 × 10 cm[2] jaw size, both 6X and 6X-FFF showed very good agreement between simulation and measurement in both in-plane directions, with no apparent systematic bias. The percentage deviations for these settings were as follows: (mean, STDEV, maximum) (8.34, 6.44, 24.84) for 6X and (13.21, 8.93, 35.56) for 6X-FFF. For all jaw sizes, simulation agreed well in the in-plane direction going away from the gantry, but, some deviations were observed moving toward the gantry at larger distances. At larger distances, for the jaw sizes smaller than 10 × 10 cm[2] , the simulation underestimates the dose compared with measurement, while for jaw sizes larger than 10 × 10 cm[2] , it overestimates dose. For all comparisons between ±50 cm from isocenter, average absolute agreement between simulation and measurement was better than 28%.

CONCLUSION: We have validated the Varian VirtuaLinac's head shielding model via out-of-field doses and quantified the differences between TrueBeam head shielding model created out-of-field doses and measurements for an extended distance of 100 cm.}, } @article {pmid30366454, year = {2018}, author = {Zhang, Y and Wu, Z and Sun, J and Zhang, Y and Zhu, Y and Liu, J and Zang, Q and Plaza, A}, title = {A Distributed Parallel Algorithm Based on Low-Rank and Sparse Representation for Anomaly Detection in Hyperspectral Images.}, journal = {Sensors (Basel, Switzerland)}, volume = {18}, number = {11}, pages = {}, pmid = {30366454}, issn = {1424-8220}, support = {71501096, 61502234, 61502250, 61471199, 61772274, 61872185 and 61802185//the National Natural Science Foundation of China/ ; BK20150785 and BK20180470//Natural Science Foundation of Jiangsu Province of China/ ; 30916011325 and 30917015104//Fundamental Research Funds for the Central Universities/ ; }, abstract = {Anomaly detection aims to separate anomalous pixels from the background, and has become an important application of remotely sensed hyperspectral image processing. Anomaly detection methods based on low-rank and sparse representation (LRASR) can accurately detect anomalous pixels. However, with the significant volume increase of hyperspectral image repositories, such techniques consume a significant amount of time (mainly due to the massive amount of matrix computations involved). In this paper, we propose a novel distributed parallel algorithm (DPA) by redesigning key operators of LRASR in terms of MapReduce model to accelerate LRASR on cloud computing architectures. Independent computation operators are explored and executed in parallel on Spark. Specifically, we reconstitute the hyperspectral images in an appropriate format for efficient DPA processing, design the optimized storage strategy, and develop a pre-merge mechanism to reduce data transmission. 
Besides, a repartitioning policy is also proposed to improve DPA's efficiency. Our experimental results demonstrate that the newly developed DPA achieves very high speedups when accelerating LRASR, in addition to maintaining similar accuracies. Moreover, our proposed DPA is shown to be scalable with the number of computing nodes and capable of processing big hyperspectral images involving massive amounts of data.}, } @article {pmid30358656, year = {2019}, author = {Barbour, DL and Howard, RT and Song, XD and Metzger, N and Sukesan, KA and DiLorenzo, JC and Snyder, BRD and Chen, JY and Degen, EA and Buchbinder, JM and Heisey, KL}, title = {Online Machine Learning Audiometry.}, journal = {Ear and hearing}, volume = {40}, number = {4}, pages = {918-926}, pmid = {30358656}, issn = {1538-4667}, support = {T32 NS073547/NS/NINDS NIH HHS/United States ; T35 DC008765/DC/NIDCD NIH HHS/United States ; UL1 TR002345/TR/NCATS NIH HHS/United States ; }, mesh = {Adult ; Aged ; Audiometry, Pure-Tone/*methods ; Female ; Hearing Loss/*diagnosis ; Humans ; *Internet ; *Machine Learning ; Male ; Middle Aged ; Reproducibility of Results ; Severity of Illness Index ; Young Adult ; }, abstract = {OBJECTIVES: A confluence of recent developments in cloud computing, real-time web audio and machine learning psychometric function estimation has made wide dissemination of sophisticated turn-key audiometric assessments possible. The authors have combined these capabilities into an online (i.e., web-based) pure-tone audiogram estimator intended to empower researchers and clinicians with advanced hearing tests without the need for custom programming or special hardware. The objective of this study was to assess the accuracy and reliability of this new online machine learning audiogram method relative to a commonly used hearing threshold estimation technique also implemented online for the first time in the same platform.

DESIGN: The authors performed air conduction pure-tone audiometry on 21 participants between the ages of 19 and 79 years (mean 41, SD 21) exhibiting a wide range of hearing abilities. For each ear, two repetitions of online machine learning audiogram estimation and two repetitions of online modified Hughson-Westlake ascending-descending audiogram estimation were acquired by an audiologist using the online software tools. The estimated hearing thresholds of these two techniques were compared at standard audiogram frequencies (i.e., 0.25, 0.5, 1, 2, 4, 8 kHz).

RESULTS: The two threshold estimation methods delivered very similar threshold estimates at standard audiogram frequencies. Specifically, the mean absolute difference between threshold estimates was 3.24 ± 5.15 dB. The mean absolute differences between repeated measurements of the online machine learning procedure and between repeated measurements of the Hughson-Westlake procedure were 2.85 ± 6.57 dB and 1.88 ± 3.56 dB, respectively. The machine learning method generated estimates of both threshold and spread (i.e., the inverse of psychometric slope) continuously across the entire frequency range tested from fewer samples on average than the modified Hughson-Westlake procedure required to estimate six discrete thresholds.

CONCLUSIONS: Online machine learning audiogram estimation in its current form provides all the information of conventional threshold audiometry with similar accuracy and reliability in less time. More importantly, however, this method provides additional audiogram details not provided by other methods. This standardized platform can be readily extended to bone conduction, masking, spectrotemporal modulation, speech perception, etc., unifying audiometric testing into a single comprehensive procedure efficient enough to become part of the standard audiologic workup.}, } @article {pmid30356029, year = {2018}, author = {Fan, K and Yin, J and Zhang, K and Li, H and Yang, Y}, title = {EARS-DM: Efficient Auto Correction Retrieval Scheme for Data Management in Edge Computing.}, journal = {Sensors (Basel, Switzerland)}, volume = {18}, number = {11}, pages = {}, pmid = {30356029}, issn = {1424-8220}, support = {2017YFB0802300//National Key R&D Program of China/ ; 61772403//National Natural Science Foundation of China/ ; U1401251//National Natural Science Foundation of China/ ; 2017JM6004//Natural Science Basic Research Plan in Shaanxi Province of China/ ; B16037//National 111 Program of China/ ; B08038//National 111 Program of China/ ; }, abstract = {Edge computing is an extension of cloud computing that enables messages to be acquired and processed at low cost. Many terminal devices are being deployed in the edge network to sense and deal with the massive data. By migrating part of the computing tasks from the original cloud computing model to the edge device, the message is running on computing resources close to the data source. The edge computing model can effectively reduce the pressure on the cloud computing center and lower the network bandwidth consumption. However, the security and privacy issues in edge computing are worth noting. In this paper, we propose an efficient auto-correction retrieval scheme for data management in edge computing, named EARS-DM. 
With automatic error correction for the query keywords instead of similar words extension, EARS-DM can tolerate spelling mistakes and reduce the complexity of index storage space. By the combination of TF-IDF value of keywords and the syntactic weight of query keywords, keywords that are more important will obtain higher relevance scores. We construct an R-tree index building with the encrypted keywords and the children nodes of which are the encrypted identifier FID and Bloom filter BF of files that contain this keyword. The secure index will be uploaded to the edge computing and the search phase will be performed by the edge computing which is close to the data source. Then EDs sort the matching encrypted file identifier FID by relevance scores and upload them to the cloud server (CS). Performance analysis with actual data indicated that our scheme is efficient and accurate.}, } @article {pmid30351461, year = {2019}, author = {Botelho, J and Machado, V and Proença, L and Rua, J and Delgado, A and João Mendes, J}, title = {Cloud-based collaboration and productivity tools to enhance self-perception and self-evaluation in senior dental students: A pilot study.}, journal = {European journal of dental education : official journal of the Association for Dental Education in Europe}, volume = {23}, number = {1}, pages = {e53-e58}, doi = {10.1111/eje.12400}, pmid = {30351461}, issn = {1600-0579}, mesh = {*Cloud Computing ; Cross-Sectional Studies ; *Diagnostic Self Evaluation ; Education, Dental/*methods ; Humans ; Personal Satisfaction ; Pilot Projects ; *Self Concept ; Students, Dental/*psychology ; Surveys and Questionnaires ; }, abstract = {CONTEXT: Web/cloud-based collaborations have been successfully used in several educational and clinical settings with very satisfactory results against the traditional background.
In this study, we assess the usefulness, ease of use, ease of learning and satisfaction of a cloud-based clinical progression practice record compared with a traditional paper practice record.

METHODS: In this cross-sectional study, forty senior dental students answered a USE-Questionnaire on the traditional paper book versus a digital clinical book assembled with Google Docs. They were asked about usefulness, ease of use, ease of learning and satisfaction of both gears.

RESULTS: There was absolute agreement in all four different dimensions, with the cloud-based digital system evidencing more advantages and preference from the users' perspective in usefulness (from P = 0.002 to P < 0.001), ease of use (from P = 0.034 to P < 0.001), ease of learning (P < 0.001) and satisfaction (from P = 0.001 to P < 0.001).

CONCLUSION: The results of this study suggest that a digital clinical book, using free cloud-based collaboration tools, is more useful, easier to use and learn and more satisfactory than a traditional paper recording system. Also, this cloud-based collaboration system presents characteristics that may guide future clinical research.}, } @article {pmid30349859, year = {2018}, author = {Kosa, P and Barbour, C and Wichman, A and Sandford, M and Greenwood, M and Bielekova, B}, title = {NeurEx: digitalized neurological examination offers a novel high-resolution disability scale.}, journal = {Annals of clinical and translational neurology}, volume = {5}, number = {10}, pages = {1241-1249}, pmid = {30349859}, issn = {2328-9503}, abstract = {OBJECTIVE: To develop a sensitive neurological disability scale for broad utilization in clinical practice.

METHODS: We employed advances of mobile computing to develop an iPad-based App for convenient documentation of the neurological examination into a secure, cloud-linked database. We included features present in four traditional neuroimmunological disability scales and codified their automatic computation. By combining spatial distribution of the neurological deficit with quantitative or semiquantitative rating of its severity we developed a new summary score (called NeurEx; ranging from 0 to 1349 with minimal measurable change of 0.25) and compared its performance with clinician- and App-computed traditional clinical scales.

RESULTS: In the cross-sectional comparison of 906 neurological examinations, the variance between App-computed and clinician-scored disability scales was comparable to the variance between rating of the identical neurological examination by multiple sclerosis (MS)-trained clinicians. By eliminating rating ambiguity, App-computed scales achieved greater accuracy in measuring disability progression over time (n = 191 patients studied over 880.6 patient-years). The NeurEx score had no apparent ceiling effect and more than 200-fold higher sensitivity for detecting a measurable yearly disability progression (i.e., median progression slope of 8.13 relative to minimum detectable change of 0.25) than Expanded Disability Status Scale (EDSS) with a median yearly progression slope of 0.071 that is lower than the minimal measurable change on EDSS of 0.5.

INTERPRETATION: NeurEx can be used as a highly sensitive outcome measure in neuroimmunology. The App can be easily modified for use in other areas of neurology and it can bridge private practice practitioners to academic centers in multicenter research studies.}, } @article {pmid30349760, year = {2018}, author = {Harrison, C and Keleş, S and Hudson, R and Shin, S and Dutra, I}, title = {atSNPInfrastructure, a case study for searching billions of records while providing significant cost savings over cloud providers.}, journal = {IEEE International Symposium on Parallel & Distributed Processing, Workshops and Phd Forum : [proceedings]. IEEE International Symposium on Parallel & Distributed Processing, Workshops and Phd Forum}, volume = {2018}, number = {}, pages = {497-506}, pmid = {30349760}, issn = {2164-7062}, support = {U54 AI117924/AI/NIAID NIH HHS/United States ; }, abstract = {We explore the feasibility of a database storage engine housing up to 307 billion genetic Single Nucleotide Polymorphisms (SNP) for online access. We evaluate database storage engines and implement a solution utilizing factors such as dataset size, information gain, cost and hardware constraints. Our solution provides a full feature functional model for scalable storage and query-ability for researchers exploring the SNP's in the human genome. 
We address the scalability problem by building physical infrastructure and comparing final costs to a major cloud provider.}, } @article {pmid30337067, year = {2018}, author = {Chen, L and Aziz, MM and Mohammed, N and Jiang, X}, title = {Secure large-scale genome data storage and query.}, journal = {Computer methods and programs in biomedicine}, volume = {165}, number = {}, pages = {129-137}, pmid = {30337067}, issn = {1872-7565}, support = {R01 GM124111/GM/NIGMS NIH HHS/United States ; U01 TR002062/TR/NCATS NIH HHS/United States ; U54 HL108460/HL/NHLBI NIH HHS/United States ; }, mesh = {Big Data ; Cloud Computing ; *Computer Security ; Databases, Genetic/*statistics & numerical data ; *Genome, Human ; Humans ; *Information Storage and Retrieval ; Polymorphism, Single Nucleotide ; Search Engine ; }, abstract = {BACKGROUND AND OBJECTIVE: Cloud computing plays a vital role in big data science with its scalable and cost-efficient architecture. Large-scale genome data storage and computations would benefit from using these latest cloud computing infrastructures, to save cost and speedup discoveries. However, due to the privacy and security concerns, data owners are often disinclined to put sensitive data in a public cloud environment without enforcing some protective measures. An ideal solution is to develop secure genome database that supports encrypted data deposition and query.

METHODS: Nevertheless, it is a challenging task to make such a system fast and scalable enough to handle real-world demands providing data security as well. In this paper, we propose a novel, secure mechanism to support secure count queries on an open source graph database (Neo4j) and evaluated the performance on a real-world dataset of around 735,317 Single Nucleotide Polymorphisms (SNPs). In particular, we propose a new tree indexing method that offers constant time complexity (proportion to the tree depth), which was the bottleneck of existing approaches.

RESULTS: The proposed method significantly improves the runtime of query execution compared to the existing techniques. It takes less than one minute to execute an arbitrary count query on a dataset of 212 GB, while the best-known algorithm takes around 7 min.

CONCLUSIONS: The outlined framework and experimental results show the applicability of utilizing graph database for securely storing large-scale genome data in untrusted environment. Furthermore, the crypto-system and security assumptions underlined are much suitable for such use cases which be generalized in future work.}, } @article {pmid30334104, year = {2018}, author = {Vijayarangam, S and Megalai, J and Krishnan, S and Nagarajan, S and Devi, MR and Lokesh, S}, title = {Vehicular Cloud for Smart Driving Using Internet of Things.}, journal = {Journal of medical systems}, volume = {42}, number = {12}, pages = {240}, doi = {10.1007/s10916-018-1105-4}, pmid = {30334104}, issn = {1573-689X}, mesh = {*Automobile Driving ; *Cloud Computing ; Humans ; *Internet ; Reproducibility of Results ; Stochastic Processes ; }, abstract = {The vehicular cloud can be made more reliable by having minimum number of vehicles and their accessibility of the vehicles in the given lane; in addition reliability can also be made using the function called movement of vehicles. The number of vehicles present in the area determines the task that can be accessed in the area and with the help of travelling time of the vehicles the validity of the lane can be determined. In this paper, a research is carried based on the stochastic investigation on the some of attributes of traffic with the help of cloud in street portion to accept the necessary attribute prototypes. In this paper two types of activity is done, first one is free flow movement of vehicle and second one is queuing- up activity. For the first activity, a noticeable traffic model is used to find the free flow movement of the vehicle and some parameters like activity thickness, living time and quantity of vehicles. In case of second activity queuing up model is used to find queue flow and parameters like length of line and time in the line are found. 
The research outcome will be given to all peoples in road traffic and traffic is the problem in many developed countries and they can be free from traffic. This model suggests an alternate route for the user which is free from traffic.}, } @article {pmid30332831, year = {2018}, author = {Bouaynaya, W and Lyu, H and Zhang, ZJ}, title = {Exploring Risks Transferred from Cloud-Based Information Systems: A Quantitative and Longitudinal Model.}, journal = {Sensors (Basel, Switzerland)}, volume = {18}, number = {10}, pages = {}, pmid = {30332831}, issn = {1424-8220}, abstract = {With the growing popularity of Internet of Things (IoT) and Cyber-Physical Systems (CPS), cloud- based systems have assumed a greater important role. However, there lacks formal approaches to modeling the risks transferred through information systems implemented in a cloud-based environment. This paper explores formal methods to quantify the risks associated with an information system and evaluate its variation throughout its implementation. Specifically, we study the risk variation through a quantitative and longitudinal model spanning from the launch of a cloud-based information systems project to its completion. In addition, we propose to redefine the risk estimation method to differentiate a mitigated risk from an unmitigated risk. 
This research makes valuable contributions by helping practitioners understand whether cloud computing presents a competitive advantage or a threat to the sustainability of a company.}, } @article {pmid30327288, year = {2018}, author = {Dur, O and Rhoades, C and Ng, MS and Elsayed, R and van Mourik, R and Majmudar, MD}, title = {Design Rationale and Performance Evaluation of the Wavelet Health Wristband: Benchtop Validation of a Wrist-Worn Physiological Signal Recorder.}, journal = {JMIR mHealth and uHealth}, volume = {6}, number = {10}, pages = {e11040}, pmid = {30327288}, issn = {2291-5222}, abstract = {BACKGROUND: Wearable and connected health devices along with the recent advances in mobile and cloud computing provide a continuous, convenient-to-patient, and scalable way to collect personal health data remotely. The Wavelet Health platform and the Wavelet wristband have been developed to capture multiple physiological signals and to derive biometrics from these signals, including resting heart rate (HR), heart rate variability (HRV), and respiration rate (RR).

OBJECTIVE: This study aimed to evaluate the accuracy of the biometric estimates and signal quality of the wristband.

METHODS: Measurements collected from 35 subjects using the Wavelet wristband were compared with simultaneously recorded electrocardiogram and spirometry measurements.

RESULTS: The HR, HRV SD of normal-to-normal intervals, HRV root mean square of successive differences, and RR estimates matched within 0.7 beats per minute (SD 0.9), 7 milliseconds (SD 10), 11 milliseconds (SD 12), and 1 breath per minute (SD 1) mean absolute deviation of the reference measurements, respectively. The quality of the raw plethysmography signal collected by the wristband, as determined by the harmonic-to-noise ratio, was comparable with that obtained from measurements from a finger-clip plethysmography device.

CONCLUSIONS: The accuracy of the biometric estimates and high signal quality indicate that the wristband photoplethysmography device is suitable for performing pulse wave analysis and measuring vital signs.}, } @article {pmid30310759, year = {2018}, author = {Kremen, V and Brinkmann, BH and Kim, I and Guragain, H and Nasseri, M and Magee, AL and Pal Attia, T and Nejedly, P and Sladky, V and Nelson, N and Chang, SY and Herron, JA and Adamski, T and Baldassano, S and Cimbalnik, J and Vasoli, V and Fehrmann, E and Chouinard, T and Patterson, EE and Litt, B and Stead, M and Van Gompel, J and Sturges, BK and Jo, HJ and Crowe, CM and Denison, T and Worrell, GA}, title = {Integrating Brain Implants With Local and Distributed Computing Devices: A Next Generation Epilepsy Management System.}, journal = {IEEE journal of translational engineering in health and medicine}, volume = {6}, number = {}, pages = {2500112}, pmid = {30310759}, issn = {2168-2372}, support = {R01 NS092882/NS/NINDS NIH HHS/United States ; UH2 NS095495/NS/NINDS NIH HHS/United States ; }, abstract = {Brain stimulation has emerged as an effective treatment for a wide range of neurological and psychiatric diseases. Parkinson's disease, epilepsy, and essential tremor have FDA indications for electrical brain stimulation using intracranially implanted electrodes. Interfacing implantable brain devices with local and cloud computing resources have the potential to improve electrical stimulation efficacy, disease tracking, and management. Epilepsy, in particular, is a neurological disease that might benefit from the integration of brain implants with off-the-body computing for tracking disease and therapy. Recent clinical trials have demonstrated seizure forecasting, seizure detection, and therapeutic electrical stimulation in patients with drug-resistant focal epilepsy. 
In this paper, we describe a next-generation epilepsy management system that integrates local handheld and cloud-computing resources wirelessly coupled to an implanted device with embedded payloads (sensors, intracranial EEG telemetry, electrical stimulation, classifiers, and control policy implementation). The handheld device and cloud computing resources can provide a seamless interface between patients and physicians, and realtime intracranial EEG can be used to classify brain state (wake/sleep, preseizure, and seizure), implement control policies for electrical stimulation, and track patient health. This system creates a flexible platform in which low demand analytics requiring fast response times are embedded in the implanted device and more complex algorithms are implemented in offthebody local and distributed cloud computing environments. The system enables tracking and management of epileptic neural networks operating over time scales ranging from milliseconds to months.}, } @article {pmid30309364, year = {2018}, author = {Bonte, C and Vercauteren, F}, title = {Privacy-preserving logistic regression training.}, journal = {BMC medical genomics}, volume = {11}, number = {Suppl 4}, pages = {86}, pmid = {30309364}, issn = {1755-8794}, support = {R13 HG009072/HG/NHGRI NIH HHS/United States ; }, mesh = {*Algorithms ; *Computer Security ; Databases as Topic ; Genomics ; Logistic Models ; ROC Curve ; }, abstract = {BACKGROUND: Logistic regression is a popular technique used in machine learning to construct classification models. Since the construction of such models is based on computing with large datasets, it is an appealing idea to outsource this computation to a cloud service. The privacy-sensitive nature of the input data requires appropriate privacy preserving measures before outsourcing it. 
Homomorphic encryption enables one to compute on encrypted data directly, without decryption and can be used to mitigate the privacy concerns raised by using a cloud service.

METHODS: In this paper, we propose an algorithm (and its implementation) to train a logistic regression model on a homomorphically encrypted dataset. The core of our algorithm consists of a new iterative method that can be seen as a simplified form of the fixed Hessian method, but with a much lower multiplicative complexity.

RESULTS: We test the new method on two interesting real life applications: the first application is in medicine and constructs a model to predict the probability for a patient to have cancer, given genomic data as input; the second application is in finance and the model predicts the probability of a credit card transaction to be fraudulent. The method produces accurate results for both applications, comparable to running standard algorithms on plaintext data.

CONCLUSIONS: This article introduces a new simple iterative algorithm to train a logistic regression model that is tailored to be applied on a homomorphically encrypted dataset. This algorithm can be used as a privacy-preserving technique to build a binary classification model and can be applied in a wide range of problems that can be modelled with logistic regression. Our implementation results show that our method can handle the large datasets used in logistic regression training.}, } @article {pmid30281592, year = {2018}, author = {Erickson, RA and Fienen, MN and McCalla, SG and Weiser, EL and Bower, ML and Knudson, JM and Thain, G}, title = {Wrangling distributed computing for high-throughput environmental science: An introduction to HTCondor.}, journal = {PLoS computational biology}, volume = {14}, number = {10}, pages = {e1006468}, pmid = {30281592}, issn = {1553-7358}, mesh = {*Computational Biology ; *Computing Methodologies ; *Ecology ; High-Throughput Screening Assays ; Humans ; Internet ; Research ; *Software ; }, abstract = {Biologists and environmental scientists now routinely solve computational problems that were unimaginable a generation ago. Examples include processing geospatial data, analyzing -omics data, and running large-scale simulations. Conventional desktop computing cannot handle these tasks when they are large, and high-performance computing is not always available nor the most appropriate solution for all computationally intense problems. High-throughput computing (HTC) is one method for handling computationally intense research. In contrast to high-performance computing, which uses a single "supercomputer," HTC can distribute tasks over many computers (e.g., idle desktop computers, dedicated servers, or cloud-based resources). HTC facilities exist at many academic and government institutes and are relatively easy to create from commodity hardware. 
Additionally, consortia such as Open Science Grid facilitate HTC, and commercial entities sell cloud-based solutions for researchers who lack HTC at their institution. We provide an introduction to HTC for biologists and environmental scientists. Our examples from biology and the environmental sciences use HTCondor, an open source HTC system.}, } @article {pmid30279986, year = {2018}, author = {Shakeel, PM and Baskar, S and Dhulipala, VRS and Jaber, MM}, title = {Cloud based framework for diagnosis of diabetes mellitus using K-means clustering.}, journal = {Health information science and systems}, volume = {6}, number = {1}, pages = {16}, pmid = {30279986}, issn = {2047-2501}, abstract = {Diabetes mellitus is a serious health problem affecting the entire population all over the world for many decades. It is a group of metabolic disorder characterized by chronic disease which occurs due to high blood sugar, unhealthy foods, lack of physical activity and also hereditary. The sorts of diabetes mellitus are type1, type2 and gestational diabetes. The type1 appears during childhood and type2 diabetes develop at any age, mostly affects older than 40. The gestational diabetes occurs for pregnant women. According to the statistical report of WHO 79% of deaths occurred in people under the age of 60, due to diabetes. With a specific end goal to deal with the vast volume, speed, assortment, veracity and estimation of information a scalable environment is needed. Cloud computing is an interesting computing model suitable for accommodating huge volume of dynamic data. To overcome the data handling problems this work focused on Hadoop framework along with clustering technique. This work also predicts the occurrence of diabetes under various circumstances which is more useful for the human. This paper also compares the efficiency of two different clustering techniques suitable for the environment. 
The predicted result is used to diagnose which age group and gender are mostly affected by diabetes. Further some of the attributes such as hyper tension and work nature are also taken into consideration for analysis.}, } @article {pmid30279984, year = {2018}, author = {Jagadeeswari, V and Subramaniyaswamy, V and Logesh, R and Vijayakumar, V}, title = {A study on medical Internet of Things and Big Data in personalized healthcare system.}, journal = {Health information science and systems}, volume = {6}, number = {1}, pages = {14}, pmid = {30279984}, issn = {2047-2501}, abstract = {Personalized healthcare systems deliver e-health services to fulfill the medical and assistive needs of the aging population. Internet of Things (IoT) is a significant advancement in the Big Data era, which supports many real-time engineering applications through enhanced services. Analytics over data streams from IoT has become a source of user data for the healthcare systems to discover new information, predict early detection, and makes decision over the critical situation for the improvement of the quality of life. In this paper, we have made a detailed study on the recent emerging technologies in the personalized healthcare systems with the focus towards cloud computing, fog computing, Big Data analytics, IoT and mobile based applications. We have analyzed the challenges in designing a better healthcare system to make early detection and diagnosis of diseases and discussed the possible solutions while providing e-health services in secure manner. 
This paper poses a light on the rapidly growing needs of the better healthcare systems in real-time and provides possible future work guidelines.}, } @article {pmid30271293, year = {2018}, author = {Courneya, JP and Mayo, A}, title = {High-performance computing service for bioinformatics and data science.}, journal = {Journal of the Medical Library Association : JMLA}, volume = {106}, number = {4}, pages = {494-495}, doi = {10.5195/jmla.2018.512}, pmid = {30271293}, issn = {1558-9439}, mesh = {Computational Biology/*organization & administration ; Computer Communication Networks/*organization & administration ; Humans ; Internet ; Libraries, Medical/*organization & administration ; User-Computer Interface ; }, abstract = {Despite having an ideal setup in their labs for wet work, researchers often lack the computational infrastructure to analyze the magnitude of data that result from "-omics" experiments. In this innovative project, the library supports analysis of high-throughput data from global molecular profiling experiments by offering a high-performance computer with open source software along with expert bioinformationist support. The audience for this new service is faculty, staff, and students for whom using the university's large scale, CORE computational resources is not warranted because these resources exceed the needs of smaller projects. In the library's approach, users are empowered to analyze high-throughput data that they otherwise would not be able to on their own computers. To develop the project, the library's bioinformationist identified the ideal computing hardware and a group of open source bioinformatics software to provide analysis options for experimental data such as scientific images, sequence reads, and flow cytometry files. 
To close the loop between learning and practice, the bioinformationist developed self-guided learning materials and workshops or consultations on topics such as the National Center for Biotechnology Information's BLAST, Bioinformatics on the Cloud, and ImageJ. Researchers apply the data analysis techniques that they learned in the classroom in the library's ideal computing environment.}, } @article {pmid30264375, year = {2018}, author = {Liu, MC and Lee, CC}, title = {An Investigation of Pharmacists' Acceptance of NHI-PharmaCloud in Taiwan.}, journal = {Journal of medical systems}, volume = {42}, number = {11}, pages = {213}, pmid = {30264375}, issn = {1573-689X}, mesh = {*Cloud Computing ; Delivery of Health Care ; Humans ; Medication Errors/*prevention & control ; *National Health Programs ; *Pharmacists ; Taiwan ; }, abstract = {Taiwan's National Health Insurance (NHI) is one of the most successful insurance programs in the world. The National Health Insurance Administration (NHIA) established the NHI-PharmaCloud as a platform to reduce medication duplication and other medication errors among the NHI-contracted facilities. The NHI-PharmaCloud can help pharmacists access patient medication information from the preceding 3 months to improve drug safety. The use of NHI-PharmaCloud can improve the quality of healthcare, but improvements cannot occur if pharmacists are unwilling to use the platform. Therefore, the main objective of our study is to investigate the factors affecting pharmacists' adoption of the NHI-PharmaCloud. This study develops a research model using theories of technology adoption, self-efficacy, and perceived risk and uses randomly distributed survey questionnaires to collect data from local pharmacists. The results show that self-efficacy, perceived usefulness, and perceived psychological risk are 3 critical factors that could affect pharmacists' willingness to use the NHI-PharmaCloud. 
The research results may also help NHIA to effectively promote the usage of the NHI-PharmaCloud in Taiwan. In addition, governments in other countries may refer to the results of this study when implementing their own PharmaCloud-type systems to improve drug safety.}, } @article {pmid30256729, year = {2019}, author = {Yahyaie, M and Tarokh, MJ and Mahmoodyar, MA}, title = {Use of Internet of Things to Provide a New Model for Remote Heart Attack Prediction.}, journal = {Telemedicine journal and e-health : the official journal of the American Telemedicine Association}, volume = {25}, number = {6}, pages = {499-510}, doi = {10.1089/tmj.2018.0076}, pmid = {30256729}, issn = {1556-3669}, mesh = {*Cloud Computing ; Data Mining/*methods ; Electrocardiography, Ambulatory/*methods ; Humans ; *Internet of Things ; Myocardial Infarction/*epidemiology ; Neural Networks, Computer ; Remote Sensing Technology ; }, abstract = {Background:Most of the research on heart attack prediction has been based on the offline decision-making approach. The Internet of Things (IoT), as a new concept in the field of information technology, enables this to happen online.Objective:This study examined an IoT-based model for predicting heart attack. In this model, electrocardiogram (ECG) information at the moment is used, which facilitates decision-making.Methods:A research model was developed to get emergency cardiac data at the moment. The basis of this model is the IoT, which enables the information to be instantly accessible. In addition, cloud computing has also been used to analyze online data. We enrolled 207 healthy and 64 myocardial infarction cases of visitors to Khatam-ol-Anbia Hospital of Shahrood in 2017.Results:Data set included 19 regular features and 1 label feature. Then, neural networks (NNs) were used for model testing. We used IBM SPSS[®] Modeler V.18 for model testing. 
After selecting 40% of the data as training set and the rest as the testing set, IBM SPSS Modeler returned 89.5%, which means that with the modeling of these data using NN data mining technique with a probability of 89.5%, we will find the right result.Conclusion:Experiments on the real data set showed that using the IoT, along with cloud computing and data mining techniques, predicts a heart attack with acceptable accuracy. This is achieved by receiving vital signs and ECG information instantaneously.}, } @article {pmid30245810, year = {2018}, author = {Li, Z and Dai, Q and Mehmood, MQ and Hu, G and Yanchuk, BL and Tao, J and Hao, C and Kim, I and Jeong, H and Zheng, G and Yu, S and Alù, A and Rho, J and Qiu, CW}, title = {Full-space Cloud of Random Points with a Scrambling Metasurface.}, journal = {Light, science & applications}, volume = {7}, number = {}, pages = {63}, pmid = {30245810}, issn = {2047-7538}, abstract = {With the rapid progress in computer science, including artificial intelligence, big data and cloud computing, full-space spot generation can be pivotal to many practical applications, such as facial recognition, motion detection, augmented reality, etc. These opportunities may be achieved by using diffractive optical elements (DOEs) or light detection and ranging (LIDAR). However, DOEs suffer from intrinsic limitations, such as demanding depth-controlled fabrication techniques, large thicknesses (more than the wavelength), Lambertian operation only in half space, etc. LIDAR nevertheless relies on complex and bulky scanning systems, which hinders the miniaturization of the spot generator. Here, inspired by a Lambertian scatterer, we report a Hermitian-conjugate metasurface scrambling the incident light to a cloud of random points in full space with compressed information density, functioning in both transmission and reflection spaces. Over 4044 random spots are experimentally observed in the entire space, covering angles at nearly 90°. 
Our scrambling metasurface is made of amorphous silicon with a uniform subwavelength height, a nearly continuous phase coverage, a lightweight, flexible design, and low-heat dissipation. Thus, it may be mass produced by and integrated into existing semiconductor foundry designs. Our work opens important directions for emerging 3D recognition sensors, such as motion sensing, facial recognition, and other applications.}, } @article {pmid30245758, year = {2018}, author = {Espinoza, SE and Brooks, JS and Araujo, J}, title = {Implementing an Information System Strategy: A Cost, Benefit, and Risk Analysis Framework for Evaluating Viable IT Alternatives in the US Federal Government.}, journal = {International journal of communications, network and systems sciences}, volume = {11}, number = {6}, pages = {105-132}, pmid = {30245758}, issn = {1913-3723}, support = {CC999999//Intramural CDC HHS/United States ; }, abstract = {In the US Federal government, an analysis of alternatives (AoA) is required for a significant investment of resources. The AoA yields the recommended alternative from a set of viable alternatives for the investment decision. This paper presents an integrated AoA and project management framework for analyzing new or emerging alternatives (e.g., Cloud computing), as may be driven by an information system strategy that incorporates a methodology for analyzing the costs, benefits, and risks of each viable alternative. The case study in this paper, about a business improvement project to provide public health and safety services to citizens in a US Federal agency, is a practical application of this integrated framework and reveals the benefits of this integrated approach for an investment decision. 
The decision making process in the framework-as an integrated, organized, and adaptable set of management and control practices-offers a defensible recommendation and provides accountability to stakeholders.}, } @article {pmid30245122, year = {2018}, author = {Yıldırım, Ö and Pławiak, P and Tan, RS and Acharya, UR}, title = {Arrhythmia detection using deep convolutional neural network with long duration ECG signals.}, journal = {Computers in biology and medicine}, volume = {102}, number = {}, pages = {411-420}, doi = {10.1016/j.compbiomed.2018.09.009}, pmid = {30245122}, issn = {1879-0534}, mesh = {Adult ; Aged ; Aged, 80 and over ; Algorithms ; Arrhythmias, Cardiac/*diagnosis/*therapy ; Cardiovascular Diseases/diagnosis ; Cloud Computing ; Deep Learning ; Diagnosis, Computer-Assisted/*methods ; *Electrocardiography ; Electronic Data Processing ; Female ; Humans ; Machine Learning ; Male ; Middle Aged ; Neural Networks, Computer ; *Pacemaker, Artificial ; Reproducibility of Results ; *Signal Processing, Computer-Assisted ; Software ; Telemedicine ; Young Adult ; }, abstract = {This article presents a new deep learning approach for cardiac arrhythmia (17 classes) detection based on long-duration electrocardiography (ECG) signal analysis. Cardiovascular disease prevention is one of the most important tasks of any health care system as about 50 million people are at risk of heart disease in the world. Although automatic analysis of ECG signal is very popular, current methods are not satisfactory. The goal of our research was to design a new method based on deep learning to efficiently and quickly classify cardiac arrhythmias. Described research are based on 1000 ECG signal fragments from the MIT - BIH Arrhythmia database for one lead (MLII) from 45 persons. Approach based on the analysis of 10-s ECG signal fragments (not a single QRS complex) is applied (on average, 13 times less classifications/analysis). 
A complete end-to-end structure was designed instead of the hand-crafted feature extraction and selection used in traditional methods. Our main contribution is to design a new 1D-Convolutional Neural Network model (1D-CNN). The proposed method is 1) efficient, 2) fast (real-time classification) 3) non-complex and 4) simple to use (combined feature extraction and selection, and classification in one stage). Deep 1D-CNN achieved a recognition overall accuracy of 17 cardiac arrhythmia disorders (classes) at a level of 91.33% and classification time per single sample of 0.015 s. Compared to the current research, our results are one of the best results to date, and our solution can be implemented in mobile devices and cloud computing.}, } @article {pmid30232983, year = {2018}, author = {Bhavaraju, SR}, title = {From subconscious to conscious to artificial intelligence: A focus on electronic health records.}, journal = {Neurology India}, volume = {66}, number = {5}, pages = {1270-1275}, doi = {10.4103/0028-3886.241377}, pmid = {30232983}, issn = {0028-3886}, mesh = {Artificial Intelligence ; Cloud Computing ; *Electronic Health Records ; Humans ; India ; Internet ; Telemedicine ; }, abstract = {A paradigm shift in human evolution, from our predecessors, the 'hunter-gatherers' to the 'era of digital revolution', has made certain human skills more and more machine driven. This digital revolution has made possible a constant connectivity, wearable technologies, customized platforms, enormous data storage and cloud computing at high speeds, smart phones and apps, internet of things, artificial intelligence, telemedicine, etc. These have made real-time monitoring and interventions possible in healthcare. Most advanced countries have made electronic health records (EHR) mandatory. The Government of India has an agenda of Digital India and digital healthcare and might insist on EHRs. 
EHR is a real-time, patient-centered digital version of a patient's paper record/chart, available instantly and securely to authorized users. EHR contains a patient's medical history, diagnosis, medications, treatment plans, immunization dates, allergies, radiological images, and laboratory results. It can access evidence-based tools that help to make safer decisions about a patient's care with enhanced decision support, clinical alerts, reminders and medical information. The procedure is also more reliable for dispensing medications and introduces the convenience of e-prescriptions. While the advanced technology and digital devices are well received by the healthcare providers, universal acceptance of the EHRs is far from achieving its full potential. The author, in this paper, discusses the current scenario and issues concerned with EHRs in the digital healthcare.}, } @article {pmid30232942, year = {2018}, author = {Tayi, A}, title = {The Internet of Things Is Digitizing and Transforming Science.}, journal = {SLAS technology}, volume = {23}, number = {5}, pages = {407-411}, doi = {10.1177/2472630318788533}, pmid = {30232942}, issn = {2472-6311}, mesh = {Automation, Laboratory/*instrumentation ; Biological Science Disciplines/*instrumentation/trends ; Computers ; Humans ; Information Storage and Retrieval/trends ; *Internet ; *Speech ; Speech Recognition Software/trends ; *User-Computer Interface ; }, abstract = {Over the past decade, cloud software has transformed numerous industries-from finance to logistics, marketing to manufacturing. The simplified aggregation of data, enabled by cloud computing, empowers individuals to glean insights and make data-driven decisions rapidly. In science, however, such a transformation has yet to emerge. The domain lacks centralized, machine-readable repositories of scientific data; this absence inhibits analytics and expedient decision-making. 
Recently, the Internet of Things (IoT) has served as a catalyst for digitizing and automating science. IoT enables the centralized collection and analysis of scientific data (e.g., instruments, sensors, and environments). Here, we discuss this new technology trend, its applications in laboratories and promise as a platform for improved efficiency, more innovative capabilities, and machine learning/artificial intelligence.}, } @article {pmid30225319, year = {2018}, author = {Al-Faifi, AM and Song, B and Hassan, MM and Alamri, A and Gumaei, A}, title = {Data on performance prediction for cloud service selection.}, journal = {Data in brief}, volume = {20}, number = {}, pages = {1039-1043}, doi = {10.1016/j.dib.2018.08.108}, pmid = {30225319}, issn = {2352-3409}, abstract = {This paper contains data on Performance Prediction for Cloud Service Selection. To measure the performance metrics of any system you need to analyze the features that affect this performance; these features are called "workload parameters". The data described here is collected from the KSA Ministry of Finance that contains 28,147 instances from 13 cloud nodes. It was recorded during the period from March 1, 2016, to February 20, 2017, in continuous time slots. In this article we selected 9 workload parameters: Number of Jobs in a Minute, Number of Jobs in 5 min, Number of Jobs in 15 min, Memory Capacity, Disk Capacity, Number of CPU Cores, CPU Speed per Core, Average Receive for Network Bandwidth in Kbps and Average Transmit for Network Bandwidth in Kbps. Moreover, we selected 3 performance metrics: Memory utilization, CPU utilization and response time in milliseconds. 
This data article is related to the research article titled "An Automated Performance Prediction Model for Cloud Service Selection from Smart Data" (Al-Faifi et al., 2018) [1].}, } @article {pmid30213137, year = {2018}, author = {Park, JH and Kim, HS and Kim, WT}, title = {DM-MQTT: An Efficient MQTT Based on SDN Multicast for Massive IoT Communications.}, journal = {Sensors (Basel, Switzerland)}, volume = {18}, number = {9}, pages = {}, pmid = {30213137}, issn = {1424-8220}, support = {2017010875//National Research Foundation of Korea/ ; }, abstract = {Edge computing is proposed to solve the problem of centralized cloud computing caused by a large number of IoT (Internet of Things) devices. The IoT protocols need to be modified according to the edge computing paradigm, where the edge computing devices for analyzing IoT data are distributed to the edge networks. The MQTT (Message Queuing Telemetry Transport) protocol, as a data distribution protocol widely adopted in many international IoT standards, is suitable for cloud computing because it uses a centralized broker to effectively collect and transmit data. However, the standard MQTT may suffer from serious traffic congestion problem on the broker, causing long transfer delays if there are massive IoT devices connected to the broker. In addition, the big data exchange between the IoT devices and the broker decreases network capability of the edge networks. The authors in this paper propose a novel MQTT with a multicast mechanism to minimize data transfer delay and network usage for the massive IoT communications. The proposed MQTT reduces data transfer delays by establishing bidirectional SDN (Software Defined Networking) multicast trees between the publishers and the subscribers by means of bypassing the centralized broker. 
As a result, it can reduce transmission delay by 65% and network usage by 58% compared with the standard MQTT.}, } @article {pmid30205515, year = {2018}, author = {Zhao, Y and Wu, J and Li, W and Lu, S}, title = {Efficient Interference Estimation with Accuracy Control for Data-Driven Resource Allocation in Cloud-RAN.}, journal = {Sensors (Basel, Switzerland)}, volume = {18}, number = {9}, pages = {}, pmid = {30205515}, issn = {1424-8220}, support = {61602238,61672278//National Natural Science Foundation of China/ ; BK20160805//Natural Science Foundation of Jiangsu Province/ ; }, abstract = {The emerging edge computing paradigm has given rise to a new promising mobile network architecture, which can address a number of challenges that the operators are facing while trying to support growing end user's needs by shifting the computation from the base station to the edge cloud computing facilities. With such powerfully computational power, traditional unpractical resource allocation algorithms could be feasible. However, even with near optimal algorithms, the allocation result could still be far from optimal due to the inaccurate modeling of interference among sensor nodes. Such a dilemma calls for a measurement data-driven resource allocation to improve the total capacity. Meanwhile, the measurement process of inter-nodes' interference could be tedious, time-consuming and have low accuracy, which further compromise the benefits brought by the edge computing paradigm. To this end, we propose a measurement-based estimation solution to obtain the interference efficiently and intelligently by dynamically controlling the measurement and estimation through an accuracy-driven model. Basically, the measurement cost is reduced through the link similarity model and the channel derivation model. 
Compared to the exhausting measurement method, it can significantly reduce the time cost to the linear order of the network size with guaranteed accuracy through measurement scheduling and the accuracy control process, which could also balance the tradeoff between accuracy and measurement overhead. Extensive experiments based on real data traces are conducted to show the efficiency of the proposed solutions.}, } @article {pmid30200382, year = {2018}, author = {Li, C and Ji, S and Zhang, X and Wang, H and Li, D and Liu, H}, title = {An Effective and Secure Key Management Protocol for Message Delivery in Autonomous Vehicular Clouds.}, journal = {Sensors (Basel, Switzerland)}, volume = {18}, number = {9}, pages = {}, pmid = {30200382}, issn = {1424-8220}, abstract = {Autonomous vehicular clouds, as the combination of cloud computing and conventional vehicular ad hoc networks, will provide abundant resources and services by sharing under-utilized resources of future high-end vehicles such as computing power, storage and internet connectivity. Autonomous vehicular clouds will have significant impact if widely implemented in the intelligent transportation system. However, security and privacy issues are still big challenges in autonomous vehicular clouds. In this paper, after analyzing the particularity of autonomous vehicular clouds, we implement a two-layered architecture, in which vehicles are self-organized without the help of roadside units. Then based on the architecture, we put forward an effective key management protocol to distribute a group key efficiently and also provide the authentication and confidentiality that lots of current secure schemes ignore. In addition, according to the different scenarios and security levels we categorize the way of message transmitting into three kinds. 
At last, with performance evaluations, the proposed protocol can perform more efficiently than other well-known available schemes.}, } @article {pmid30183641, year = {2018}, author = {Chen, J and Li, K and Bilal, K and Metwally, AA and Li, K and Yu, P}, title = {Parallel Protein Community Detection in Large-scale PPI Networks Based on Multi-source Learning.}, journal = {IEEE/ACM transactions on computational biology and bioinformatics}, volume = {}, number = {}, pages = {}, doi = {10.1109/TCBB.2018.2868088}, pmid = {30183641}, issn = {1557-9964}, abstract = {Protein interactions constitute the fundamental building block of almost every life activity. Identifying protein communities from Protein-Protein Interaction (PPI) networks is essential to understand the principles of cellular organization and explore the causes of various diseases. It is critical to integrate multiple data resources to identify reliable protein communities that have biological significance and improve the performance of community detection methods for large-scale PPI networks. In this paper, we propose a Multi-source Learning based Protein Community Detection (MLPCD) algorithm by integrating Gene Expression Data (GED) and a parallel solution of MLPCD using cloud computing technology. GED under different conditions is integrated with the original PPI network to reconstruct a Weighted-PPI network. To flexibly identify protein communities of different scales, we define community modularity and functional cohesion measurements and detect protein communities from WPPI. In addition, we compare the detected communities with known protein complexes and evaluate the function enrichment of protein functional modules using Gene Ontology annotations. We implement a parallel version of MLPCD on the Apache Spark platform to enhance the performance of the algorithm. 
Extensive experimental results indicate the superiority and notable advantages of the MLPCD algorithm over the relevant algorithms in terms of accuracy and performance.}, } @article {pmid30181454, year = {2018}, author = {Taherizadeh, S and Stankovski, V and Grobelnik, M}, title = {A Capillary Computing Architecture for Dynamic Internet of Things: Orchestration of Microservices from Edge Devices to Fog and Cloud Providers.}, journal = {Sensors (Basel, Switzerland)}, volume = {18}, number = {9}, pages = {}, pmid = {30181454}, issn = {1424-8220}, support = {732339, 815141, 636160//Horizon 2020/ ; }, abstract = {The adoption of advanced Internet of Things (IoT) technologies has impressively improved in recent years by placing such services at the extreme Edge of the network. There are, however, specific Quality of Service (QoS) trade-offs that must be considered, particularly in situations when workloads vary over time or when IoT devices are dynamically changing their geographic position. This article proposes an innovative capillary computing architecture, which benefits from mainstream Fog and Cloud computing approaches and relies on a set of new services, including an Edge/Fog/Cloud Monitoring System and a Capillary Container Orchestrator. All necessary Microservices are implemented as Docker containers, and their orchestration is performed from the Edge computing nodes up to Fog and Cloud servers in the geographic vicinity of moving IoT devices. A car equipped with a Motorhome Artificial Intelligence Communication Hardware (MACH) system as an Edge node connected to several Fog and Cloud computing servers was used for testing. 
Compared to using a fixed centralized Cloud provider, the service response time provided by our proposed capillary computing architecture was almost four times faster according to the 99th percentile value along with a significantly smaller standard deviation, which represents a high QoS.}, } @article {pmid32240399, year = {2018}, author = {Li, Q and Hong, Q and Qi, Q and Ma, X and Han, X and Tian, J}, title = {Towards additive manufacturing oriented geometric modeling using implicit functions.}, journal = {Visual computing for industry, biomedicine, and art}, volume = {1}, number = {1}, pages = {9}, pmid = {32240399}, issn = {2524-4442}, abstract = {Surface-based geometric modeling has many advantages in terms of visualization and traditional subtractive manufacturing using computer-numerical-control cutting-machine tools. However, it is not an ideal solution for additive manufacturing because to digitally print a surface-represented geometric object using a certain additive manufacturing technology, the object has to be converted into a solid representation. However, converting a known surface-based geometric representation into a printable representation is essentially a redesign process, and this is especially the case, when its interior material structure needs to be considered. To specify a 3D geometric object that is ready to be digitally manufactured, its representation has to be in a certain volumetric form. In this research, we show how some of the difficulties experienced in additive manufacturing can be easily solved by using implicitly represented geometric objects. Like surface-based geometric representation is subtractive manufacturing-friendly, implicitly described geometric objects are additive manufacturing-friendly: implicit shapes are 3D printing ready. 
The implicit geometric representation allows to combine a geometric shape, material colors, an interior material structure, and other required attributes in one single description as a set of implicit functions, and no conversion is needed. In addition, as implicit objects are typically specified procedurally, very little data is used in their specifications, which makes them particularly useful for design and visualization with modern cloud-based mobile devices, which usually do not have very big storage spaces. Finally, implicit modeling is a design procedure that is parallel computing-friendly, as the design of a complex geometric object can be divided into a set of simple shape-designing tasks, owing to the availability of shape-preserving implicit blending operations.}, } @article {pmid30171236, year = {2018}, author = {Haberl, MG and Churas, C and Tindall, L and Boassa, D and Phan, S and Bushong, EA and Madany, M and Akay, R and Deerinck, TJ and Peltier, ST and Ellisman, MH}, title = {CDeep3M-Plug-and-Play cloud-based deep learning for image segmentation.}, journal = {Nature methods}, volume = {15}, number = {9}, pages = {677-680}, pmid = {30171236}, issn = {1548-7105}, support = {P41 GM103412/GM/NIGMS NIH HHS/United States ; P41 GM103426/GM/NIGMS NIH HHS/United States ; R01 GM082949/GM/NIGMS NIH HHS/United States ; }, mesh = {*Cloud Computing ; *Deep Learning ; Image Processing, Computer-Assisted/*methods ; }, abstract = {As biomedical imaging datasets expand, deep neural networks are considered vital for image processing, yet community access is still limited by setting up complex computational environments and availability of high-performance computing resources. We address these bottlenecks with CDeep3M, a ready-to-use image segmentation solution employing a cloud-based deep convolutional neural network. 
We benchmark CDeep3M on large and complex two-dimensional and three-dimensional imaging datasets from light, X-ray, and electron microscopy.}, } @article {pmid30169739, year = {2019}, author = {Wang, Z and He, E and Sani, K and Jagodnik, KM and Silverstein, MC and Ma'ayan, A}, title = {Drug Gene Budger (DGB): an application for ranking drugs to modulate a specific gene based on transcriptomic signatures.}, journal = {Bioinformatics (Oxford, England)}, volume = {35}, number = {7}, pages = {1247-1248}, pmid = {30169739}, issn = {1367-4811}, support = {OT3 OD025467/OD/NIH HHS/United States ; T32 HL007824/HL/NHLBI NIH HHS/United States ; U24 CA224260/CA/NCI NIH HHS/United States ; U54 HL127624/HL/NHLBI NIH HHS/United States ; }, mesh = {Cell Phone ; *Drug Discovery/methods ; Gene Expression Regulation/drug effects ; Humans ; Internet ; Mobile Applications ; *Transcriptome ; }, abstract = {SUMMARY: Mechanistic molecular studies in biomedical research often discover important genes that are aberrantly over- or under-expressed in disease. However, manipulating these genes in an attempt to improve the disease state is challenging. Herein, we reveal Drug Gene Budger (DGB), a web-based and mobile application developed to assist investigators in order to prioritize small molecules that are predicted to maximally influence the expression of their target gene of interest. With DGB, users can enter a gene symbol along with the wish to up-regulate or down-regulate its expression. The output of the application is a ranked list of small molecules that have been experimentally determined to produce the desired expression effect. The table includes log-transformed fold change, P-value and q-value for each small molecule, reporting the significance of differential expression as determined by the limma method. 
Relevant links are provided to further explore knowledge about the target gene, the small molecule and the source of evidence from which the relationship between the small molecule and the target gene was derived. The experimental data contained within DGB is compiled from signatures extracted from the LINCS L1000 dataset, the original Connectivity Map (CMap) dataset and the Gene Expression Omnibus (GEO). DGB also presents a specificity measure for a drug-gene connection based on the number of genes a drug modulates. DGB provides a useful preliminary technique for identifying small molecules that can target the expression of a single gene in human cells and tissues.

The application is freely available on the web at http://DGB.cloud and as a mobile phone application on iTunes https://itunes.apple.com/us/app/drug-gene-budger/id1243580241?mt=8 and Google Play https://play.google.com/store/apps/details?id=com.drgenebudger.

SUPPLEMENTARY INFORMATION: Supplementary data are available at Bioinformatics online.}, } @article {pmid30134778, year = {2020}, author = {AlShaya, MS and Assery, MK and Pani, SC}, title = {Reliability of mobile phone teledentistry in dental diagnosis and treatment planning in mixed dentition.}, journal = {Journal of telemedicine and telecare}, volume = {26}, number = {1-2}, pages = {45-52}, doi = {10.1177/1357633X18793767}, pmid = {30134778}, issn = {1758-1109}, mesh = {Child ; Comparative Effectiveness Research ; Dental Care/*methods ; Dental Caries/*diagnosis ; Dentists/statistics & numerical data ; *Dentition, Mixed ; Female ; Humans ; Male ; Physical Examination/methods ; Reproducibility of Results ; Sensitivity and Specificity ; Telemedicine/*methods ; }, abstract = {INTRODUCTION: The aim of the study was to test the reliability of mobile phone teledentistry in the diagnosis and treatment planning of dental caries of children in mixed dentition.

METHODS: A total of 57 cases, aged 6-12 years, among six examiners were used yielding 342 comparisons. The patients were examined by a dentist who was calibrated in the recording of oral findings in children using the World Health Organization (WHO) oral health assessment form for children (version 2013), which served as the gold standard for diagnosis. Six paediatric dentists calibrated using the same WHO form served as the teledentistry group and made their diagnoses using only the images sent to them without the use of radiographs. The pictures obtained were stored on an online cloud platform (Google Drive). The sharing links for these pictures were forwarded to participating dentists using a social media application (WhatsApp Messenger, Facebook Corp., Mountain View, CA).

RESULTS: This study showed greater sensitivity than specificity, and though both sensitivity and specificity were constantly above 80%, it can be stated that the current model has a higher chance of false positive results than false negative results. The reliability of teledentistry in this study is greater in primary teeth than in permanent teeth.

DISCUSSION: Although the use of teledentistry without radiographs is not as accurate as clinical examination, mobile phone teledentistry offers acceptable reliability for the initial diagnosis of caries in children.}, } @article {pmid30110880, year = {2018}, author = {Belem Pacheco, LA and Pelinson Alchieri, EA and Mendez Barreto, PAS}, title = {Device-Based Security to Improve User Privacy in the Internet of Things [†].}, journal = {Sensors (Basel, Switzerland)}, volume = {18}, number = {8}, pages = {}, pmid = {30110880}, issn = {1424-8220}, abstract = {The use of Internet of Things (IoT) is rapidly growing and a huge amount of data is being generated by IoT devices. Cloud computing is a natural candidate to handle this data since it has enough power and capacity to process, store and control data access. Moreover, this approach brings several benefits to the IoT, such as the aggregation of all IoT data in a common place and the use of cloud services to consume this data and provide useful applications. However, enforcing user privacy when sending sensitive information to the cloud is a challenge. This work presents and evaluates an architecture to provide privacy in the integration of IoT and cloud computing. The proposed architecture, called PROTeCt-Privacy aRquitecture for integratiOn of internet of Things and Cloud computing, improves user privacy by implementing privacy enforcement at the IoT devices instead of at the gateway, as is usually done. Consequently, the proposed approach improves both system security and fault tolerance, since it removes the single point of failure (gateway). The proposed architecture is evaluated through an analytical analysis and simulations with severely constrained devices, where delay and energy consumption are evaluated and compared to other architectures. 
The obtained results show the practical feasibility of the proposed solutions and demonstrate that the overheads introduced in the IoT devices are worthwhile considering the increased level of privacy and security.}, } @article {pmid30104529, year = {2018}, author = {Froiz-Míguez, I and Fernández-Caramés, TM and Fraga-Lamas, P and Castedo, L}, title = {Design, Implementation and Practical Evaluation of an IoT Home Automation System for Fog Computing Applications Based on MQTT and ZigBee-WiFi Sensor Nodes.}, journal = {Sensors (Basel, Switzerland)}, volume = {18}, number = {8}, pages = {}, pmid = {30104529}, issn = {1424-8220}, abstract = {In recent years, the improvement of wireless protocols, the development of cloud services and the lower cost of hardware have started a new era for smart homes. One such enabling technologies is fog computing, which extends cloud computing to the edge of a network allowing for developing novel Internet of Things (IoT) applications and services. Under the IoT fog computing paradigm, IoT gateways are usually utilized to exchange messages with IoT nodes and a cloud. WiFi and ZigBee stand out as preferred communication technologies for smart homes. WiFi has become very popular, but it has a limited application due to its high energy consumption and the lack of standard mesh networking capabilities for low-power devices. For such reasons, ZigBee was selected by many manufacturers for developing wireless home automation devices. As a consequence, these technologies may coexist in the 2.4 GHz band, which leads to collisions, lower speed rates and increased communications latencies. This article presents ZiWi, a distributed fog computing Home Automation System (HAS) that allows for carrying out seamless communications among ZigBee and WiFi devices. This approach diverges from traditional home automation systems, which often rely on expensive central controllers. 
In addition, to ease the platform's building process, whenever possible, the system makes use of open-source software (all the code of the nodes is available on GitHub) and Commercial Off-The-Shelf (COTS) hardware. The initial results, which were obtained in a number of representative home scenarios, show that the developed fog services respond several times faster than the evaluated cloud services, and that cross-interference has to be taken seriously to prevent collisions. In addition, the current consumption of ZiWi's nodes was measured, showing the impact of encryption mechanisms.}, } @article {pmid30104516, year = {2018}, author = {Zhang, Y and Zhao, J and Zheng, D and Deng, K and Ren, F and Zheng, X and Shu, J}, title = {Privacy-Preserving Data Aggregation against False Data Injection Attacks in Fog Computing.}, journal = {Sensors (Basel, Switzerland)}, volume = {18}, number = {8}, pages = {}, pmid = {30104516}, issn = {1424-8220}, abstract = {As an extension of cloud computing, fog computing has received more attention in recent years. It can solve problems such as high latency, lack of support for mobility and location awareness in cloud computing. In the Internet of Things (IoT), a series of IoT devices can be connected to the fog nodes that assist a cloud service center to store and process a part of data in advance. Not only can it reduce the pressure of processing data, but also improve the real-time and service quality. However, data processing at fog nodes suffers from many challenging issues, such as false data injection attacks, data modification attacks, and IoT devices' privacy violation. In this paper, based on the Paillier homomorphic encryption scheme, we use blinding factors to design a privacy-preserving data aggregation scheme in fog computing. No matter whether the fog node and the cloud control center are honest or not, the proposed scheme ensures that the injection data is from legal IoT devices and is not modified and leaked. 
The proposed scheme also has fault tolerance, which means that the collection of data from other devices will not be affected even if certain fog devices fail to work. In addition, security analysis and performance evaluation indicate the proposed scheme is secure and efficient.}, } @article {pmid30099000, year = {2018}, author = {Chekroud, SR and Gueorguieva, R and Zheutlin, AB and Paulus, M and Krumholz, HM and Krystal, JH and Chekroud, AM}, title = {Association between physical exercise and mental health in 1·2 million individuals in the USA between 2011 and 2015: a cross-sectional study.}, journal = {The lancet. Psychiatry}, volume = {5}, number = {9}, pages = {739-746}, doi = {10.1016/S2215-0366(18)30227-X}, pmid = {30099000}, issn = {2215-0374}, support = {203139/Z/16/Z//Wellcome Trust/United Kingdom ; }, mesh = {Adolescent ; Adult ; Aged ; Aged, 80 and over ; Cross-Sectional Studies ; *Exercise ; Female ; Humans ; Male ; Mental Disorders/*epidemiology ; *Mental Health ; Middle Aged ; Quality of Life ; Regression Analysis ; Self Report ; Socioeconomic Factors ; United States/epidemiology ; Young Adult ; }, abstract = {BACKGROUND: Exercise is known to be associated with reduced risk of all-cause mortality, cardiovascular disease, stroke, and diabetes, but its association with mental health remains unclear. We aimed to examine the association between exercise and mental health burden in a large sample, and to better understand the influence of exercise type, frequency, duration, and intensity.

METHODS: In this cross-sectional study, we analysed data from 1 237 194 people aged 18 years or older in the USA from the 2011, 2013, and 2015 Centers for Disease Control and Prevention Behavioral Risk Factors Surveillance System survey. We compared the number of days of bad self-reported mental health between individuals who exercised and those who did not, using an exact non-parametric matching procedure to balance the two groups in terms of age, race, gender, marital status, income, education level, body-mass index category, self-reported physical health, and previous diagnosis of depression. We examined the effects of exercise type, duration, frequency, and intensity using regression methods adjusted for potential confounders, and did multiple sensitivity analyses.

FINDINGS: Individuals who exercised had 1·49 (43·2%) fewer days of poor mental health in the past month than individuals who did not exercise but were otherwise matched for several physical and sociodemographic characteristics (W=7·42 × 10[10], p<2·2 × 10[-16]). All exercise types were associated with a lower mental health burden (minimum reduction of 11·8% and maximum reduction of 22·3%) than not exercising (p<2·2 × 10[-16] for all exercise types). The largest associations were seen for popular team sports (22·3% lower), cycling (21·6% lower), and aerobic and gym activities (20·1% lower), as well as durations of 45 min and frequencies of three to five times per week.

INTERPRETATION: In a large US sample, physical exercise was significantly and meaningfully associated with self-reported mental health burden in the past month. More exercise was not always better. Differences as a function of exercise were large relative to other demographic variables such as education and income. Specific types, durations, and frequencies of exercise might be more effective clinical targets than others for reducing mental health burden, and merit interventional study.

FUNDING: Cloud computing resources were provided by Microsoft.}, } @article {pmid30089608, year = {2018}, author = {Wang, MH and Chen, HK and Hsu, MH and Wang, HC and Yeh, YT}, title = {Cloud Computing for Infectious Disease Surveillance and Control: Development and Evaluation of a Hospital Automated Laboratory Reporting System.}, journal = {Journal of medical Internet research}, volume = {20}, number = {8}, pages = {e10886}, pmid = {30089608}, issn = {1438-8871}, mesh = {Cloud Computing/*trends ; Communicable Diseases/*epidemiology ; Electronic Health Records/*trends ; Humans ; Population Surveillance/*methods ; }, abstract = {BACKGROUND: Outbreaks of several serious infectious diseases have occurred in recent years. In response, to mitigate public health risks, countries worldwide have dedicated efforts to establish an information system for effective disease monitoring, risk assessment, and early warning management for international disease outbreaks. A cloud computing framework can effectively provide the required hardware resources and information access and exchange to conveniently connect information related to infectious diseases and develop a cross-system surveillance and control system for infectious diseases.

OBJECTIVE: The objective of our study was to develop a Hospital Automated Laboratory Reporting (HALR) system based on such a framework and evaluate its effectiveness.

METHODS: We collected data for 6 months and analyzed the cases reported within this period by the HALR and the Web-based Notifiable Disease Reporting (WebNDR) systems. Furthermore, system evaluation indicators were gathered, including those evaluating sensitivity and specificity.

RESULTS: The HALR system reported 15 pathogens and 5174 cases, and the WebNDR system reported 34 cases. In a comparison of the two systems, sensitivity was 100% and specificity varied according to the reported pathogens. In particular, the specificity for Streptococcus pneumoniae, Mycobacterium tuberculosis complex, and hepatitis C virus were 99.8%, 96.6%, and 97.4%, respectively. However, the specificity for influenza virus and hepatitis B virus were only 79.9% and 47.1%, respectively. After the reported data were integrated with patients' diagnostic results in their electronic medical records (EMRs), the specificity for influenza virus and hepatitis B virus increased to 89.2% and 99.1%, respectively.

CONCLUSIONS: The HALR system can provide early reporting of specified pathogens according to test results, allowing for early detection of outbreaks and providing trends in infectious disease data. The results of this study show that the sensitivity and specificity of early disease detection can be increased by integrating the reported data in the HALR system with the cases' clinical information (eg, diagnostic results) in EMRs, thereby enhancing the control and prevention of infectious diseases.}, } @article {pmid30085034, year = {2018}, author = {Zhang, P and Hung, LH and Lloyd, W and Yeung, KY}, title = {Hot-starting software containers for STAR aligner.}, journal = {GigaScience}, volume = {7}, number = {8}, pages = {}, pmid = {30085034}, issn = {2047-217X}, support = {R01 GM126019/GM/NIGMS NIH HHS/United States ; U54 HL127624/HL/NHLBI NIH HHS/United States ; }, mesh = {Asthma/drug therapy/genetics/metabolism ; Computational Biology/*methods ; Humans ; Myocytes, Smooth Muscle/drug effects/metabolism ; *RNA Splicing ; Sequence Analysis, RNA/*methods ; *Software ; }, abstract = {BACKGROUND: Using software containers has become standard practice to reproducibly deploy and execute biomedical workflows on the cloud. However, some applications that contain time-consuming initialization steps will produce unnecessary costs for repeated executions.

FINDINGS: We demonstrate that hot-starting from containers that have been frozen after the application has already begun execution can speed up bioinformatics workflows by avoiding repetitive initialization steps. We use an open-source tool called Checkpoint and Restore in Userspace (CRIU) to save the state of the containers as a collection of checkpoint files on disk after it has read in the indices. The resulting checkpoint files are migrated to the host, and CRIU is used to regenerate the containers in that ready-to-run hot-start state. As a proof-of-concept example, we create a hot-start container for the spliced transcripts alignment to a reference (STAR) aligner and deploy this container to align RNA sequencing data. We compare the performance of the alignment step with and without checkpoints on cloud platforms using local and network disks.

CONCLUSIONS: We demonstrate that hot-starting Docker containers from snapshots taken after repetitive initialization steps are completed significantly speeds up the execution of the STAR aligner on all experimental platforms, including Amazon Web Services, Microsoft Azure, and local virtual machines. Our method can be potentially employed in other bioinformatics applications in which a checkpoint can be inserted after a repetitive initialization phase.}, } @article {pmid30084865, year = {2019}, author = {Lightbody, G and Haberland, V and Browne, F and Taggart, L and Zheng, H and Parkes, E and Blayney, JK}, title = {Review of applications of high-throughput sequencing in personalized medicine: barriers and facilitators of future progress in research and clinical application.}, journal = {Briefings in bioinformatics}, volume = {20}, number = {5}, pages = {1795-1811}, pmid = {30084865}, issn = {1477-4054}, mesh = {*Biomedical Research ; Cloud Computing ; Computational Biology ; Computer Security ; Ethics ; High-Throughput Nucleotide Sequencing/*methods ; *Precision Medicine ; }, abstract = {There has been an exponential growth in the performance and output of sequencing technologies (omics data) with full genome sequencing now producing gigabases of reads on a daily basis. These data may hold the promise of personalized medicine, leading to routinely available sequencing tests that can guide patient treatment decisions. In the era of high-throughput sequencing (HTS), computational considerations, data governance and clinical translation are the greatest rate-limiting steps. To ensure that the analysis, management and interpretation of such extensive omics data is exploited to its full potential, key factors, including sample sourcing, technology selection and computational expertise and resources, need to be considered, leading to an integrated set of high-performance tools and systems. 
This article provides an up-to-date overview of the evolution of HTS and the accompanying tools, infrastructure and data management approaches that are emerging in this space, which, if used within in a multidisciplinary context, may ultimately facilitate the development of personalized medicine.}, } @article {pmid30083806, year = {2019}, author = {Verma, P and Sood, SK}, title = {A comprehensive framework for student stress monitoring in fog-cloud IoT environment: m-health perspective.}, journal = {Medical & biological engineering & computing}, volume = {57}, number = {1}, pages = {231-244}, pmid = {30083806}, issn = {1741-0444}, mesh = {Algorithms ; Bayes Theorem ; *Cloud Computing ; Decision Making ; Humans ; *Internet ; Models, Theoretical ; Stress, Psychological/*diagnosis ; Students/*psychology ; *Telemedicine ; Time Factors ; }, abstract = {Over the last few years, Internet of Things (IoT) has opened the doors to innovations that facilitate interactions among things and humans. Focusing on healthcare domain, IoT devices such as medical sensors, visual sensors, cameras, and wireless sensor network are leading this evolutionary trend. In this direction, the paper proposes a novel, IoT-aware student-centric stress monitoring framework to predict student stress index at a particular context. Bayesian Belief Network (BBN) is used to classify the stress event as normal or abnormal using physiological readings collected from medical sensors at fog layer. Abnormal temporal structural data which is time-enriched dataset sequence is analyzed for various stress-related parameters at cloud layer. To compute the student stress index, a two-stage Temporal Dynamic Bayesian Network (TDBN) model is formed. This model computes stress based on four parameters, namely, leaf node evidences, workload, context, and student health trait. 
After computing the stress index of the student, decisions are taken in the form of alert generation mechanism with the deliverance of time-sensitive information to caretaker or responder. Experiments are conducted both at fog and cloud layer which hold evidence for the utility and accuracy of the BBN classifier and TDBN predictive model in our proposed system. Graphical Abstract Student stress monitoring in IoT-Fog Environment.}, } @article {pmid30078742, year = {2019}, author = {Neborachko, M and Pkhakadze, A and Vlasenko, I}, title = {Current trends of digital solutions for diabetes management.}, journal = {Diabetes & metabolic syndrome}, volume = {13}, number = {5}, pages = {2997-3003}, doi = {10.1016/j.dsx.2018.07.014}, pmid = {30078742}, issn = {1878-0334}, mesh = {*Artificial Intelligence ; Big Data ; Delivery of Health Care/*statistics & numerical data ; Diabetes Mellitus/*prevention & control ; Disease Management ; Humans ; *Internet ; *Machine Learning ; }, abstract = {Industry 4.0 is an updated concept of smart production, which is identified with the fourth industrial revolution and the emergence of cyber-physical systems. Industry 4.0 is the next stage in the digitization of productions and industries, where such technologies and concepts as the Internet of things, big data, predictive analytics, cloud computing, machine learning, machine interaction, artificial intelligence, robotics, 3D printing, augmented reality. As an area of therapy with the best market potential and one of the most expensive global diseases, diabetes attracts the best healthcare players, who use innovative technologies. 
Current trends in digitalization of diabetes management are presented.}, } @article {pmid30071654, year = {2018}, author = {Wang, J and Li, D}, title = {Adaptive Computing Optimization in Software-Defined Network-Based Industrial Internet of Things with Fog Computing.}, journal = {Sensors (Basel, Switzerland)}, volume = {18}, number = {8}, pages = {}, pmid = {30071654}, issn = {1424-8220}, abstract = {In recent years, cloud computing and fog computing have appeared one after the other, as promising technologies for augmenting the computing capability of devices locally. By offloading computational tasks to fog servers or cloud servers, the time for task processing decreases greatly. Thus, to guarantee the Quality of Service (QoS) of smart manufacturing systems, fog servers are deployed at network edge to provide fog computing services. In this paper, we study the following problems in a mixed computing system: (1) which computing mode should be chosen for a task in local computing, fog computing or cloud computing? (2) In the fog computing mode, what is the execution sequence for the tasks cached in a task queue? Thus, to solve the problems above, we design a Software-Defined Network (SDN) framework in a smart factory based on an Industrial Internet of Things (IIoT) system. A method based on Computing Mode Selection (CMS) and execution sequences based on the task priority (ASTP) is proposed in this paper. First, a CMS module is designed in the SDN controller and then, after operating the CMS algorithm, each task obtains an optimal computing mode. Second, the task priorities can be calculated according to their real-time performance and calculated amount. According to the task priority, the SDN controller sends a flow table to the SDN switch to complete the task transmission. In other words, the higher the task priority is, the earlier the fog computing service is obtained. 
Finally, a series of experiments and simulations are performed to evaluate the performance of the proposed method. The results show that our method can achieve real-time performance and high reliability in IIoT.}, } @article {pmid30068069, year = {2018}, author = {Kang, M and Park, E and Cho, BH and Lee, KS}, title = {Recent Patient Health Monitoring Platforms Incorporating Internet of Things-Enabled Smart Devices.}, journal = {International neurourology journal}, volume = {22}, number = {Suppl 2}, pages = {S76-82}, pmid = {30068069}, issn = {2093-4777}, support = {//Korea Health Industry Development Institute/ ; HI14C3228//Ministry of Health and Welfare/ ; //National Research Foundation of Korea/ ; 2016M3A9B6919189//Ministry of Science and Technology/ ; }, abstract = {Synergistic integration of the Internet of Things (IoT), cloud computing, and big data technologies in healthcare have led to the notion of "smart health." Smart health is an emerging concept that refers to the provision of healthcare services for prevention, diagnosis, treatment, and follow-up management at any time or any place by connecting information technologies and healthcare. As a significant breakthrough in smart healthcare development, IoT-enabled smart devices allow medical centers to carry out preventive care, diagnosis, and treatment more competently. This review focuses on recently developed patient health monitoring platforms based on IoT-enabled smart devices that can collect real-time patient data and transfer information for assessment by healthcare providers, including doctors, hospitals, and clinics, or for self-management. We aimed to summarize the available information about recently approved devices and state-of-the-art developments through a comprehensive, systematic literature review. In this review, we also discuss possible future directions for the integration of cloud computing and blockchain, which may offer unprecedented breakthroughs in on-demand medical services. 
The combination of IoT with real-time, remote patient monitoring empowers patients to assert more control over their care, thereby allowing them to actively monitor their particular health conditions.}, } @article {pmid30061371, year = {2018}, author = {Zhou, B and Ho, SS and Zhang, X and Pattni, R and Haraksingh, RR and Urban, AE}, title = {Whole-genome sequencing analysis of CNV using low-coverage and paired-end strategies is efficient and outperforms array-based CNV analysis.}, journal = {Journal of medical genetics}, volume = {55}, number = {11}, pages = {735-743}, pmid = {30061371}, issn = {1468-6244}, support = {P50 HG007735/HG/NHGRI NIH HHS/United States ; T32 HL110952/HL/NHLBI NIH HHS/United States ; }, mesh = {*Comparative Genomic Hybridization/methods/standards ; *DNA Copy Number Variations ; Genetic Association Studies/methods/standards ; Genetic Predisposition to Disease ; Genetic Testing ; *Genome, Human ; *Genomics/methods/standards ; Humans ; Reference Standards ; Reproducibility of Results ; Sensitivity and Specificity ; *Whole Genome Sequencing ; }, abstract = {BACKGROUND: Copy number variation (CNV) analysis is an integral component of the study of human genomes in both research and clinical settings. Array-based CNV analysis is the current first-tier approach in clinical cytogenetics. Decreasing costs in high-throughput sequencing and cloud computing have opened doors for the development of sequencing-based CNV analysis pipelines with fast turnaround times. We carry out a systematic and quantitative comparative analysis for several low-coverage whole-genome sequencing (WGS) strategies to detect CNV in the human genome.

METHODS: We compared the CNV detection capabilities of WGS strategies (short insert, 3 kb insert mate pair and 5 kb insert mate pair) each at 1×, 3× and 5× coverages relative to each other and to 17 currently used high-density oligonucleotide arrays. For benchmarking, we used a set of gold standard (GS) CNVs generated for the 1000 Genomes Project CEU subject NA12878.

RESULTS: Overall, low-coverage WGS strategies detect drastically more GS CNVs compared with arrays and are accompanied with smaller percentages of CNV calls without validation. Furthermore, we show that WGS (at ≥1× coverage) is able to detect all seven GS deletion CNVs >100 kb in NA12878, whereas only one is detected by most arrays. Lastly, we show that the much larger 15 Mbp Cri du chat deletion can be readily detected with short-insert paired-end WGS at even just 1× coverage.

CONCLUSIONS: CNV analysis using low-coverage WGS is efficient and outperforms the array-based analysis that is currently used for clinical cytogenetics.}, } @article {pmid30052763, year = {2019}, author = {Zhao, X}, title = {BinDash, software for fast genome distance estimation on a typical personal laptop.}, journal = {Bioinformatics (Oxford, England)}, volume = {35}, number = {4}, pages = {671-673}, doi = {10.1093/bioinformatics/bty651}, pmid = {30052763}, issn = {1367-4811}, mesh = {*Algorithms ; *Data Compression ; *Genomics ; Sequence Analysis, DNA ; *Software ; }, abstract = {MOTIVATION: The number of genomes (including meta-genomes) is increasing at an accelerating pace. In the near future, we may need to estimate pairwise distances between millions of genomes. Even with the use of cloud computing, very few softwares can perform such estimation.

RESULTS: The multi-threaded software BinDash can perform such estimation using only a typical personal laptop. BinDash implemented b-bit one-permutation rolling MinHash with optimal densification, an existing data-mining technique. BinDash empirically outperforms the state-of-the-art software in terms of precision, compression ratio, memory usage and runtime according to our evaluation. Our evaluation is performed with a Dell Inspiron 15 7559 Notebook on all bacterial genomes in RefSeq.

BinDash is released under the Apache 2.0 license at https://github.com/zhaoxiaofei/BinDash.

SUPPLEMENTARY INFORMATION: Supplementary data are available at Bioinformatics online.}, } @article {pmid30047918, year = {2018}, author = {Song, Y and He, X and Liu, Z and He, W and Sun, C and Wang, FY}, title = {Parallel Control of Distributed Parameter Systems.}, journal = {IEEE transactions on cybernetics}, volume = {48}, number = {12}, pages = {3291-3301}, doi = {10.1109/TCYB.2018.2849569}, pmid = {30047918}, issn = {2168-2275}, abstract = {In this paper, we study the control problems of distributed parameter systems, and discuss the limitations of traditional control methods. In recent years, social factors have gradually become an essential parameter of system modeling. For complex distributed parameter systems, the accurate modeling becomes difficult. With the rapid development of the network and the technology of big data and cloud computing, based on the advanced control theory of large-scale computing, we introduce the idea of parallel control to the control of distributed parameter systems. Parallel control is a method to accomplish tasks through the interaction of virtual and actual. Its core is to model the complex distributed parameter system on artificial society or artificial system, then analyze and evaluate it by computational experiment, and finally control and manage the distributed parameter system by parallel execution. 
Data-driven control and computational control are used in this method, which is a control idea that adapts to the rapid development of society.}, } @article {pmid30046025, year = {2018}, author = {Gu, B and Chen, Y and Liao, H and Zhou, Z and Zhang, D}, title = {A Distributed and Context-Aware Task Assignment Mechanism for Collaborative Mobile Edge Computing.}, journal = {Sensors (Basel, Switzerland)}, volume = {18}, number = {8}, pages = {}, pmid = {30046025}, issn = {1424-8220}, abstract = {Mobile edge computing (MEC) is an emerging technology that leverages computing, storage, and network resources deployed at the proximity of users to offload their delay-sensitive tasks. Various existing facilities including mobile devices with idle resources, vehicles, and MEC servers deployed at base stations or road side units, could act as edges in the network. Since task offloading incurs extra transmission energy consumption and transmission latency, two key questions to be addressed in such an environment are (i) should the workload be offloaded to the edge or computed in terminals? (ii) Which edge, among the available ones, should the task be offloaded to? In this paper, we formulate the task assignment problem as a one-to-many matching game which is a powerful tool for studying the formation of a mutual beneficial relationship between two sets of agents. The main goal of our task assignment mechanism design is to reduce overall energy consumption, while satisfying task owners' heterogeneous delay requirements and supporting good scalability. 
An intensive simulation is conducted to evaluate the efficiency of our proposed mechanism.}, } @article {pmid30026912, year = {2017}, author = {Jensen, TL and Frasketi, M and Conway, K and Villarroel, L and Hill, H and Krampis, K and Goll, JB}, title = {RSEQREP: RNA-Seq Reports, an open-source cloud-enabled framework for reproducible RNA-Seq data processing, analysis, and result reporting.}, journal = {F1000Research}, volume = {6}, number = {}, pages = {2162}, pmid = {30026912}, issn = {2046-1402}, support = {HHSN272200800013C/AI/NIAID NIH HHS/United States ; HHSN272201500002C/AI/NIAID NIH HHS/United States ; }, abstract = {RNA-Seq is increasingly being used to measure human RNA expression on a genome-wide scale. Expression profiles can be interrogated to identify and functionally characterize treatment-responsive genes. Ultimately, such controlled studies promise to reveal insights into molecular mechanisms of treatment effects, identify biomarkers, and realize personalized medicine. RNA-Seq Reports (RSEQREP) is a new open-source cloud-enabled framework that allows users to execute start-to-end gene-level RNA-Seq analysis on a preconfigured RSEQREP Amazon Virtual Machine Image (AMI) hosted by AWS or on their own Ubuntu Linux machine via a Docker container or installation script. The framework works with unstranded, stranded, and paired-end sequence FASTQ files stored locally, on Amazon Simple Storage Service (S3), or at the Sequence Read Archive (SRA). RSEQREP automatically executes a series of customizable steps including reference alignment, CRAM compression, reference alignment QC, data normalization, multivariate data visualization, identification of differentially expressed genes, heatmaps, co-expressed gene clusters, enriched pathways, and a series of custom visualizations. The framework outputs a file collection that includes a dynamically generated PDF report using R, knitr, and LaTeX, as well as publication-ready table and figure files. 
A user-friendly configuration file handles sample metadata entry, processing, analysis, and reporting options. The configuration supports time series RNA-Seq experimental designs with at least one pre- and one post-treatment sample for each subject, as well as multiple treatment groups and specimen types. All RSEQREP analyses components are built using open-source R code and R/Bioconductor packages allowing for further customization. As a use case, we provide RSEQREP results for a trivalent influenza vaccine (TIV) RNA-Seq study that collected 1 pre-TIV and 10 post-TIV vaccination samples (days 1-10) for 5 subjects and two specimen types (peripheral blood mononuclear cells and B-cells).}, } @article {pmid30021975, year = {2018}, author = {Scionti, A and Mazumdar, S and Portero, A}, title = {Towards a Scalable Software Defined Network-on-Chip for Next Generation Cloud.}, journal = {Sensors (Basel, Switzerland)}, volume = {18}, number = {7}, pages = {}, pmid = {30021975}, issn = {1424-8220}, abstract = {The rapid evolution of Cloud-based services and the growing interest in deep learning (DL)-based applications is putting increasing pressure on hyperscalers and general purpose hardware designers to provide more efficient and scalable systems. Cloud-based infrastructures must consist of more energy efficient components. The evolution must take place from the core of the infrastructure (i.e., data centers (DCs)) to the edges (Edge computing) to adequately support new/future applications. Adaptability/elasticity is one of the features required to increase the performance-to-power ratios. Hardware-based mechanisms have been proposed to support system reconfiguration mostly at the processing elements level, while fewer studies have been carried out regarding scalable, modular interconnected sub-systems. In this paper, we propose a scalable Software Defined Network-on-Chip (SDNoC)-based architecture. 
Our solution can easily be adapted to support devices ranging from low-power computing nodes placed at the edge of the Cloud to high-performance many-core processors in the Cloud DCs, by leveraging on a modular design approach. The proposed design merges the benefits of hierarchical network-on-chip (NoC) topologies (via fusing the ring and the 2D-mesh topology), with those brought by dynamic reconfiguration (i.e., adaptation). Our proposed interconnect allows for creating different types of virtualised topologies aiming at serving different communication requirements and thus providing better resource partitioning (virtual tiles) for concurrent tasks. To further allow the software layer controlling and monitoring of the NoC subsystem, a few customised instructions supporting a data-driven program execution model (PXM) are added to the processing element's instruction set architecture (ISA). In general, the data-driven programming and execution models are suitable for supporting the DL applications. We also introduce a mechanism to map a high-level programming language embedding concurrent execution models into the basic functionalities offered by our SDNoC for easing the programming of the proposed system. In the reported experiments, we compared our lightweight reconfigurable architecture to a conventional flattened 2D-mesh interconnection subsystem. Results show that our design provides an increment of the data traffic throughput of 9.5% and a reduction of 2.2× of the average packet latency, compared to the flattened 2D-mesh topology connecting the same number of processing elements (PEs) (up to 1024 cores). 
Similarly, power and resource (on FPGA devices) consumption is also low, confirming good scalability of the proposed architecture.}, } @article {pmid30014188, year = {2018}, author = {Thorogood, A}, title = {Canada: will privacy rules continue to favour open science?}, journal = {Human genetics}, volume = {137}, number = {8}, pages = {595--602}, pmid = {30014188}, issn = {1432-1203}, mesh = {Canada ; *Computer Security/legislation \& jurisprudence/standards ; *Databases, Genetic/legislation \& jurisprudence/standards ; *Genetic Privacy/legislation \& jurisprudence/standards ; Genetic Research/*legislation \& jurisprudence ; Humans ; *Personally Identifiable Information/legislation \& jurisprudence/standards ; }, abstract = {Canada's regulatory frameworks governing privacy and research are generally permissive of genomic data sharing, though they may soon be tightened in response to public concerns over commercial data handling practices and the strengthening of influential European privacy laws. Regulation can seem complex and uncertain, in part because of the constitutional division of power between federal and provincial governments over both privacy and health care. Broad consent is commonly practiced in genomic research, but without explicit regulatory recognition, it is often scrutinized by research or privacy oversight bodies. Secondary use of health-care data is legally permissible under limited circumstances. A new federal law prohibits genetic discrimination, but is subject to a constitutional challenge. Privacy laws require security safeguards proportionate to the data sensitivity, including breach notification. Special categories of data are not defined a priori. With some exceptions, Canadian researchers are permitted to share personal information internationally but are held accountable for safeguarding the privacy and security of these data. 
Cloud computing to store and share large scale data sets is permitted, if shared responsibilities for access, responsible use, and security are carefully articulated. For the moment, Canada's commercial sector is recognized as "adequate" by Europe, facilitating import of European data. Maintaining adequacy status under the new European General Data Protection Regulation (GDPR) is a concern because of Canada's weaker individual rights, privacy protections, and regulatory enforcement. Researchers must stay attuned to shifting international and national regulations to ensure a sustainable future for responsible genomic data sharing.}, } @article {pmid30006325, year = {2018}, author = {Balsari, S and Fortenko, A and Blaya, JA and Gropper, A and Jayaram, M and Matthan, R and Sahasranam, R and Shankar, M and Sarbadhikari, SN and Bierer, BE and Mandl, KD and Mehendale, S and Khanna, T}, title = {Reimagining Health Data Exchange: An Application Programming Interface-Enabled Roadmap for India.}, journal = {Journal of medical Internet research}, volume = {20}, number = {7}, pages = {e10725}, pmid = {30006325}, issn = {1438-8871}, mesh = {Computer Security/*trends ; Electronic Health Records/*standards ; Humans ; India ; Public Health/*methods ; Universal Health Insurance/*standards ; }, abstract = {In February 2018, the Government of India announced a massive public health insurance scheme extending coverage to 500 million citizens, in effect making it the world's largest insurance program. To meet this target, the government will rely on technology to effectively scale services, monitor quality, and ensure accountability. While India has seen great strides in informational technology development and outsourcing, cellular phone penetration, cloud computing, and financial technology, the digital health ecosystem is in its nascent stages and has been waiting for a catalyst to seed the system. 
This National Health Protection Scheme is expected to provide just this impetus for widespread adoption. However, health data in India are mostly not digitized. In the few instances that they are, the data are not standardized, not interoperable, and not readily accessible to clinicians, researchers, or policymakers. While such barriers to easy health information exchange are hardly unique to India, the greenfield nature of India's digital health infrastructure presents an excellent opportunity to avoid the pitfalls of complex, restrictive, digital health systems that have evolved elsewhere. We propose here a federated, patient-centric, application programming interface (API)-enabled health information ecosystem that leverages India's near-universal mobile phone penetration, universal availability of unique ID systems, and evolving privacy and data protection laws. It builds on global best practices and promotes the adoption of human-centered design principles, data minimization, and open standard APIs. 
The recommendations are the result of 18 months of deliberations with multiple stakeholders in India and the United States, including from academia, industry, and government.}, } @article {pmid30005633, year = {2018}, author = {Xue, Z and Warren, RL and Gibb, EA and MacMillan, D and Wong, J and Chiu, R and Hammond, SA and Yang, C and Nip, KM and Ennis, CA and Hahn, A and Reynolds, S and Birol, I}, title = {Recurrent tumor-specific regulation of alternative polyadenylation of cancer-related genes.}, journal = {BMC genomics}, volume = {19}, number = {1}, pages = {536}, pmid = {30005633}, issn = {1471-2164}, support = {HHSN261201400007C/CA/NCI NIH HHS/United States ; R21 CA187910/CA/NCI NIH HHS/United States ; R21CA187910//National Human Genome Research Institute/ ; }, mesh = {3' Untranslated Regions ; Cloud Computing ; Databases, Genetic ; Fibroblast Growth Factor 2/genetics ; Gene Expression Regulation, Neoplastic ; Humans ; Neoplasm Recurrence, Local/genetics ; Neoplasms/*genetics/pathology ; Polyadenylation ; RNA Cleavage ; RNA, Messenger/*genetics/metabolism ; Software ; }, abstract = {BACKGROUND: Alternative polyadenylation (APA) results in messenger RNA molecules with different 3' untranslated regions (3' UTRs), affecting the molecules' stability, localization, and translation. APA is pervasive and implicated in cancer. Earlier reports on APA focused on 3' UTR length modifications and commonly characterized APA events as 3' UTR shortening or lengthening. However, such characterization oversimplifies the processing of 3' ends of transcripts and fails to adequately describe the various scenarios we observe.

RESULTS: We built a cloud-based targeted de novo transcript assembly and analysis pipeline that incorporates our previously developed cleavage site prediction tool, KLEAT. We applied this pipeline to elucidate the APA profiles of 114 genes in 9939 tumor and 729 tissue normal samples from The Cancer Genome Atlas (TCGA). The full set of 10,668 RNA-Seq samples from 33 cancer types has not been utilized by previous APA studies. By comparing the frequencies of predicted cleavage sites between normal and tumor sample groups, we identified 77 events (i.e. gene-cancer type pairs) of tumor-specific APA regulation in 13 cancer types; for 15 genes, such regulation is recurrent across multiple cancers. Our results also support a previous report showing the 3' UTR shortening of FGF2 in multiple cancers. However, over half of the events we identified display complex changes to 3' UTR length that resist simple classification like shortening or lengthening.

CONCLUSIONS: Recurrent tumor-specific regulation of APA is widespread in cancer. However, the regulation pattern that we observed in TCGA RNA-seq data cannot be described as straightforward 3' UTR shortening or lengthening. Continued investigation into this complex, nuanced regulatory landscape will provide further insight into its role in tumor formation and development.}, } @article {pmid29997108, year = {2018}, author = {Gao, F and Thiebes, S and Sunyaev, A}, title = {Rethinking the Meaning of Cloud Computing for Health Care: A Taxonomic Perspective and Future Research Directions.}, journal = {Journal of medical Internet research}, volume = {20}, number = {7}, pages = {e10041}, pmid = {29997108}, issn = {1438-8871}, mesh = {Classification/*methods ; Cloud Computing/*standards ; Humans ; Medical Informatics/*methods ; Research Design ; }, abstract = {BACKGROUND: Cloud computing is an innovative paradigm that provides users with on-demand access to a shared pool of configurable computing resources such as servers, storage, and applications. Researchers claim that information technology (IT) services delivered via the cloud computing paradigm (ie, cloud computing services) provide major benefits for health care. However, due to a mismatch between our conceptual understanding of cloud computing for health care and the actual phenomenon in practice, the meaningful use of it for the health care industry cannot always be ensured. Although some studies have tried to conceptualize cloud computing or interpret this phenomenon for health care settings, they have mainly relied on its interpretation in a common context or have been heavily based on a general understanding of traditional health IT artifacts, leading to an insufficient or unspecific conceptual understanding of cloud computing for health care.

OBJECTIVE: We aim to generate insights into the concept of cloud computing for health IT research. We propose a taxonomy that can serve as a fundamental mechanism for organizing knowledge about cloud computing services in health care organizations to gain a deepened, specific understanding of cloud computing in health care. With the taxonomy, we focus on conceptualizing the relevant properties of cloud computing for service delivery to health care organizations and highlighting their specific meanings for health care.

METHODS: We employed a 2-stage approach in developing a taxonomy of cloud computing services for health care organizations. We conducted a structured literature review and 24 semistructured expert interviews in stage 1, drawing on data from theory and practice. In stage 2, we applied a systematic approach and relied on data from stage 1 to develop and evaluate the taxonomy using 14 iterations.

RESULTS: Our taxonomy is composed of 8 dimensions and 28 characteristics that are relevant for cloud computing services in health care organizations. By applying the taxonomy to classify existing cloud computing services identified from the literature and expert interviews, which also serves as a part of the taxonomy, we identified 7 specificities of cloud computing in health care. These specificities challenge what we have learned about cloud computing in general contexts or in traditional health IT from the previous literature. The summarized specificities suggest research opportunities and exemplary research questions for future health IT research on cloud computing.

CONCLUSIONS: By relying on perspectives from a taxonomy for cloud computing services for health care organizations, this study provides a solid conceptual cornerstone for cloud computing in health care. Moreover, the identified specificities of cloud computing and the related future research opportunities will serve as a valuable roadmap to facilitate more research into cloud computing in health care.}, } @article {pmid29995117, year = {2018}, author = {Lopes, NLV and Gracitelli, CPB and Moura, CR}, title = {Creation of a childhood glaucoma registry database.}, journal = {Arquivos brasileiros de oftalmologia}, volume = {81}, number = {4}, pages = {271-275}, doi = {10.5935/0004-2749.20180055}, pmid = {29995117}, issn = {1678-2925}, mesh = {Child ; *Database Management Systems ; *Glaucoma ; Humans ; Internet ; *Online Systems ; Pilot Projects ; *Registries ; Remote Consultation/*methods ; *Surveys and Questionnaires ; }, abstract = {PURPOSE: The aim is creation of a database to register patients diagnosed with childhood glaucoma. Information collection and storage will utilize a digital platform that permits multiple centers to register patients. This registry will be an important tool to evaluate and group patient profiles, thus allowing identification of risk factors, estimating prevalence, and recruiting participants in future studies.

METHODS: An online form was designed at the Department of Ophthalmology and Visual Sciences of the Federal University of São Paulo to create a registry of patients diagnosed with congenital glaucoma. In addition, a pilot study was conducted in the same institution to validate the Google Forms platform that comprises the registry questionnaire.

RESULTS: Data can be entered online and stored in the cloud. The database allows the acquisition and storage of epidemiological and clinical data, detection of patterns, and evaluation of the current treatment of children with childhood glaucoma. The data from multiple centers can be combined as collection in online and storage is in the cloud. The form is comprehensive and includes social data, gestational history, family history, clinical data on entering the center, and the treatment received.

CONCLUSIONS: Google Forms is a useful tool for collecting and analyzing statistical data, facilitating the process of registering patients, and analyzing information. Using the online questionnaire, it will be possible to create a multicenter online database that allows identification of risk factors and evaluation of the outcome of interventions and treatment.}, } @article {pmid29994567, year = {2018}, author = {Jindal, A and Dua, A and Kumar, N and Das, AK and Vasilakos, AV and Rodrigues, JJPC}, title = {Providing Healthcare-as-a-Service Using Fuzzy Rule Based Big Data Analytics in Cloud Computing.}, journal = {IEEE journal of biomedical and health informatics}, volume = {22}, number = {5}, pages = {1605-1618}, doi = {10.1109/JBHI.2018.2799198}, pmid = {29994567}, issn = {2168-2208}, mesh = {*Cloud Computing ; *Computer Communication Networks ; *Fuzzy Logic ; Humans ; *Medical Informatics Applications ; }, abstract = {With advancements in information and communication technology, there is a steep increase in the remote healthcare applications in which patients can get treatment from the remote places also. The data collected about the patients by remote healthcare applications constitute big data because it varies with volume, velocity, variety, veracity, and value. To process such a large collection of heterogeneous data is one of the biggest challenges which requires a specialized approach. To address this challenge, a new fuzzy rule based classifier is presented in this paper with an aim to provide Healthcare-as-a-Service. The proposed scheme is based upon the initial cluster formation, retrieval, and processing of the big data in cloud environment. Then, a fuzzy rule based classifier is designed for efficient decision making for data classification in the proposed scheme. To perform inferencing from the collected data, membership functions are designed for fuzzification and defuzzification processes. 
The proposed scheme is evaluated on various evaluation metrics, such as average response time, accuracy, computation cost, classification time, and false positive ratio. The results obtained confirm the effectiveness of the proposed scheme with respect to various performance evaluation metrics in cloud computing environment.}, } @article {pmid29994490, year = {2019}, author = {Li, W and Liu, BM and Liu, D and Liu, RP and Wang, P and Luo, S and Ni, W}, title = {Unified Fine-Grained Access Control for Personal Health Records in Cloud Computing.}, journal = {IEEE journal of biomedical and health informatics}, volume = {23}, number = {3}, pages = {1278-1289}, doi = {10.1109/JBHI.2018.2850304}, pmid = {29994490}, issn = {2168-2208}, mesh = {Algorithms ; *Cloud Computing ; *Computer Security ; *Electronic Health Records ; *Health Records, Personal ; Humans ; Information Storage and Retrieval/*methods ; }, abstract = {Attribute-based encryption has been a promising encryption technology to secure personal health records (PHRs) sharing in cloud computing. PHRs consist of the patient data often collected from various sources including hospitals and general practice centres. Different patients' access policies have a common access sub-policy. In this paper, we propose a novel attribute-based encryption scheme for fine-grained and flexible access control to PHRs data in cloud computing. The scheme generates shared information by the common access sub-policy, which is based on different patients' access policies. Then, the scheme combines the encryption of PHRs from different patients. Therefore, both time consumption of encryption and decryption can be reduced. Medical staff require varying levels of access to PHRs. The proposed scheme can also support multi-privilege access control so that medical staff can access the required level of information while maximizing patient privacy. 
Through implementation and simulation, we demonstrate that the proposed scheme is efficient in terms of time. Moreover, we prove the security of the proposed scheme based on security of the ciphertext-policy attribute-based encryption scheme.}, } @article {pmid29994265, year = {2018}, author = {Halioui, A and Valtchev, P and Diallo, AB}, title = {Bioinformatic workflow extraction from scientific texts based on word sense disambiguation and relation extraction.}, journal = {IEEE/ACM transactions on computational biology and bioinformatics}, volume = {}, number = {}, pages = {}, doi = {10.1109/TCBB.2018.2847336}, pmid = {29994265}, issn = {1557-9964}, abstract = {This paper introduces a method for automatic workflow extraction from texts using Process-Oriented Case-Based Reasoning (POCBR). While the current workflow management systems implement mostly different complicated graphical tasks based on advanced distributed solutions (e.g. cloud computing and grid computation), workflow knowledge acquisition from texts using case-based reasoning represents more expressive and semantic cases representations. We propose in this context, an ontology-based workflow extraction framework to acquire processual knowledge from texts. Our methodology extends classic NLP techniques to extract and disambiguate tasks and relations in texts. Using a graph-based representation of workflows and a domain ontology, our extraction process uses a context-aware approach to recognize workflow components: data and control flows. We applied our framework in a technical domain in bioinformatics: i.e. phylogenetic analyses. An evaluation based on workflow semantic similarities on a gold standard proves that our approach provides promising results in the process extraction domain. 
Both data and implementation of our framework are available in: http://labo.bioinfo.uqam.ca/tgrowler.}, } @article {pmid29994005, year = {2019}, author = {Jiang, Y and Hamer, J and Wang, C and Jiang, X and Kim, M and Song, Y and Xia, Y and Mohammed, N and Sadat, MN and Wang, S}, title = {SecureLR: Secure Logistic Regression Model via a Hybrid Cryptographic Protocol.}, journal = {IEEE/ACM transactions on computational biology and bioinformatics}, volume = {16}, number = {1}, pages = {113-123}, doi = {10.1109/TCBB.2018.2833463}, pmid = {29994005}, issn = {1557-9964}, support = {R00 HG008175/HG/NHGRI NIH HHS/United States ; R01 GM114612/GM/NIGMS NIH HHS/United States ; U01 EB023685/EB/NIBIB NIH HHS/United States ; }, mesh = {Algorithms ; *Cloud Computing ; *Computer Security ; Electronic Health Records ; *Logistic Models ; Machine Learning ; Medical Informatics ; *Software ; }, abstract = {Machine learning applications are intensively utilized in various science fields, and increasingly the biomedical and healthcare sector. Applying predictive modeling to biomedical data introduces privacy and security concerns requiring additional protection to prevent accidental disclosure or leakage of sensitive patient information. Significant advancements in secure computing methods have emerged in recent years, however, many of which require substantial computational and/or communication overheads, which might hinder their adoption in biomedical applications. In this work, we propose SecureLR, a novel framework allowing researchers to leverage both the computational and storage capacity of Public Cloud Servers to conduct learning and predictions on biomedical data without compromising data security or efficiency. 
Our model builds upon homomorphic encryption methodologies with hardware-based security reinforcement through Software Guard Extensions (SGX), and our implementation demonstrates a practical hybrid cryptographic solution to address important concerns in conducting machine learning with public clouds.}, } @article {pmid29993643, year = {2018}, author = {Celesti, F and Celesti, A and Wan, J and Villari, M}, title = {Why Deep Learning Is Changing the Way to Approach NGS Data Processing: A Review.}, journal = {IEEE reviews in biomedical engineering}, volume = {11}, number = {}, pages = {68-76}, doi = {10.1109/RBME.2018.2825987}, pmid = {29993643}, issn = {1941-1189}, mesh = {Algorithms ; Animals ; Big Data ; Deep Learning/*trends ; Genomics/*trends ; High-Throughput Nucleotide Sequencing/*trends ; Humans ; Internet ; Sequence Analysis, DNA/*trends ; Software ; }, abstract = {Nowadays, big data analytics in genomics is an emerging research topic. In fact, the large amount of genomics data originated by emerging next-generation sequencing (NGS) techniques requires more and more fast and sophisticated algorithms. In this context, deep learning is re-emerging as a possible approach to speed up the DNA sequencing process. In this review, we specifically discuss such a trend. In particular, starting from an analysis of the interest of the Internet community in both NGS and deep learning, we present a taxonomic analysis highlighting the major software solutions based on deep learning algorithms available for each specific NGS application field. 
We discuss future challenges in the perspective of cloud computing services aimed at deep learning based solutions for NGS.}, } @article {pmid29973569, year = {2018}, author = {Wu, A and Zheng, D and Zhang, Y and Yang, M}, title = {Hidden Policy Attribute-Based Data Sharing with Direct Revocation and Keyword Search in Cloud Computing.}, journal = {Sensors (Basel, Switzerland)}, volume = {18}, number = {7}, pages = {}, pmid = {29973569}, issn = {1424-8220}, abstract = {Attribute-based encryption can be used to realize fine-grained data sharing in open networks. However, in practical applications, we have to address further challenging issues, such as attribute revocation and data search. How do data users search for the data they need in massive amounts of data? When users leave the system, they lose the right to decrypt the shared data. In this case, how do we ensure that revoked users cannot decrypt shared data? In this paper, we successfully address these issues by proposing a hidden policy attribute-based data sharing scheme with direct revocation and keyword search. In the proposed scheme, the direct revocation of attributes does not need to update the private key of non-revoked users during revocation. In addition, a keyword search is realized in our scheme, and the search time is constant with the increase in attributes. In particular, the policy is hidden in our scheme, and hence, users' privacy is protected. 
Our security and performance analyses show that the proposed scheme can tackle the security and efficiency concerns in cloud computing.}, } @article {pmid29968595, year = {2018}, author = {Nikolopoulos, M and Karampela, I and Tzortzis, E and Dalamaga, M}, title = {Deploying Cloud Computing in the Greek Healthcare System: A Modern Development Proposal Incorporating Clinical and Laboratory Data.}, journal = {Studies in health technology and informatics}, volume = {251}, number = {}, pages = {35-38}, pmid = {29968595}, issn = {1879-8365}, mesh = {*Cloud Computing ; *Computer Security ; Delivery of Health Care ; Greece ; *Hospital Information Systems ; Hospitals ; Internet ; }, abstract = {Cloud computing is a reality in most business sectors. Hospitals have been more reluctant to adopt cloud technology due to strict data security regulations. Cloud could provide economies of scale reducing Information Technology spending in the Greek state-owned hospitals, while giving the opportunity to the hospitals to upgrade their profile offering web-based services. We propose a simple, robust and easy to apply approach for the Greek hospitals, focusing on clinical and laboratory data in order to move to the cloud environment. To the best of our knowledge, there is no other study regarding the adoption of cloud infrastructure in the Greek hospital sector. This innovative method could transform the business model of the hospitals.}, } @article {pmid29966374, year = {2018}, author = {Park, D and Kim, S and An, Y and Jung, JY}, title = {LiReD: A Light-Weight Real-Time Fault Detection System for Edge Computing Using LSTM Recurrent Neural Networks.}, journal = {Sensors (Basel, Switzerland)}, volume = {18}, number = {7}, pages = {}, pmid = {29966374}, issn = {1424-8220}, abstract = {Monitoring the status of the facilities and detecting any faults are considered an important technology in a smart factory. 
Although the faults of machine can be analyzed in real time using collected data, it requires a large amount of computing resources to handle the massive data. A cloud server can be used to analyze the collected data, but it is more efficient to adopt the edge computing concept that employs edge devices located close to the facilities. Edge devices can improve data processing and analysis speed and reduce network costs. In this paper, an edge device capable of collecting, processing, storing and analyzing data is constructed by using a single-board computer and a sensor. And, a fault detection model for machine is developed based on the long short-term memory (LSTM) recurrent neural networks. The proposed system called LiReD was implemented for an industrial robot manipulator and the LSTM-based fault detection model showed the best performance among six fault detection models.}, } @article {pmid29964370, year = {2018}, author = {Sanduja, S and Jewell, P and Aron, E and Pharai, N}, title = {ERRATUM: Cloud Computing for Pharmacometrics: Using AWS, NONMEM, PsN, Grid Engine, and Sonic.}, journal = {CPT: pharmacometrics & systems pharmacology}, volume = {7}, number = {6}, pages = {413}, doi = {10.1002/psp4.12296}, pmid = {29964370}, issn = {2163-8306}, } @article {pmid29964127, year = {2019}, author = {Lorenzo, AJ and Rickard, M and Braga, LH and Guo, Y and Oliveria, JP}, title = {Predictive Analytics and Modeling Employing Machine Learning Technology: The Next Step in Data Sharing, Analysis, and Individualized Counseling Explored With a Large, Prospective Prenatal Hydronephrosis Database.}, journal = {Urology}, volume = {123}, number = {}, pages = {204-209}, doi = {10.1016/j.urology.2018.05.041}, pmid = {29964127}, issn = {1527-9995}, mesh = {Algorithms ; Counseling ; *Data Analysis ; *Databases, Factual ; Female ; Forecasting ; Humans ; Hydronephrosis/*congenital/*surgery ; Infant ; *Information Dissemination ; *Machine Learning ; Male ; *Models, Statistical ; 
Prospective Studies ; }, abstract = {OBJECTIVE: To explore the potential value of utilizing a commercially available cloud-based machine learning platform to predict surgical intervention in infants with prenatal hydronephrosis (HN).

MATERIALS AND METHODS: A prospective prenatal HN database was uploaded into Microsoft Azure Machine Learning Studio. Probabilistic principal component analysis was employed for data imputation. Multiple clinical variables were included in two-class decision jungle and neural network for model training, using surgical intervention as the primary outcome. Models were scored and evaluated after a 70/30 split of the data.

RESULTS: A total of 557 entries were included. The optimized model (decision jungle) achieved an area under the curve of 0.9, accuracy of 0.87, and precision of 0.80, employing a threshold of 0.5 to predict surgery. Average time to train, score and evaluate the model was 5 seconds. The predictive model was deployed as a web service in 35 seconds, generating a unique API key for app and webpage development. Individualized prediction based on the included variables was deployed as a web-based and batch execution Excel file in less than one minute.

CONCLUSION: This cloud-based ML technology allows easy building, deployment, and sharing of predictive analytics solutions. Using prenatal HN as an example, we propose an opportunity to address contemporary challenges with data analysis, reporting a creative solution that moves beyond the current standard.}, } @article {pmid31598610, year = {2018}, author = {Wang, Z and Christie, MA and Abeysinghe, E and Chu, T and Marru, S and Pierce, M and Danko, CG}, title = {Building a Science Gateway For Processing and Modeling Sequencing Data Via Apache Airavata.}, journal = {Practice and Experience in Advanced Research Computing 2018 : Seamless Creativity : July 22-26 2017, Pittsburgh, Pennsylvania. Practice and Experience in Advanced Research Computing (Conference) (2018 : Pittsburgh, Pa.)}, volume = {2018}, number = {}, pages = {}, pmid = {31598610}, support = {R01 HG009309/HG/NHGRI NIH HHS/United States ; }, abstract = {The amount of DNA sequencing data has been exponentially growing during the past decade due to advances in sequencing technology. Processing and modeling large amounts of sequencing data can be computationally intractable for desktop computing platforms. High performance computing (HPC) resources offer advantages in terms of computing power, and can be a general solution to these problems. Using HPCs directly for computational needs requires skilled users who know their way around HPCs and acquiring such skills take time. Science gateways acts as the middle layer between users and HPCs, providing users with the resources to accomplish compute-intensive tasks without requiring specialized expertise. We developed a web-based computing platform for genome biologists by customizing the PHP Gateway for Airavata (PGA) framework that accesses publicly accessible HPC resources via Apache Airavata. 
This web computing platform takes advantage of the Extreme Science and Engineering Discovery Environment (XSEDE) which provides the resources for gateway development, including access to CPU, GPU, and storage resources. We used this platform to develop a gateway for the dREG algorithm, an online computing tool for finding functional regions in mammalian genomes using nascent RNA sequencing data. The dREG gateway provides its users a free, powerful and user-friendly GPU computing resource based on XSEDE, circumventing the need of specialized knowledge about installation, configuration, and execution on an HPC for biologists. The dREG gateway is available at: https://dREG.dnasequence.org/.}, } @article {pmid29956007, year = {2018}, author = {Kumar, V and Jangirala, S and Ahmad, M}, title = {An Efficient Mutual Authentication Framework for Healthcare System in Cloud Computing.}, journal = {Journal of medical systems}, volume = {42}, number = {8}, pages = {142}, pmid = {29956007}, issn = {1573-689X}, mesh = {*Cloud Computing ; *Computer Security ; Confidentiality ; Humans ; Information Systems ; *Telemedicine ; }, abstract = {The increasing role of Telecare Medicine Information Systems (TMIS) makes its accessibility for patients to explore medical treatment, accumulate and approach medical data through internet connectivity. Security and privacy preservation is necessary for medical data of the patient in TMIS because of the very perceptive purpose. Recently, Mohit et al.'s proposed a mutual authentication protocol for TMIS in the cloud computing environment. In this work, we reviewed their protocol and found that it is not secure against stolen verifier attack, many logged in patient attack, patient anonymity, impersonation attack, and fails to protect session key. For enhancement of security level, we proposed a new mutual authentication protocol for the similar environment. The presented framework is also more capable in terms of computation cost. 
In addition, the security evaluation of the protocol protects resilience of all possible security attributes, and we also explored formal security evaluation based on random oracle model. The performance of the proposed protocol is much better in comparison to the existing protocol.}, } @article {pmid29946251, year = {2018}, author = {Atee, M and Hoti, K and Hughes, JD}, title = {A Technical Note on the PainChek™ System: A Web Portal and Mobile Medical Device for Assessing Pain in People With Dementia.}, journal = {Frontiers in aging neuroscience}, volume = {10}, number = {}, pages = {117}, pmid = {29946251}, issn = {1663-4365}, abstract = {Background: Pain in dementia is predominant particularly in the advanced stages or in those who are unable to verbalize. Uncontrolled pain alters the course of behaviors in patients with dementia making them perturbed, unsettled, and devitalized. Current measures of assessing pain in this population group are inadequate and underutilized in clinical practice because they lack systematic evaluation and innovative design. Objective: To describe a novel method and system of pain assessment using a combination of technologies: automated facial recognition and analysis (AFRA), smart computing, affective computing, and cloud computing (Internet of Things) for people with advanced dementia. Methods and Results: Cognification and affective computing were used to conceptualize the system. A computerized clinical system was developed to address the challenging problem of identifying pain in non-verbal patients with dementia. The system is composed of a smart device enabled app (App) linked to a web admin portal (WAP). The App "PainChek™" uses AFRA to identify facial action units indicative of pain presence, and user-fed clinical information to calculate a pain intensity score. The App has various functionalities including: pain assessment, pain monitoring, patient profiling, and data synchronization (into the WAP). 
The WAP serves as a database that collects the data obtained through the App in the clinical setting. These technologies can assist in addressing the various characteristics of pain (e.g., subjectivity, multidimensionality, and dynamicity). With over 750 paired assessments conducted, the App has been validated in two clinical studies (n = 74, age: 60-98 y), which showed sound psychometric properties: excellent concurrent validity (r = 0.882-0.911), interrater reliability (Kw = 0.74-0.86), internal consistency (α = 0.925-0.950), and excellent test-retest reliability (ICC = 0.904), while it possesses good predictive validity and discriminant validity. Clinimetric data revealed high accuracy (95.0%), sensitivity (96.1%), and specificity (91.4%) as well as excellent clinical utility (0.95). Conclusions: PainChek™ is a comprehensive and evidence-based pain management system. This novel approach has the potential to transform pain assessment in people who are unable to verbalize because it can be used by clinicians and carers in everyday clinical practice.}, } @article {pmid29945681, year = {2018}, author = {Miras, H and Jiménez, R and Perales, Á and Terrón, JA and Bertolet, A and Ortiz, A and Macías, J}, title = {Monte Carlo verification of radiotherapy treatments with CloudMC.}, journal = {Radiation oncology (London, England)}, volume = {13}, number = {1}, pages = {99}, pmid = {29945681}, issn = {1748-717X}, support = {PI-0261-2014 FPS 2014//Fundación Progreso y Salud/ ; PIN-0215-2017//Consejería de Salud de la Junta de Andalucía/ ; }, mesh = {Algorithms ; *Cloud Computing/economics ; Humans ; *Monte Carlo Method ; Phantoms, Imaging ; Radiometry ; Radiotherapy Dosage ; Radiotherapy Planning, Computer-Assisted/economics/*methods ; Software ; }, abstract = {BACKGROUND: A new implementation has been made on CloudMC, a cloud-based platform presented in a previous work, in order to provide services for radiotherapy treatment verification by means of Monte Carlo in a fast, 
easy and economical way. A description of the architecture of the application and the new developments implemented is presented together with the results of the tests carried out to validate its performance.

METHODS: CloudMC has been developed over Microsoft Azure cloud. It is based on a map/reduce implementation for Monte Carlo calculations distribution over a dynamic cluster of virtual machines in order to reduce calculation time. CloudMC has been updated with new methods to read and process the information related to radiotherapy treatment verification: CT image set, treatment plan, structures and dose distribution files in DICOM format. Some tests have been designed in order to determine, for the different tasks, the most suitable type of virtual machines from those available in Azure. Finally, the performance of Monte Carlo verification in CloudMC is studied through three real cases that involve different treatment techniques, linac models and Monte Carlo codes.

RESULTS: Considering computational and economic factors, D1_v2 and G1 virtual machines were selected as the default type for the Worker Roles and the Reducer Role respectively. Calculation times up to 33 min and costs of 16 € were achieved for the verification cases presented when a statistical uncertainty below 2% (2σ) was required. The costs were reduced to 3-6 € when uncertainty requirements are relaxed to 4%.

CONCLUSIONS: Advantages like high computational power, scalability, easy access and pay-per-usage model, make Monte Carlo cloud-based solutions, like the one presented in this work, an important step forward to solve the long-lived problem of truly introducing the Monte Carlo algorithms in the daily routine of the radiotherapy planning process.}, } @article {pmid29940842, year = {2018}, author = {Ausmees, K and John, A and Toor, SZ and Hellander, A and Nettelblad, C}, title = {BAMSI: a multi-cloud service for scalable distributed filtering of massive genome data.}, journal = {BMC bioinformatics}, volume = {19}, number = {1}, pages = {240}, pmid = {29940842}, issn = {1471-2105}, mesh = {Cloud Computing/*standards ; Genomics/*methods ; High-Throughput Nucleotide Sequencing/*methods ; Humans ; }, abstract = {BACKGROUND: The advent of next-generation sequencing (NGS) has made whole-genome sequencing of cohorts of individuals a reality. Primary datasets of raw or aligned reads of this sort can get very large. For scientific questions where curated called variants are not sufficient, the sheer size of the datasets makes analysis prohibitively expensive. In order to make re-analysis of such data feasible without the need to have access to a large-scale computing facility, we have developed a highly scalable, storage-agnostic framework, an associated API and an easy-to-use web user interface to execute custom filters on large genomic datasets.

RESULTS: We present BAMSI, a Software as-a Service (SaaS) solution for filtering of the 1000 Genomes phase 3 set of aligned reads, with the possibility of extension and customization to other sets of files. Unique to our solution is the capability of simultaneously utilizing many different mirrors of the data to increase the speed of the analysis. In particular, if the data is available in private or public clouds - an increasingly common scenario for both academic and commercial cloud providers - our framework allows for seamless deployment of filtering workers close to data. We show results indicating that such a setup improves the horizontal scalability of the system, and present a possible use case of the framework by performing an analysis of structural variation in the 1000 Genomes data set.

CONCLUSIONS: BAMSI constitutes a framework for efficient filtering of large genomic data sets that is flexible in the use of compute as well as storage resources. The data resulting from the filter is assumed to be greatly reduced in size, and can easily be downloaded or routed into e.g. a Hadoop cluster for subsequent interactive analysis using Hive, Spark or similar tools. In this respect, our framework also suggests a general model for making very large datasets of high scientific value more accessible by offering the possibility for organizations to share the cost of hosting data on hot storage, without compromising the scalability of downstream analysis.}, } @article {pmid29939188, year = {2018}, author = {Wu, D and Faria, AV and Younes, L and Ross, CA and Mori, S and Miller, MI}, title = {Whole-brain Segmentation and Change-point Analysis of Anatomical Brain MRI-Application in Premanifest Huntington's Disease.}, journal = {Journal of visualized experiments : JoVE}, volume = {}, number = {136}, pages = {}, pmid = {29939188}, issn = {1940-087X}, support = {R01 NS040068/NS/NINDS NIH HHS/United States ; R01 EB008171/EB/NIBIB NIH HHS/United States ; R21 NS098018/NS/NINDS NIH HHS/United States ; R01 NS084957/NS/NINDS NIH HHS/United States ; R01 NS086888/NS/NINDS NIH HHS/United States ; R01 EB000975/EB/NIBIB NIH HHS/United States ; P50 NS016375/NS/NINDS NIH HHS/United States ; U01 NS082085/NS/NINDS NIH HHS/United States ; P41 EB015909/EB/NIBIB NIH HHS/United States ; }, mesh = {Adult ; Aged ; Brain/*diagnostic imaging/pathology ; Disease Progression ; Female ; Humans ; Huntington Disease/*diagnostic imaging/pathology ; Magnetic Resonance Imaging/*methods ; Male ; Middle Aged ; }, abstract = {Recent advances in MRI offer a variety of useful markers to identify neurodegenerative diseases. 
In Huntington's disease (HD), regional brain atrophy begins many years prior to the motor onset (during the "premanifest" period), but the spatiotemporal pattern of regional atrophy across the brain has not been fully characterized. Here we demonstrate an online cloud-computing platform, "MRICloud", which provides atlas-based whole-brain segmentation of T1-weighted images at multiple granularity levels, and thereby, enables us to access the regional features of brain anatomy. We then describe a regression model that detects statistically significant inflection points, at which regional brain atrophy starts to be noticeable, i.e. the "change-point", with respect to a disease progression index. We used the CAG-age product (CAP) score to index the disease progression in HD patients. Change-point analysis of the volumetric measurements from the segmentation pipeline, therefore, provides important information of the order and pattern of structural atrophy across the brain. The paper illustrates the use of these techniques on T1-weighted MRI data of premanifest HD subjects from a large multicenter PREDICT-HD study. This design potentially has wide applications in a range of neurodegenerative diseases to investigate the dynamic changes of brain anatomy.}, } @article {pmid29936514, year = {2017}, author = {van Roessel, I and Reumann, M and Brand, A}, title = {Potentials and Challenges of the Health Data Cooperative Model.}, journal = {Public health genomics}, volume = {20}, number = {6}, pages = {321-331}, pmid = {29936514}, issn = {1662-8063}, abstract = {INTRODUCTION: Currently, abundances of highly relevant health data are locked up in data silos due to decentralized storage and data protection laws. The health data cooperative (HDC) model is established to make this valuable data available for societal purposes. The aim of this study is to analyse the HDC model and its potentials and challenges.

RESULTS: An HDC is a health data bank. The HDC model has as core principles a cooperative approach, citizen-centredness, not-for-profit structure, data enquiry procedure, worldwide accessibility, cloud computing data storage, open source, and transparency about governance policy. HDC members have access to the HDC platform, which consists of the "core," the "app store," and the "big data." This, respectively, enables the users to collect, store, manage, and share health information, to analyse personal health data, and to conduct big data analytics. Identified potentials of the HDC model are digitization of healthcare information, citizen empowerment, knowledge benefit, patient empowerment, cloud computing data storage, and reduction in healthcare expenses. Nevertheless, there are also challenges linked with this approach, including privacy and data security, citizens' restraint, disclosure of clinical results, big data, and commercial interest. Limitations and Outlook: The results of this article are not generalizable because multiple studies with a limited number of study participants are included. Therefore, it is recommended to undertake further elaborate research on these topics among larger and various groups of individuals. Additionally, more pilots on the HDC model are required before it can be fully implemented. 
Moreover, when the HDC model becomes operational, further research on its performances should be undertaken.}, } @article {pmid29929475, year = {2018}, author = {De Paris, R and Vahl Quevedo, C and Ruiz, DD and Gargano, F and de Souza, ON}, title = {A selective method for optimizing ensemble docking-based experiments on an InhA Fully-Flexible receptor model.}, journal = {BMC bioinformatics}, volume = {19}, number = {1}, pages = {235}, pmid = {29929475}, issn = {1471-2105}, mesh = {*Drug Design ; Molecular Docking Simulation/*methods ; }, abstract = {BACKGROUND: In the rational drug design process, an ensemble of conformations obtained from a molecular dynamics simulation plays a crucial role in docking experiments. Some studies have found that Fully-Flexible Receptor (FFR) models predict realistic binding energy accurately and improve scoring to enhance selectiveness. At the same time, methods have been proposed to reduce the high computational costs involved in considering the explicit flexibility of proteins in receptor-ligand docking. This study introduces a novel method to optimize ensemble docking-based experiments by reducing the size of an InhA FFR model at docking runtime and scaling docking workflow invocations on cloud virtual machines.

RESULTS: First, in order to find the most affordable cost-benefit pool of virtual machines, we evaluated the performance of the docking workflow invocations in different configurations of Azure instances. Second, we validated the gains obtained by the proposed method based on the quality of the Reduced Fully-Flexible Receptor (RFFR) models produced using AutoDock4.2. The analyses show that the proposed method reduced the model size by approximately 50% while covering at least 86% of the best docking results from the 74 ligands tested. Third, we tested our novel method using AutoDock Vina, a different docking software, and showed the positive accuracy achieved in the resulting RFFR models. Finally, our results demonstrated that the method proposed optimized ensemble docking experiments and is applicable to different docking software. In addition, it detected new binding modes, which would be unreachable if employing only the rigid structure used to generate the InhA FFR model.

CONCLUSIONS: Our results showed that the selective method is a valuable strategy for optimizing ensemble docking-based experiments using different docking software. The RFFR models produced by discarding non-promising snapshots from the original model are accurately shaped for a larger number of ligands, and the elapsed time spent in the ensemble docking experiments is considerably reduced.}, } @article {pmid29914207, year = {2018}, author = {Fernández-Caramés, TM and Fraga-Lamas, P and Suárez-Albela, M and Díaz-Bouza, MA}, title = {A Fog Computing Based Cyber-Physical System for the Automation of Pipe-Related Tasks in the Industry 4.0 Shipyard.}, journal = {Sensors (Basel, Switzerland)}, volume = {18}, number = {6}, pages = {}, pmid = {29914207}, issn = {1424-8220}, abstract = {Pipes are one of the key elements in the construction of ships, which usually contain between 15,000 and 40,000 of them. This huge number, as well as the variety of processes that may be performed on a pipe, require rigorous identification, quality assessment and traceability. Traditionally, such tasks have been carried out by using manual procedures and following documentation on paper, which slows down the production processes and reduces the output of a pipe workshop. This article presents a system that allows for identifying and tracking the pipes of a ship through their construction cycle. For such a purpose, a fog computing architecture is proposed to extend cloud computing to the edge of the shipyard network. The system has been developed jointly by Navantia, one of the largest shipbuilders in the world, and the University of A Coruña (Spain), through a project that makes use of some of the latest Industry 4.0 technologies. Specifically, a Cyber-Physical System (CPS) is described, which uses active Radio Frequency Identification (RFID) tags to track pipes and detect relevant events. 
Furthermore, the CPS has been integrated and tested in conjunction with Siemens’ Manufacturing Execution System (MES) (Simatic IT). The experiments performed on the CPS show that, in the selected real-world scenarios, fog gateways respond faster than the tested cloud server, with such gateways also being able to successfully process more samples under high-load situations. In addition, under regular loads, fog gateways react between five and 481 times faster than the alternative cloud approach.}, } @article {pmid29914104, year = {2018}, author = {Ma, X and Lin, C and Zhang, H and Liu, J}, title = {Energy-Aware Computation Offloading of IoT Sensors in Cloudlet-Based Mobile Edge Computing.}, journal = {Sensors (Basel, Switzerland)}, volume = {18}, number = {6}, pages = {}, pmid = {29914104}, issn = {1424-8220}, abstract = {Mobile edge computing is proposed as a promising computing paradigm to relieve the excessive burden of data centers and mobile networks, which is induced by the rapid growth of Internet of Things (IoT). This work introduces the cloud-assisted multi-cloudlet framework to provision scalable services in cloudlet-based mobile edge computing. Due to the constrained computation resources of cloudlets and limited communication resources of wireless access points (APs), IoT sensors with identical computation offloading decisions interact with each other. To optimize the processing delay and energy consumption of computation tasks, theoretic analysis of the computation offloading decision problem of IoT sensors is presented in this paper. In more detail, the computation offloading decision problem of IoT sensors is formulated as a computation offloading game and the condition of Nash equilibrium is derived by introducing the tool of a potential game. By exploiting the finite improvement property of the game, the Computation Offloading Decision (COD) algorithm is designed to provide decentralized computation offloading strategies for IoT sensors. 
Simulation results demonstrate that the COD algorithm can significantly reduce the system cost compared with the random-selection algorithm and the cloud-first algorithm. Furthermore, the COD algorithm can scale well with increasing IoT sensors.}, } @article {pmid29902623, year = {2018}, author = {Geissler, B and Steiner, G and Mew, MC}, title = {Clearing the fog on phosphate rock data - Uncertainties, fuzziness, and misunderstandings.}, journal = {The Science of the total environment}, volume = {642}, number = {}, pages = {250-263}, doi = {10.1016/j.scitotenv.2018.05.381}, pmid = {29902623}, issn = {1879-1026}, abstract = {Big Data, blockchains, and cloud computing have become ubiquitous in today's mass media and are universally known terms used in everyday speech. If we look behind these often misused buzzwords, we find at least one common element, namely data. Although we hardly use these terms in the "classic discipline" of mineral economics, we find various similarities. The case of phosphate data bears numerous challenges in multiple forms such as uncertainties, fuzziness, or misunderstandings. Often simulation models are used to support decision-making processes. For all these models, reliable and accurate sets of data are an essential premise. A significant number of data series relating to the phosphorus supply chain, including resource inventory or production, consumption, and trade data ranging from phosphate rock to intermediates like marketable concentrate to final phosphate fertilizers, is available. Data analysts and modelers must often choose from various sources, and they also depend on data access. Based on a transdisciplinary orientation, we aim to help colleagues in all fields by illustrating quantitative differences among the reported data, taking a somewhat engineering approach. 
We use common descriptive statistics to measure and causally explain discrepancies in global phosphate-rock production data issued by the US Geological Survey, the British Geological Survey, Austrian World Mining Data, the International Fertilizer Association, and CRU International over time, with a focus on the most recent years. Furthermore, we provide two snapshots of global-trade flows for phosphate-rock concentrate, in 2015 and 1985, and compare these to an approach using total-nutrient data. We find discrepancies of up to 30% in reported global production volume, whereby the major share could be assigned directly to China and Peru. Consequently, we call for a global, independent agency to collect and monitor phosphate data in order to reduce uncertainties or fuzziness and, thereby, ultimately support policy-making processes.}, } @article {pmid29902176, year = {2018}, author = {Navale, V and Bourne, PE}, title = {Cloud computing applications for biomedical science: A perspective.}, journal = {PLoS computational biology}, volume = {14}, number = {6}, pages = {e1006144}, pmid = {29902176}, issn = {1553-7358}, mesh = {Biomedical Research/*methods ; *Cloud Computing ; Computational Biology/*methods ; Humans ; }, abstract = {Biomedical research has become a digital data-intensive endeavor, relying on secure and scalable computing, storage, and network infrastructure, which has traditionally been purchased, supported, and maintained locally. For certain types of biomedical applications, cloud computing has emerged as an alternative to locally maintained traditional computing approaches. Cloud computing offers users pay-as-you-go access to services such as hardware infrastructure, platforms, and software for solving common biomedical computational problems. Cloud computing services offer secure on-demand storage and analysis and are differentiated from traditional high-performance computing by their rapid availability and scalability of services. 
As such, cloud services are engineered to address big data problems and enhance the likelihood of data and analytics sharing, reproducibility, and reuse. Here, we provide an introductory perspective on cloud computing to help the reader determine its value to their own research.}, } @article {pmid29897418, year = {2018}, author = {Wang, L and Lu, Z and Van Buren, P and Ware, D}, title = {SciApps: a cloud-based platform for reproducible bioinformatics workflows.}, journal = {Bioinformatics (Oxford, England)}, volume = {34}, number = {22}, pages = {3917-3920}, pmid = {29897418}, issn = {1367-4811}, mesh = {Cloud Computing ; *Computational Biology ; Software ; *Workflow ; }, abstract = {MOTIVATION: The rapid accumulation of both sequence and phenotype data generated by high-throughput methods has increased the need to store and analyze data on distributed storage and computing systems. Efficient data management across these heterogeneous systems requires a workflow management system to simplify the task of analysis through automation and make large-scale bioinformatics analyses accessible and reproducible.

RESULTS: We developed SciApps, a web-based platform for reproducible bioinformatics workflows. The platform is designed to automate the execution of modular Agave apps and support execution of workflows on local clusters or in a cloud. Two workflows, one for association and one for annotation, are provided as exemplar scientific use cases.

https://www.sciapps.org.

SUPPLEMENTARY INFORMATION: Supplementary data are available at Bioinformatics online.}, } @article {pmid29888067, year = {2018}, author = {Raisaro, JL and Klann, JG and Wagholikar, KB and Estiri, H and Hubaux, JP and Murphy, SN}, title = {Feasibility of Homomorphic Encryption for Sharing I2B2 Aggregate-Level Data in the Cloud.}, journal = {AMIA Joint Summits on Translational Science proceedings. AMIA Joint Summits on Translational Science}, volume = {2017}, number = {}, pages = {176-185}, pmid = {29888067}, issn = {2153-4063}, support = {R00 LM011575/LM/NLM NIH HHS/United States ; }, abstract = {The biomedical community is lagging in the adoption of cloud computing for the management of medical data. The primary obstacles are concerns about privacy and security. In this paper, we explore the feasibility of using advanced privacy-enhancing technologies in order to enable the sharing of sensitive clinical data in a public cloud. Our goal is to facilitate sharing of clinical data in the cloud by minimizing the risk of unintended leakage of sensitive clinical information. In particular, we focus on homomorphic encryption, a specific type of encryption that offers the ability to run computation on the data while the data remains encrypted. 
This paper demonstrates that homomorphic encryption can be used efficiently to compute aggregating queries on the ciphertexts, along with providing end-to-end confidentiality of aggregate-level data from the i2b2 data model.}, } @article {pmid29883142, year = {2018}, author = {Dumitrescu, EF and McCaskey, AJ and Hagen, G and Jansen, GR and Morris, TD and Papenbrock, T and Pooser, RC and Dean, DJ and Lougovski, P}, title = {Cloud Quantum Computing of an Atomic Nucleus.}, journal = {Physical review letters}, volume = {120}, number = {21}, pages = {210501}, doi = {10.1103/PhysRevLett.120.210501}, pmid = {29883142}, issn = {1079-7114}, abstract = {We report a quantum simulation of the deuteron binding energy on quantum processors accessed via cloud servers. We use a Hamiltonian from pionless effective field theory at leading order. We design a low-depth version of the unitary coupled-cluster ansatz, use the variational quantum eigensolver algorithm, and compute the binding energy to within a few percent. Our work is the first step towards scalable nuclear structure computations on a quantum processor via the cloud, and it sheds light on how to map scientific computing applications onto nascent quantum devices.}, } @article {pmid29882904, year = {2018}, author = {Jo, BW and Jo, JH and Khan, RMA and Kim, JH and Lee, YS}, title = {Development of a Cloud Computing-Based Pier Type Port Structure Stability Evaluation Platform Using Fiber Bragg Grating Sensors.}, journal = {Sensors (Basel, Switzerland)}, volume = {18}, number = {6}, pages = {}, pmid = {29882904}, issn = {1424-8220}, abstract = {Structure Health Monitoring is a topic of great interest in port structures due to the ageing of structures and the limitations of evaluating structures. 
This paper presents a cloud computing-based stability evaluation platform for a pier type port structure using Fiber Bragg Grating (FBG) sensors in a system consisting of a FBG strain sensor, FBG displacement gauge, FBG angle meter, gateway, and cloud computing-based web server. The sensors were installed on core components of the structure and measurements were taken to evaluate the structures. The measurement values were transmitted to the web server via the gateway to analyze and visualize them. All data were analyzed and visualized in the web server to evaluate the structure based on the safety evaluation index (SEI). The stability evaluation platform for pier type port structures involves the efficient monitoring of the structures which can be carried out easily anytime and anywhere by converging new technologies such as cloud computing and FBG sensors. In addition, the platform has been successfully implemented at “Maryang Harbor” situated in Maryang-Meyon of Korea to test its durability.}, } @article {pmid29882868, year = {2018}, author = {Mora, H and Signes-Pont, MT and Gil, D and Johnsson, M}, title = {Collaborative Working Architecture for IoT-Based Applications.}, journal = {Sensors (Basel, Switzerland)}, volume = {18}, number = {6}, pages = {}, pmid = {29882868}, issn = {1424-8220}, abstract = {The new sensing applications need enhanced computing capabilities to handle the requirements of complex and huge data processing. The Internet of Things (IoT) concept brings processing and communication features to devices. In addition, the Cloud Computing paradigm provides resources and infrastructures for performing the computations and outsourcing the work from the IoT devices. This scenario opens new opportunities for designing advanced IoT-based applications, however, there is still much research to be done to properly gear all the systems for working together. 
This work proposes a collaborative model and an architecture to take advantage of the available computing resources. The resulting architecture involves a novel network design with different levels which combines sensing and processing capabilities based on the Mobile Cloud Computing (MCC) paradigm. An experiment is included to demonstrate that this approach can be used in diverse real applications. The results show the flexibility of the architecture to perform complex computational tasks of advanced applications.}, } @article {pmid29867956, year = {2018}, author = {Christley, S and Scarborough, W and Salinas, E and Rounds, WH and Toby, IT and Fonner, JM and Levin, MK and Kim, M and Mock, SA and Jordan, C and Ostmeyer, J and Buntzman, A and Rubelt, F and Davila, ML and Monson, NL and Scheuermann, RH and Cowell, LG}, title = {VDJServer: A Cloud-Based Analysis Portal and Data Commons for Immune Repertoire Sequences and Rearrangements.}, journal = {Frontiers in immunology}, volume = {9}, number = {}, pages = {976}, pmid = {29867956}, issn = {1664-3224}, support = {K12 CA090625/CA/NCI NIH HHS/United States ; R01 AI097403/AI/NIAID NIH HHS/United States ; U19 AI057229/AI/NIAID NIH HHS/United States ; }, mesh = {Animals ; *Cloud Computing ; Computational Biology/*methods ; Computing Methodologies ; Genomics/*methods ; Humans ; Information Dissemination ; Mice ; Software ; User-Computer Interface ; VDJ Exons/*immunology ; Web Browser ; Workflow ; }, abstract = {BACKGROUND: Recent technological advances in immune repertoire sequencing have created tremendous potential for advancing our understanding of adaptive immune response dynamics in various states of health and disease. Immune repertoire sequencing produces large, highly complex data sets, however, which require specialized methods and software tools for their effective analysis and interpretation.

RESULTS: VDJServer is a cloud-based analysis portal for immune repertoire sequence data that provides access to a suite of tools for a complete analysis workflow, including modules for preprocessing and quality control of sequence reads, V(D)J gene segment assignment, repertoire characterization, and repertoire comparison. VDJServer also provides sophisticated visualizations for exploratory analysis. It is accessible through a standard web browser via a graphical user interface designed for use by immunologists, clinicians, and bioinformatics researchers. VDJServer provides a data commons for public sharing of repertoire sequencing data, as well as private sharing of data between users. We describe the main functionality and architecture of VDJServer and demonstrate its capabilities with use cases from cancer immunology and autoimmunity.

CONCLUSION: VDJServer provides a complete analysis suite for human and mouse T-cell and B-cell receptor repertoire sequencing data. The combination of its user-friendly interface and high-performance computing allows large immune repertoire sequencing projects to be analyzed with no programming or software installation required. VDJServer is a web-accessible cloud platform that provides access through a graphical user interface to a data management infrastructure, a collection of analysis tools covering all steps in an analysis, and an infrastructure for sharing data along with workflows, results, and computational provenance. VDJServer is a free, publicly available, and open-source licensed resource.}, } @article {pmid29867037, year = {2018}, author = {Yan, H and Li, X and Wang, Y and Jia, C}, title = {Centralized Duplicate Removal Video Storage System with Privacy Preservation in IoT.}, journal = {Sensors (Basel, Switzerland)}, volume = {18}, number = {6}, pages = {}, pmid = {29867037}, issn = {1424-8220}, abstract = {In recent years, the Internet of Things (IoT) has found wide application and attracted much attention. Since most of the end-terminals in IoT have limited capabilities for storage and computing, it has become a trend to outsource the data from local to cloud computing. To further reduce the communication bandwidth and storage space, data deduplication has been widely adopted to eliminate the redundant data. However, since data collected in IoT are sensitive and closely related to users' personal information, the privacy protection of users' information becomes a challenge. As the channels, like the wireless channels between the terminals and the cloud servers in IoT, are public and the cloud servers are not fully trusted, data have to be encrypted before being uploaded to the cloud. 
However, encryption makes the performance of deduplication by the cloud server difficult because the ciphertext will be different even if the underlying plaintext is identical. In this paper, we build a centralized privacy-preserving duplicate removal storage system, which supports both file-level and block-level deduplication. In order to avoid the leakage of statistical information of data, Intel Software Guard Extensions (SGX) technology is utilized to protect the deduplication process on the cloud server. The results of the experimental analysis demonstrate that the new scheme can significantly improve the deduplication efficiency and enhance the security. It is envisioned that the duplicated removal system with privacy preservation will be of great use in the centralized storage environment of IoT.}, } @article {pmid29865266, year = {2018}, author = {Fernández-Caramés, TM and Fraga-Lamas, P and Suárez-Albela, M and Vilar-Montesinos, M}, title = {A Fog Computing and Cloudlet Based Augmented Reality System for the Industry 4.0 Shipyard.}, journal = {Sensors (Basel, Switzerland)}, volume = {18}, number = {6}, pages = {}, pmid = {29865266}, issn = {1424-8220}, abstract = {Augmented Reality (AR) is one of the key technologies pointed out by Industry 4.0 as a tool for enhancing the next generation of automated and computerized factories. AR can also help shipbuilding operators, since they usually need to interact with information (e.g., product datasheets, instructions, maintenance procedures, quality control forms) that could be handled easily and more efficiently through AR devices. This is the reason why Navantia, one of the 10 largest shipbuilders in the world, is studying the application of AR (among other technologies) in different shipyard environments in a project called "Shipyard 4.0". This article presents Navantia's industrial AR (IAR) architecture, which is based on cloudlets and on the fog computing paradigm. 
Both technologies are ideal for supporting physically-distributed, low-latency and QoS-aware applications that decrease the network traffic and the computational load of traditional cloud computing systems. The proposed IAR communications architecture is evaluated in real-world scenarios with payload sizes according to demanding Microsoft HoloLens applications and when using a cloud, a cloudlet and a fog computing system. The results show that, in terms of response delay, the fog computing system is the fastest when transferring small payloads (less than 128 KB), while for larger file sizes, the cloudlet solution is faster than the others. Moreover, under high loads (with many concurrent IAR clients), the cloudlet in some cases is more than four times faster than the fog computing system in terms of response delay.}, } @article {pmid29864529, year = {2018}, author = {Cianfrocco, MA and Lahiri, I and DiMaio, F and Leschziner, AE}, title = {cryoem-cloud-tools: A software platform to deploy and manage cryo-EM jobs in the cloud.}, journal = {Journal of structural biology}, volume = {203}, number = {3}, pages = {230-235}, pmid = {29864529}, issn = {1095-8657}, support = {R01 GM092895/GM/NIGMS NIH HHS/United States ; R01 GM107214/GM/NIGMS NIH HHS/United States ; R01 GM123089/GM/NIGMS NIH HHS/United States ; S10 OD020011/OD/NIH HHS/United States ; }, mesh = {Computational Biology/*methods ; Cryoelectron Microscopy/*methods ; Image Processing, Computer-Assisted/*methods ; *Software ; beta-Galactosidase/chemistry/ultrastructure ; }, abstract = {Access to streamlined computational resources remains a significant bottleneck for new users of cryo-electron microscopy (cryo-EM). To address this, we have developed tools that will submit cryo-EM analysis routines and atomic model building jobs directly to Amazon Web Services (AWS) from a local computer or laptop. 
These new software tools ("cryoem-cloud-tools") have incorporated optimal data movement, security, and cost-saving strategies, giving novice users access to complex cryo-EM data processing pipelines. Integrating these tools into the RELION processing pipeline and graphical user interface we determined a 2.2 Å structure of β-galactosidase in ∼55 h on AWS. We implemented a similar strategy to submit Rosetta atomic model building and refinement to AWS. These software tools dramatically reduce the barrier for entry of new users to cloud computing for cryo-EM and are freely available at cryoem-tools.cloud.}, } @article {pmid29862718, year = {2017}, author = {Zou, L and Zhang, W and Liu, X and Xie, Z and Xie, Y}, title = {[Porting Radiotherapy Software of Varian to Cloud Platform].}, journal = {Zhongguo yi liao qi xie za zhi = Chinese journal of medical instrumentation}, volume = {41}, number = {5}, pages = {330-333}, doi = {10.3969/j.issn.1671-7104.2017.05.005}, pmid = {29862718}, issn = {1671-7104}, mesh = {*Cloud Computing ; Internet ; Radiotherapy/*instrumentation ; *Software ; }, abstract = {To develop a low-cost private cloud platform of radiotherapy software. First, a private cloud platform which was based on OpenStack and the virtual GPU hardware was built. Then on the private cloud platform, all the Varian radiotherapy software modules were installed to the virtual machine, and the corresponding function configuration was completed. Finally the software on the cloud was able to be accessed by virtual desktop client. The function test results of the cloud workstation show that a cloud workstation is equivalent to an isolated physical workstation, and any clients on the LAN can use the cloud workstation smoothly. The cloud platform transplantation in this study is economical and practical. 
The project not only improves the utilization rates of radiotherapy software, but also makes it possible that the cloud computing technology can expand its applications to the field of radiation oncology.}, } @article {pmid29854362, year = {2018}, author = {Ghanem, SM and Abdel Wahed, M and Saleh, N}, title = {Automated Risk Control in Medical Imaging Equipment Management Using Cloud Application.}, journal = {Journal of healthcare engineering}, volume = {2018}, number = {}, pages = {7125258}, pmid = {29854362}, issn = {2040-2295}, mesh = {*Cloud Computing ; Diagnostic Imaging/*instrumentation/methods ; Egypt ; Hospitals ; Humans ; Medical Informatics/*instrumentation/methods ; Probability ; *Risk Management ; Software ; }, abstract = {Medical imaging equipment (MIE) is the baseline of providing patient diagnosis in healthcare facilities. However, that type of equipment poses high risk for patients, operators, and environment in terms of technology and application. Considering risk management in MIE management is rarely covered in literature. The study proposes a methodology that controls risks associated with MIE management. The methodology is based on proposing a set of key performance indicators (KPIs) that lead to identify a set of undesired events (UDEs), and through a risk matrix, a risk level is evaluated. By using cloud computing software, risks could be controlled to be manageable. The methodology was verified by using a data set of 204 pieces of MIE along 104 hospitals, which belong to Egyptian Ministry of Health. Results point to appropriateness of proposed KPIs and UDEs in risk evaluation and control. 
Thus, the study reveals that optimizing risks taking into account the costs has an impact on risk control of MIE management.}, } @article {pmid29854245, year = {2017}, author = {Chenghong, W and Jiang, Y and Mohammed, N and Chen, F and Jiang, X and Al Aziz, MM and Sadat, MN and Wang, S}, title = {SCOTCH: Secure Counting Of encrypTed genomiC data using a Hybrid approach.}, journal = {AMIA ... Annual Symposium proceedings. AMIA Symposium}, volume = {2017}, number = {}, pages = {1744-1753}, pmid = {29854245}, issn = {1942-597X}, support = {R00 HG008175/HG/NHGRI NIH HHS/United States ; R01 GM114612/GM/NIGMS NIH HHS/United States ; R01 GM118574/GM/NIGMS NIH HHS/United States ; R01 GM118609/GM/NIGMS NIH HHS/United States ; R21 LM012060/LM/NLM NIH HHS/United States ; U01 EB023685/EB/NIBIB NIH HHS/United States ; }, mesh = {*Cloud Computing ; *Computer Security ; Databases, Genetic ; *Datasets as Topic ; *Genetic Privacy ; Genome, Human ; *Genomics ; Humans ; Models, Theoretical ; Software ; }, abstract = {As genomic data are usually at large scale and highly sensitive, it is essential to enable both efficient and secure analysis, by which the data owner can securely delegate both computation and storage on untrusted public cloud. Counting query of genotypes is a basic function for many downstream applications in biomedical research (e.g., computing allele frequency, calculating chi-squared statistics, etc.). Previous solutions show promise on secure counting of outsourced data but the efficiency is still a big limitation for real world applications. In this paper, we propose a novel hybrid solution to combine a rigorous theoretical model (homomorphic encryption) and the latest hardware-based infrastructure (i.e., Software Guard Extensions) to speed up the computation while preserving the privacy of both data owners and data users. 
Our results demonstrated efficiency by using the real data from the personal genome project.}, } @article {pmid29796233, year = {2018}, author = {Ko, H and Měsíček, L and Choi, J and Hwang, S}, title = {A Study on Secure Medical-Contents Strategies with DRM Based on Cloud Computing.}, journal = {Journal of healthcare engineering}, volume = {2018}, number = {}, pages = {6410180}, pmid = {29796233}, issn = {2040-2295}, mesh = {Algorithms ; Biometry ; *Cloud Computing ; *Computer Security ; Confidentiality ; Data Collection ; Delivery of Health Care ; Electronic Health Records ; Humans ; Information Storage and Retrieval/*methods ; Internet ; Medical Informatics/*instrumentation ; Medical Records ; Monitoring, Ambulatory/*instrumentation ; Programming Languages ; }, abstract = {Many hospitals and medical clinics have been using a wearable sensor in its health care system because the wearable sensor, which is able to measure the patients' biometric information, has been developed to analyze their patients remotely. The measured information is saved to a server in a medical center, and the server keeps the medical information, which also involves personal information, on a cloud system. The server and network devices are used by connecting each other, and sensitive medical records are dealt with remotely. However, these days, the attackers, who try to attack the server or the network systems, are increasing. In addition, the server and the network system have a weak protection and security policy against the attackers. In this paper, it is suggested that security compliance of medical contents should be followed to improve the level of security. 
As a result, the medical contents are kept safely.}, } @article {pmid29794537, year = {2018}, author = {Grossman, RL}, title = {Progress Toward Cancer Data Ecosystems.}, journal = {Cancer journal (Sudbury, Mass.)}, volume = {24}, number = {3}, pages = {126-130}, pmid = {29794537}, issn = {1540-336X}, support = {OT3 HL142481/HL/NHLBI NIH HHS/United States ; OT3 OD025460/OD/NIH HHS/United States ; }, mesh = {Ecosystem ; Humans ; Neoplasms/*pathology ; Research ; }, abstract = {One of the recommendations of the Cancer Moonshot Blue Ribbon Panel report from 2016 was the creation of a national cancer data ecosystem. We review some of the approaches for building cancer data ecosystems and some of the progress that has been made. A data commons is the colocation of data with cloud computing infrastructure and commonly used software services, tools, and applications for managing, integrating, analyzing, and sharing data to create an interoperable resource for the research community. We discuss data commons and their potential role in cancer data ecosystems and, in particular, how multiple data commons can interoperate to form part of the foundation for a cancer data ecosystem.}, } @article {pmid29789515, year = {2018}, author = {Wu, G and Bao, W and Zhu, X and Zhang, X}, title = {A General Cross-Layer Cloud Scheduling Framework for Multiple IoT Computer Tasks.}, journal = {Sensors (Basel, Switzerland)}, volume = {18}, number = {6}, pages = {}, pmid = {29789515}, issn = {1424-8220}, abstract = {The diversity of IoT services and applications brings enormous challenges to improving the performance of multiple computer tasks' scheduling in cross-layer cloud computing systems. Unfortunately, the commonly-employed frameworks fail to adapt to the new patterns on the cross-layer cloud. To solve this issue, we design a new computer task scheduling framework for multiple IoT services in cross-layer cloud computing systems. 
Specifically, we first analyze the features of the cross-layer cloud and computer tasks. Then, we design the scheduling framework based on the analysis and present detailed models to illustrate the procedures of using the framework. With the proposed framework, the IoT services deployed in cross-layer cloud computing systems can dynamically select suitable algorithms and use resources more effectively to finish computer tasks with different objectives. Finally, the algorithms are given based on the framework, and extensive experiments are also given to validate its effectiveness, as well as its superiority.}, } @article {pmid29772840, year = {2018}, author = {Xu, Q and Tan, C and Fan, Z and Zhu, W and Xiao, Y and Cheng, F}, title = {Secure Data Access Control for Fog Computing Based on Multi-Authority Attribute-Based Signcryption with Computation Outsourcing and Attribute Revocation.}, journal = {Sensors (Basel, Switzerland)}, volume = {18}, number = {5}, pages = {}, pmid = {29772840}, issn = {1424-8220}, abstract = {Nowadays, fog computing provides computation, storage, and application services to end users in the Internet of Things. One of the major concerns in fog computing systems is how fine-grained access control can be imposed. As a logical combination of attribute-based encryption and attribute-based signature, Attribute-based Signcryption (ABSC) can provide confidentiality and anonymous authentication for sensitive data and is more efficient than traditional "encrypt-then-sign" or "sign-then-encrypt" strategy. Thus, ABSC is suitable for fine-grained access control in a semi-trusted cloud environment and is gaining more and more attention recently. However, in many existing ABSC systems, the computation cost required for the end users in signcryption and designcryption is linear with the complexity of signing and encryption access policy. 
Moreover, only a single authority that is responsible for attribute management and key generation exists in the previous proposed ABSC schemes, whereas in reality, mostly, different authorities monitor different attributes of the user. In this paper, we propose OMDAC-ABSC, a novel data access control scheme based on Ciphertext-Policy ABSC, to provide data confidentiality, fine-grained control, and anonymous authentication in a multi-authority fog computing system. The signcryption and designcryption overhead for the user is significantly reduced by outsourcing the undesirable computation operations to fog nodes. The proposed scheme is proven to be secure in the standard model and can provide attribute revocation and public verifiability. The security analysis, asymptotic complexity comparison, and implementation results indicate that our construction can balance the security goals with practical efficiency in computation.}, } @article {pmid29762754, year = {2018}, author = {Sun, X and Gao, J and Jin, P and Eng, C and Burchard, EG and Beaty, TH and Ruczinski, I and Mathias, RA and Barnes, K and Wang, F and Qin, ZS and , }, title = {Optimized distributed systems achieve significant performance improvement on sorted merging of massive VCF files.}, journal = {GigaScience}, volume = {7}, number = {6}, pages = {}, pmid = {29762754}, issn = {2047-217X}, support = {P01 NS097206/NS/NINDS NIH HHS/United States ; R01 HL128439/HL/NHLBI NIH HHS/United States ; U54 NS091859/NS/NINDS NIH HHS/United States ; R01 HL135156/HL/NHLBI NIH HHS/United States ; P60 MD006902/MD/NIMHD NIH HHS/United States ; R01 HL104608/HL/NHLBI NIH HHS/United States ; R01 MD010443/MD/NIMHD NIH HHS/United States ; R21 ES024844/ES/NIEHS NIH HHS/United States ; RL5 GM118984/GM/NIGMS NIH HHS/United States ; R01 HL117004/HL/NHLBI NIH HHS/United States ; R01 NS051630/NS/NINDS NIH HHS/United States ; U54 GM115428/GM/NIGMS NIH HHS/United States ; R01 ES015794/ES/NIEHS NIH HHS/United States ; }, mesh = {Cluster 
Analysis ; *Computer Communication Networks ; Humans ; *Information Storage and Retrieval ; *Software ; Workflow ; }, abstract = {BACKGROUND: Sorted merging of genomic data is a common data operation necessary in many sequencing-based studies. It involves sorting and merging genomic data from different subjects by their genomic locations. In particular, merging a large number of variant call format (VCF) files is frequently required in large-scale whole-genome sequencing or whole-exome sequencing projects. Traditional single-machine based methods become increasingly inefficient when processing large numbers of files due to the excessive computation time and Input/Output bottleneck. Distributed systems and more recent cloud-based systems offer an attractive solution. However, carefully designed and optimized workflow patterns and execution plans (schemas) are required to take full advantage of the increased computing power while overcoming bottlenecks to achieve high performance.

FINDINGS: In this study, we custom-design optimized schemas for three Apache big data platforms, Hadoop (MapReduce), HBase, and Spark, to perform sorted merging of a large number of VCF files. These schemas all adopt the divide-and-conquer strategy to split the merging job into sequential phases/stages consisting of subtasks that are conquered in an ordered, parallel, and bottleneck-free way. In two illustrating examples, we test the performance of our schemas on merging multiple VCF files into either a single TPED or a single VCF file, which are benchmarked with the traditional single/parallel multiway-merge methods, message passing interface (MPI)-based high-performance computing (HPC) implementation, and the popular VCFTools.

CONCLUSIONS: Our experiments suggest all three schemas either deliver a significant improvement in efficiency or render much better strong and weak scalabilities over traditional methods. Our findings provide generalized scalable schemas for performing sorted merging on genetics and genomics data using these Apache distributed systems.}, } @article {pmid31984322, year = {2018}, author = {Amland, RC and Sutariya, BB}, title = {An investigation of sepsis surveillance and emergency treatment on patient mortality outcomes: An observational cohort study.}, journal = {JAMIA open}, volume = {1}, number = {1}, pages = {107-114}, pmid = {31984322}, issn = {2574-2531}, abstract = {OBJECTIVE: To determine the prevalence of initiating the sepsis 3-h bundle of care and estimate effects of bundle completion on risk-adjusted mortality among emergency department (ED) patients screened-in by electronic surveillance.

MATERIALS AND METHODS: This was a multiple center observational cohort study conducted in 2016. The study population was comprised of patients screened-in by St. John Sepsis Surveillance Agent within 4 h of ED arrival, had a sepsis bundle initiated, and admitted to hospital. We built multivariable logistic regression models to estimate impact of a 3-h bundle completed within 3 h of arrival on mortality outcomes.

RESULTS: Approximately 3% ED patients were screened-in by electronic surveillance within 4 h of arrival and admitted to hospital. Nearly 7 in 10 (69%) patients had a bundle initiated, with most bundles completed within 3 h of arrival. The fully-adjusted risk model achieved good discrimination on mortality outcomes [area under the receiver operating characteristic 0.82, 95% confidence interval (CI) 0.79-0.85] and estimated 34% reduced mortality risk among patients with a bundle completed within 3 h of arrival compared to non-completers.

DISCUSSION: The sepsis bundle is an effective intervention for many vulnerable patients, and likely to be completed within 3 h after arrival when electronic surveillance with reliable alert notifications are integrated into clinical workflow. Beginning at triage, the platform and sepsis program enables identification and management of patients with greater precision, and increases the odds of good outcomes.

CONCLUSION: Sepsis surveillance and clinical decision support accelerate accurate recognition and stratification of patients, and facilitate timely delivery of health care.}, } @article {pmid29757988, year = {2018}, author = {Venčkauskas, A and Morkevicius, N and Bagdonas, K and Damaševičius, R and Maskeliūnas, R}, title = {A Lightweight Protocol for Secure Video Streaming.}, journal = {Sensors (Basel, Switzerland)}, volume = {18}, number = {5}, pages = {}, pmid = {29757988}, issn = {1424-8220}, abstract = {The Internet of Things (IoT) introduces many new challenges which cannot be solved using traditional cloud and host computing models. A new architecture known as fog computing is emerging to address these technological and security gaps. Traditional security paradigms focused on providing perimeter-based protections and client/server point to point protocols (e.g., Transport Layer Security (TLS)) are no longer the best choices for addressing new security challenges in fog computing end devices, where energy and computational resources are limited. In this paper, we present a lightweight secure streaming protocol for the fog computing "Fog Node-End Device" layer. This protocol is lightweight, connectionless, supports broadcast and multicast operations, and is able to provide data source authentication, data integrity, and confidentiality. The protocol is based on simple and energy efficient cryptographic methods, such as Hash Message Authentication Codes (HMAC) and symmetrical ciphers, and uses modified User Datagram Protocol (UDP) packets to embed authentication data into streaming data. Data redundancy could be added to improve reliability in lossy networks. 
The experimental results summarized in this paper confirm that the proposed method efficiently uses energy and computational resources and at the same time provides security properties on par with the Datagram TLS (DTLS) standard.}, } @article {pmid29757227, year = {2018}, author = {Lavassani, M and Forsström, S and Jennehag, U and Zhang, T}, title = {Combining Fog Computing with Sensor Mote Machine Learning for Industrial IoT.}, journal = {Sensors (Basel, Switzerland)}, volume = {18}, number = {5}, pages = {}, pmid = {29757227}, issn = {1424-8220}, abstract = {Digitalization is a global trend becoming ever more important to our connected and sustainable society. This trend also affects industry where the Industrial Internet of Things is an important part, and there is a need to conserve spectrum as well as energy when communicating data to a fog or cloud back-end system. In this paper we investigate the benefits of fog computing by proposing a novel distributed learning model on the sensor device and simulating the data stream in the fog, instead of transmitting all raw sensor values to the cloud back-end. To save energy and to communicate as few packets as possible, the updated parameters of the learned model at the sensor device are communicated in longer time intervals to a fog computing system. The proposed framework is implemented and tested in a real world testbed in order to make quantitative measurements and evaluate the system. Our results show that the proposed model can achieve a 98% decrease in the number of packets sent over the wireless link, and the fog node can still simulate the data stream with an acceptable accuracy of 97%. We also observe an end-to-end delay of 180 ms in our proposed three-layer framework. 
Hence, the framework shows that a combination of fog and cloud computing with a distributed data modeling at the sensor device for wireless sensor networks can be beneficial for Industrial Internet of Things applications.}, } @article {pmid29751588, year = {2018}, author = {Rao, NSV and Ma, CYT and Hausken, K and He, F and Yau, DKY and Zhuang, J}, title = {Defense Strategies for Asymmetric Networked Systems with Discrete Components.}, journal = {Sensors (Basel, Switzerland)}, volume = {18}, number = {5}, pages = {}, pmid = {29751588}, issn = {1424-8220}, abstract = {We consider infrastructures consisting of a network of systems, each composed of discrete components. The network provides the vital connectivity between the systems and hence plays a critical, asymmetric role in the infrastructure operations. The individual components of the systems can be attacked by cyber and physical means and can be appropriately reinforced to withstand these attacks. We formulate the problem of ensuring the infrastructure performance as a game between an attacker and a provider, who choose the numbers of the components of the systems and network to attack and reinforce, respectively. The costs and benefits of attacks and reinforcements are characterized using the sum-form, product-form and composite utility functions, each composed of a survival probability term and a component cost term. We present a two-level characterization of the correlations within the infrastructure: (i) the aggregate failure correlation function specifies the infrastructure failure probability given the failure of an individual system or network, and (ii) the survival probabilities of the systems and network satisfy first-order differential conditions that capture the component-level correlations using multiplier functions. 
We derive Nash equilibrium conditions that provide expressions for individual system survival probabilities and also the expected infrastructure capacity specified by the total number of operational components. We apply these results to derive and analyze defense strategies for distributed cloud computing infrastructures using cyber-physical models.}, } @article {pmid29741956, year = {2018}, author = {Zhao, Y and Wang, X and Tang, H}, title = {A Secure Alignment Algorithm for Mapping Short Reads to Human Genome.}, journal = {Journal of computational biology : a journal of computational molecular cell biology}, volume = {25}, number = {6}, pages = {529-540}, pmid = {29741956}, issn = {1557-8666}, support = {R01 HG007078/HG/NHGRI NIH HHS/United States ; U01 EB023685/EB/NIBIB NIH HHS/United States ; }, mesh = {*Algorithms ; Cloud Computing/standards ; *Computer Security/standards ; Data Mining/methods ; *Genome, Human ; Genomics ; High-Throughput Nucleotide Sequencing ; Humans ; }, abstract = {The elastic and inexpensive computing resources such as clouds have been recognized as a useful solution to analyzing massive human genomic data (e.g., acquired by using next-generation sequencers) in biomedical researches. However, outsourcing human genome computation to public or commercial clouds was hindered due to privacy concerns: even a small number of human genome sequences contain sufficient information for identifying the donor of the genomic data. This issue cannot be directly addressed by existing security and cryptographic techniques (such as homomorphic encryption), because they are too heavyweight to carry out practical genome computation tasks on massive data. In this article, we present a secure algorithm to accomplish the read mapping, one of the most basic tasks in human genomic data analysis based on a hybrid cloud computing model. 
Comparing with the existing approaches, our algorithm delegates most computation to the public cloud, while only performing encryption and decryption on the private cloud, and thus makes the maximum use of the computing resource of the public cloud. Furthermore, our algorithm reports similar results as the nonsecure read mapping algorithms, including the alignment between reads and the reference genome, which can be directly used in the downstream analysis such as the inference of genomic variations. We implemented the algorithm in C++ and Python on a hybrid cloud system, in which the public cloud uses an Apache Spark system.}, } @article {pmid29726914, year = {2018}, author = {Mora-Márquez, F and Vázquez-Poletti, JL and López de Heredia, U}, title = {NGScloud: RNA-seq analysis of non-model species using cloud computing.}, journal = {Bioinformatics (Oxford, England)}, volume = {34}, number = {19}, pages = {3405-3407}, doi = {10.1093/bioinformatics/bty363}, pmid = {29726914}, issn = {1367-4811}, mesh = {*Cloud Computing ; Computational Biology ; RNA ; *Sequence Analysis, RNA ; *Software ; }, abstract = {SUMMARY: RNA-seq analysis usually requires large computing infrastructures. NGScloud is a bioinformatic system developed to analyze RNA-seq data using the cloud computing services of Amazon that permit the access to ad hoc computing infrastructure scaled according to the complexity of the experiment, so its costs and times can be optimized. The application provides a user-friendly front-end to operate Amazon's hardware resources, and to control a workflow of RNA-seq analysis oriented to non-model species, incorporating the cluster concept, which allows parallel runs of common RNA-seq analysis programs in several virtual machines for faster analysis.

NGScloud is freely available at https://github.com/GGFHF/NGScloud/. A manual detailing installation and how-to-use instructions is available with the distribution.}, } @article {pmid29719309, year = {2018}, author = {Ahmadi, M and Aslani, N}, title = {Capabilities and Advantages of Cloud Computing in the Implementation of Electronic Health Record.}, journal = {Acta informatica medica : AIM : journal of the Society for Medical Informatics of Bosnia \& Herzegovina : casopis Drustva za medicinsku informatiku BiH}, volume = {26}, number = {1}, pages = {24-28}, pmid = {29719309}, issn = {0353-8109}, abstract = {BACKGROUND: With regard to the high cost of the Electronic Health Record (EHR), in recent years the use of new technologies, in particular cloud computing, has increased. The purpose of this study was to review systematically the studies conducted in the field of cloud computing.

METHODS: The present study was a systematic review conducted in 2017. Search was performed in the Scopus, Web of Sciences, IEEE, Pub Med and Google Scholar databases by combination keywords. From the 431 article that selected at the first, after applying the inclusion and exclusion criteria, 27 articles were selected for surveyed. Data gathering was done by a self-made check list and was analyzed by content analysis method.

RESULTS: The finding of this study showed that cloud computing is a very widespread technology. It includes domains such as cost, security and privacy, scalability, mutual performance and interoperability, implementation platform and independence of Cloud Computing, ability to search and exploration, reducing errors and improving the quality, structure, flexibility and sharing ability. It will be effective for electronic health record.

CONCLUSION: According to the findings of the present study, higher capabilities of cloud computing are useful in implementing EHR in a variety of contexts. It also provides wide opportunities for managers, analysts and providers of health information systems. Considering the advantages and domains of cloud computing in the establishment of EHR, it is recommended to use this technology.}, } @article {pmid29702607, year = {2018}, author = {Lin, YH and Hu, YC}, title = {Residential Consumer-Centric Demand-Side Management Based on Energy Disaggregation-Piloting Constrained Swarm Intelligence: Towards Edge Computing.}, journal = {Sensors (Basel, Switzerland)}, volume = {18}, number = {5}, pages = {}, pmid = {29702607}, issn = {1424-8220}, abstract = {The emergence of smart Internet of Things (IoT) devices has highly favored the realization of smart homes in a down-stream sector of a smart grid. The underlying objective of Demand Response (DR) schemes is to actively engage customers to modify their energy consumption on domestic appliances in response to pricing signals. Domestic appliance scheduling is widely accepted as an effective mechanism to manage domestic energy consumption intelligently. Besides, to residential customers for DR implementation, maintaining a balance between energy consumption cost and users’ comfort satisfaction is a challenge. Hence, in this paper, a constrained Particle Swarm Optimization (PSO)-based residential consumer-centric load-scheduling method is proposed. The method can be further featured with edge computing. In contrast with cloud computing, edge computing---a method of optimizing cloud computing technologies by driving computing capabilities at the IoT edge of the Internet as one of the emerging trends in engineering technology---addresses bandwidth-intensive contents and latency-sensitive applications required among sensors and central data centers through data analytics at or near the source of data. 
A non-intrusive load-monitoring technique proposed previously is utilized to automatic determination of physical characteristics of power-intensive home appliances from users’ life patterns. The swarm intelligence, constrained PSO, is used to minimize the energy consumption cost while considering users’ comfort satisfaction for DR implementation. The residential consumer-centric load-scheduling method proposed in this paper is evaluated under real-time pricing with inclining block rates and is demonstrated in a case study. The experimentation reported in this paper shows the proposed residential consumer-centric load-scheduling method can re-shape loads by home appliances in response to DR signals. Moreover, a phenomenal reduction in peak power consumption is achieved by 13.97%.}, } @article {pmid29677991, year = {2018}, author = {Flynn, AJ and Boisvert, P and Gittlen, N and Gross, C and Iott, B and Lagoze, C and Meng, G and Friedman, CP}, title = {Architecture and Initial Development of a Knowledge-as-a-Service Activator for Computable Knowledge Objects for Health.}, journal = {Studies in health technology and informatics}, volume = {247}, number = {}, pages = {401-405}, pmid = {29677991}, issn = {1879-8365}, mesh = {*Cloud Computing ; Humans ; *Internet ; Knowledge Bases ; }, abstract = {The Knowledge Grid (KGrid) is a research and development program toward infrastructure capable of greatly decreasing latency between the publication of new biomedical knowledge and its widespread uptake into practice. KGrid comprises digital knowledge objects, an online Library to store them, and an Activator that uses them to provide Knowledge-as-a-Service (KaaS). KGrid's Activator enables computable biomedical knowledge, held in knowledge objects, to be rapidly deployed at Internet-scale in cloud computing environments for improved health. 
Here we present the Activator, its system architecture and primary functions.}, } @article {pmid29671336, year = {2018}, author = {Verhoeven, JTP and Canuti, M and Munro, HJ and Dufour, SC and Lang, AS}, title = {ViDiT-CACTUS: an inexpensive and versatile library preparation and sequence analysis method for virus discovery and other microbiology applications.}, journal = {Canadian journal of microbiology}, volume = {64}, number = {10}, pages = {761-773}, doi = {10.1139/cjm-2018-0097}, pmid = {29671336}, issn = {1480-3275}, mesh = {*Computational Biology ; Gene Library ; Genome, Viral ; *High-Throughput Nucleotide Sequencing ; Microbiota ; Polymerase Chain Reaction/*methods ; Viruses/genetics/*isolation \& purification ; }, abstract = {High-throughput sequencing (HTS) technologies are becoming increasingly important within microbiology research, but aspects of library preparation, such as high cost per sample or strict input requirements, make HTS difficult to implement in some niche applications and for research groups on a budget. To answer these necessities, we developed ViDiT, a customizable, PCR-based, extremely low-cost (less than US\$5 per sample), and versatile library preparation method, and CACTUS, an analysis pipeline designed to rely on cloud computing power to generate high-quality data from ViDiT-based experiments without the need of expensive servers. We demonstrate here the versatility and utility of these methods within three fields of microbiology: virus discovery, amplicon-based viral genome sequencing, and microbiome profiling. ViDiT-CACTUS allowed the identification of viral fragments from 25 different viral families from 36 oropharyngeal-cloacal swabs collected from wild birds, the sequencing of three almost complete genomes of avian influenza A viruses (>90% coverage), and the characterization and functional profiling of the complete microbial diversity (bacteria, archaea, viruses) within a deep-sea carnivorous sponge. 
ViDiT-CACTUS demonstrated its validity in a wide range of microbiology applications, and its simplicity and modularity make it easily implementable in any molecular biology laboratory, towards various research goals.}, } @article {pmid29661796, year = {2018}, author = {Shrestha, S and Sengupta, PP}, title = {Imaging Heart Failure With Artificial Intelligence: Improving the Realism of Synthetic Wisdom.}, journal = {Circulation. Cardiovascular imaging}, volume = {11}, number = {4}, pages = {e007723}, doi = {10.1161/CIRCIMAGING.118.007723}, pmid = {29661796}, issn = {1942-0080}, mesh = {Artificial Intelligence ; *Heart Failure ; Humans ; Machine Learning ; Stroke Volume ; *Ventricular Function, Left ; }, } @article {pmid29652810, year = {2018}, author = {Guo, Y and Liu, F and Cai, Z and Xiao, N and Zhao, Z}, title = {Edge-Based Efficient Search over Encrypted Data Mobile Cloud Storage.}, journal = {Sensors (Basel, Switzerland)}, volume = {18}, number = {4}, pages = {}, pmid = {29652810}, issn = {1424-8220}, abstract = {Smart sensor-equipped mobile devices sense, collect, and process data generated by the edge network to achieve intelligent control, but such mobile devices usually have limited storage and computing resources. Mobile cloud storage provides a promising solution owing to its rich storage resources, great accessibility, and low cost. But it also brings a risk of information leakage. The encryption of sensitive data is the basic step to resist the risk. However, deploying a high complexity encryption and decryption algorithm on mobile devices will greatly increase the burden of terminal operation and the difficulty to implement the necessary privacy protection algorithm. In this paper, we propose ENSURE (EfficieNt and SecURE), an efficient and secure encrypted search architecture over mobile cloud storage. ENSURE is inspired by edge computing. 
It allows mobile devices to offload the computation intensive task onto the edge server to achieve a high efficiency. Besides, to protect data security, it reduces the information acquisition of untrusted cloud by hiding the relevance between query keyword and search results from the cloud. Experiments on a real data set show that ENSURE reduces the computation time by 15% to 49% and saves the energy consumption by 38% to 69% per query.}, } @article {pmid29649172, year = {2018}, author = {Diaconita, V and Bologa, AR and Bologa, R}, title = {Hadoop Oriented Smart Cities Architecture.}, journal = {Sensors (Basel, Switzerland)}, volume = {18}, number = {4}, pages = {}, pmid = {29649172}, issn = {1424-8220}, abstract = {A smart city implies a consistent use of technology for the benefit of the community. As the city develops over time, components and subsystems such as smart grids, smart water management, smart traffic and transportation systems, smart waste management systems, smart security systems, or e-governance are added. These components ingest and generate a multitude of structured, semi-structured or unstructured data that may be processed using a variety of algorithms in batches, micro batches or in real-time. The ICT architecture must be able to handle the increased storage and processing needs. When vertical scaling is no longer a viable solution, Hadoop can offer efficient linear horizontal scaling, solving storage, processing, and data analyses problems in many ways. This enables architects and developers to choose a stack according to their needs and skill-levels. In this paper, we propose a Hadoop-based architectural stack that can provide the ICT backbone for efficiently managing a smart city. On the one hand, Hadoop, together with Spark and the plethora of NoSQL databases and accompanying Apache projects, is a mature ecosystem. This is one of the reasons why it is an attractive option for a Smart City architecture. 
On the other hand, it is also very dynamic; things can change very quickly, and many new frameworks, products and options continue to emerge as others decline. To construct an optimized, modern architecture, we discuss and compare various products and engines based on a process that takes into consideration how the products perform and scale, as well as the reusability of the code, innovations, features, and support and interest in online communities.}, } @article {pmid29643364, year = {2018}, author = {Durham, TJ and Libbrecht, MW and Howbert, JJ and Bilmes, J and Noble, WS}, title = {PREDICTD PaRallel Epigenomics Data Imputation with Cloud-based Tensor Decomposition.}, journal = {Nature communications}, volume = {9}, number = {1}, pages = {1402}, pmid = {29643364}, issn = {2041-1723}, support = {R01 ES024917/ES/NIEHS NIH HHS/United States ; U41 HG007000/HG/NHGRI NIH HHS/United States ; U41 HG007000/NH/NIH HHS/United States ; R01 ES024917/NH/NIH HHS/United States ; }, mesh = {Chromatin/chemistry/metabolism ; Cloud Computing/*statistics \& numerical data ; Datasets as Topic ; *Epigenesis, Genetic ; Epigenomics/statistics \& numerical data ; *Genome, Human ; Histones/*genetics/metabolism ; Humans ; *Software ; }, abstract = {The Encyclopedia of DNA Elements (ENCODE) and the Roadmap Epigenomics Project seek to characterize the epigenome in diverse cell types using assays that identify, for example, genomic regions with modified histones or accessible chromatin. These efforts have produced thousands of datasets but cannot possibly measure each epigenomic factor in all cell types. To address this, we present a method, PaRallel Epigenomics Data Imputation with Cloud-based Tensor Decomposition (PREDICTD), to computationally impute missing experiments. PREDICTD leverages an elegant model called ``tensor decomposition'' to impute many experiments simultaneously. 
Compared with the current state-of-the-art method, ChromImpute, PREDICTD produces lower overall mean squared error, and combining the two methods yields further improvement. We show that PREDICTD data captures enhancer activity at noncoding human accelerated regions. PREDICTD provides reference imputed data and open-source software for investigating new cell types, and demonstrates the utility of tensor decomposition and cloud computing, both promising technologies for bioinformatics.}, } @article {pmid29618454, year = {2018}, author = {Huang, YN and Peng, XC and Ma, S and Yu, H and Jin, YB and Zheng, J and Fu, GH}, title = {Development of Whole Slide Imaging on Smartphones and Evaluation With ThinPrep Cytology Test Samples: Follow-Up Study.}, journal = {JMIR mHealth and uHealth}, volume = {6}, number = {4}, pages = {e82}, pmid = {29618454}, issn = {2291-5222}, abstract = {BACKGROUND: The smartphone-based whole slide imaging (WSI) system represents a low-cost and effective alternative to automatic scanners for telepathology. In a previous study, the development of one such solution, named scalable whole slide imaging (sWSI), was presented and analyzed. A clinical evaluation of its iOS version with 100 frozen section samples verified the diagnosis-readiness of the produced virtual slides.

OBJECTIVE: The first aim of this study was to delve into the quantifying issues encountered in the development of an Android version. It should also provide insights into future high-resolution real-time feedback medical imaging apps on Android and invoke the awareness of smartphone manufacturers for collaboration. The second aim of this study was to further verify the clinical value of sWSI with cytology samples. This type is different from the frozen section samples in that they require finer detail on the cellular level.

METHODS: During sWSI development on Android, it was discovered that many models do not support uncompressed camera pixel data with sufficient resolution and full field of view. The proportion of models supporting the optimal format was estimated in a test on 200 mainstream Android models. Other factors, including slower processing speed and camera preview freezing, also led to inferior performance of sWSI on Android compared with the iOS version. The processing speed was mostly determined by the central processing unit frequency in theory, and the relationship was investigated in the 200-model simulation experiment with physical devices. The camera preview freezing was caused by the lag between triggering photo capture and resuming preview. In the clinical evaluation, 100 ThinPrep cytology test samples covering 6 diseases were scanned with sWSI and compared against the ground truth of optical microscopy.

RESULTS: Among the tested Android models, only 3.0% (6/200) provided an optimal data format, meeting all criteria of quality and efficiency. The image-processing speed demonstrated a positive relationship with the central processing unit frequency but to a smaller degree than expected and was highly model-dependent. The virtual slides produced by sWSI on Android and iOS of ThinPrep cytology test samples achieved similar high quality. Using optical microscopy as the ground truth, pathologists made a correct diagnosis on 87.5% (175/200) of the cases with sWSI virtual slides. Depending on the sWSI version and the pathologist in charge, the kappa value varied between .70 and .82. All participating pathologists considered the quality of the sWSI virtual slides in the experiment to be adequate for routine usage.

CONCLUSIONS: Limited by hardware and operating system support, the performance of sWSI on mainstream Android smartphones did not fully match the iOS version. However, in practice, this difference was not significant, and both were adequate for digitizing most of the sample types for telepathology consultation.}, } @article {pmid29596416, year = {2018}, author = {Cole, BS and Moore, JH}, title = {Eleven quick tips for architecting biomedical informatics workflows with cloud computing.}, journal = {PLoS computational biology}, volume = {14}, number = {3}, pages = {e1005994}, pmid = {29596416}, issn = {1553-7358}, support = {P30 ES013508/ES/NIEHS NIH HHS/United States ; AI116794/NH/NIH HHS/United States ; LM010098/NH/NIH HHS/United States ; R01 LM010098/LM/NLM NIH HHS/United States ; R01 AI116794/AI/NIAID NIH HHS/United States ; }, mesh = {*Cloud Computing ; Computational Biology/*methods ; Computer Security ; Humans ; Workflow ; }, abstract = {Cloud computing has revolutionized the development and operations of hardware and software across diverse technological arenas, yet academic biomedical research has lagged behind despite the numerous and weighty advantages that cloud computing offers. Biomedical researchers who embrace cloud computing can reap rewards in cost reduction, decreased development and maintenance workload, increased reproducibility, ease of sharing data and software, enhanced security, horizontal and vertical scalability, high availability, a thriving technology partner ecosystem, and much more. Despite these advantages that cloud-based workflows offer, the majority of scientific software developed in academia does not utilize cloud computing and must be migrated to the cloud by the user. 
In this article, we present 11 quick tips for architecting biomedical informatics workflows on compute clouds, distilling knowledge gained from experience developing, operating, maintaining, and distributing software and virtualized appliances on the world's largest cloud. Researchers who follow these tips stand to benefit immediately by migrating their workflows to cloud computing and embracing the paradigm of abstraction.}, } @article {pmid29550393, year = {2018}, author = {Hasan, MZ and Mahdi, MSR and Sadat, MN and Mohammed, N}, title = {Secure count query on encrypted genomic data.}, journal = {Journal of biomedical informatics}, volume = {81}, number = {}, pages = {41-52}, doi = {10.1016/j.jbi.2018.03.003}, pmid = {29550393}, issn = {1532-0480}, mesh = {Algorithms ; *Cloud Computing ; *Computer Security ; Confidentiality ; False Positive Reactions ; *Genome, Human ; Genomics/*methods ; Genotype ; Health Insurance Portability and Accountability Act ; Humans ; Information Dissemination ; Medical Informatics/instrumentation/*methods ; Outsourced Services ; Phenotype ; Polymorphism, Single Nucleotide ; Privacy ; Programming Languages ; Records ; United States ; }, abstract = {Human genomic information can yield more effective healthcare by guiding medical decisions. Therefore, genomics research is gaining popularity as it can identify potential correlations between a disease and a certain gene, which improves the safety and efficacy of drug treatment and can also develop more effective prevention strategies [1]. To reduce the sampling error and to increase the statistical accuracy of this type of research projects, data from different sources need to be brought together since a single organization does not necessarily possess required amount of data. In this case, data sharing among multiple organizations must satisfy strict policies (for instance, HIPAA and PIPEDA) that have been enforced to regulate privacy-sensitive data sharing. 
Storage and computation on the shared data can be outsourced to a third party cloud service provider, equipped with enormous storage and computation resources. However, outsourcing data to a third party is associated with a potential risk of privacy violation of the participants, whose genomic sequence or clinical profile is used in these studies. In this article, we propose a method for secure sharing and computation on genomic data in a semi-honest cloud server. In particular, there are two main contributions. Firstly, the proposed method can handle biomedical data containing both genotype and phenotype. Secondly, our proposed index tree scheme reduces the computational overhead significantly for executing secure count query operation. In our proposed method, the confidentiality of shared data is ensured through encryption, while making the entire computation process efficient and scalable for cutting-edge biomedical applications. We evaluated our proposed method in terms of efficiency on a database of Single-Nucleotide Polymorphism (SNP) sequences, and experimental results demonstrate that the execution time for a query of 50 SNPs in a database of 50,000 records is approximately 5 s, where each record contains 500 SNPs. 
And, it requires 69.7 s to execute the query on the same database that also includes phenotypes.}, } @article {pmid29547983, year = {2018}, author = {Wolford, BN and Willer, CJ and Surakka, I}, title = {Electronic health records: the next wave of complex disease genetics.}, journal = {Human molecular genetics}, volume = {27}, number = {R1}, pages = {R14-R21}, pmid = {29547983}, issn = {1460-2083}, support = {R01 HL127564/HL/NHLBI NIH HHS/United States ; R35 HL135824/HL/NHLBI NIH HHS/United States ; T32 HG000040/HG/NHGRI NIH HHS/United States ; }, mesh = {Cardiovascular Diseases/*genetics/pathology ; Cloud Computing ; Databases, Genetic/trends ; *Electronic Health Records ; Genetic Diseases, Inborn/*genetics/pathology ; Genetics, Population/trends ; Genome-Wide Association Study/*trends ; Genotype ; Humans ; Polymorphism, Single Nucleotide/genetics ; Quantitative Trait Loci/genetics ; }, abstract = {The combination of electronic health records (EHRs) with genetic data has ushered in the next wave of complex disease genetics. Population-based biobanks and other large cohorts provide sufficient sample sizes to identify novel genetic associations across the hundreds to thousands of phenotypes gleaned from EHRs. In this review, we summarize the current state of these EHR-linked biobanks, explore ongoing methods development in the field and highlight recent discoveries of genetic associations. We enumerate the many existing biobanks with EHRs linked to genetic data, many of which are available to researchers via application and contain sample sizes >50 000. We also discuss the computational and statistical considerations for analysis of such large datasets including mixed models, phenotype curation and cloud computing. Finally, we demonstrate how genome-wide association studies and phenome-wide association studies have identified novel genetic findings for complex diseases, specifically cardiometabolic traits. 
As more researchers employ innovative hypotheses and analysis approaches to study EHR-linked biobanks, we anticipate a richer understanding of the genetic etiology of complex diseases.}, } @article {pmid29543809, year = {2018}, author = {Abdollahi, N and Albani, A and Anthony, E and Baud, A and Cardon, M and Clerc, R and Czernecki, D and Conte, R and David, L and Delaune, A and Djerroud, S and Fourgoux, P and Guiglielmoni, N and Laurentie, J and Lehmann, N and Lochard, C and Montagne, R and Myrodia, V and Opuu, V and Parey, E and Polit, L and Privé, S and Quignot, C and Ruiz-Cuevas, M and Sissoko, M and Sompairac, N and Vallerix, A and Verrecchia, V and Delarue, M and Guérois, R and Ponty, Y and Sacquin-Mora, S and Carbone, A and Froidevaux, C and Le Crom, S and Lespinet, O and Weigt, M and Abboud, S and Bernardes, J and Bouvier, G and Dequeker, C and Ferré, A and Fuchs, P and Lelandais, G and Poulain, P and Richard, H and Schweke, H and Laine, E and Lopes, A}, title = {Meet-U: Educating through research immersion.}, journal = {PLoS computational biology}, volume = {14}, number = {3}, pages = {e1005992}, pmid = {29543809}, issn = {1553-7358}, mesh = {Computational Biology/*education/*methods ; Humans ; Research/*education ; Research Design ; Students ; Universities ; }, abstract = {We present a new educational initiative called Meet-U that aims to train students for collaborative work in computational biology and to bridge the gap between education and research. Meet-U mimics the setup of collaborative research projects and takes advantage of the most popular tools for collaborative work and of cloud computing. Students are grouped in teams of 4-5 people and have to realize a project from A to Z that answers a challenging question in biology. Meet-U promotes "coopetition," as the students collaborate within and across the teams and are also in competition with each other to develop the best final product. 
Meet-U fosters interactions between different actors of education and research through the organization of a meeting day, open to everyone, where the students present their work to a jury of researchers and jury members give research seminars. This very unique combination of education and research is strongly motivating for the students and provides a formidable opportunity for a scientific community to unite and increase its visibility. We report on our experience with Meet-U in two French universities with master's students in bioinformatics and modeling, with protein-protein docking as the subject of the course. Meet-U is easy to implement and can be straightforwardly transferred to other fields and/or universities. All the information and data are available at www.meet-u.org.}, } @article {pmid29543729, year = {2018}, author = {González-Briones, A and Prieto, J and De La Prieta, F and Herrera-Viedma, E and Corchado, JM}, title = {Energy Optimization Using a Case-Based Reasoning Strategy.}, journal = {Sensors (Basel, Switzerland)}, volume = {18}, number = {3}, pages = {}, pmid = {29543729}, issn = {1424-8220}, abstract = {At present, the domotization of homes and public buildings is becoming increasingly popular. Domotization is most commonly applied to the field of energy management, since it gives the possibility of managing the consumption of the devices connected to the electric network, the way in which the users interact with these devices, as well as other external factors that influence consumption. In buildings, Heating, Ventilation and Air Conditioning (HVAC) systems have the highest consumption rates. The systems proposed so far have not succeeded in optimizing the energy consumption associated with a HVAC system because they do not monitor all the variables involved in electricity consumption. 
For this reason, this article presents an agent approach that benefits from the advantages provided by a Multi-Agent architecture (MAS) deployed in a Cloud environment with a wireless sensor network (WSN) in order to achieve energy savings. The agents of the MAS learn social behavior thanks to the collection of data and the use of an artificial neural network (ANN). The proposed system has been assessed in an office building achieving an average energy savings of 41% in the experimental group offices.}, } @article {pmid29533240, year = {2018}, author = {Krissinel, E and Uski, V and Lebedev, A and Winn, M and Ballard, C}, title = {Distributed computing for macromolecular crystallography.}, journal = {Acta crystallographica. Section D, Structural biology}, volume = {74}, number = {Pt 2}, pages = {143-151}, pmid = {29533240}, issn = {2059-7983}, support = {BB/L007037/1//BBSRC/ ; H2020-EINFRA-2015-1-675858//European Commission, Horizon 2020 Framework Programme/ ; }, mesh = {Automation ; Cloud Computing ; *Computer Communication Networks ; Crystallography, X-Ray/*methods ; *Electronic Data Processing ; Macromolecular Substances/*chemistry ; Software ; }, abstract = {Modern crystallographic computing is characterized by the growing role of automated structure-solution pipelines, which represent complex expert systems utilizing a number of program components, decision makers and databases. They also require considerable computational resources and regular database maintenance, which is increasingly more difficult to provide at the level of individual desktop-based CCP4 setups. On the other hand, there is a significant growth in data processed in the field, which brings up the issue of centralized facilities for keeping both the data collected and structure-solution projects. 
The paradigm of distributed computing and data management offers a convenient approach to tackling these problems, which has become more attractive in recent years owing to the popularity of mobile devices such as tablets and ultra-portable laptops. In this article, an overview is given of developments by CCP4 aimed at bringing distributed crystallographic computations to a wide crystallographic community.}, } @article {pmid29526864, year = {2018}, author = {Rajan, JP and Rajan, SE}, title = {An Internet of Things based physiological signal monitoring and receiving system for virtual enhanced health care network.}, journal = {Technology and health care : official journal of the European Society for Engineering and Medicine}, volume = {26}, number = {2}, pages = {379-385}, doi = {10.3233/THC-171173}, pmid = {29526864}, issn = {1878-7401}, mesh = {Blood Pressure ; Body Temperature ; Computer Communication Networks ; Computer Security ; Humans ; *Internet ; Monitoring, Ambulatory/*methods ; Oxygen/blood ; Pulse ; Remote Sensing Technology ; Wearable Electronic Devices ; *Wireless Technology ; }, abstract = {BACKGROUND: Wireless physiological signal monitoring system designing with secured data communication in the health care system is an important and dynamic process.

OBJECTIVE: We propose a signal monitoring system using NI myRIO connected with the wireless body sensor network through multi-channel signal acquisition method. Based on the server side validation of the signal, the data connected to the local server is updated in the cloud. The Internet of Things (IoT) architecture is used to get the mobility and fast access of patient data to healthcare service providers.

METHODS: This research work proposes a novel architecture for wireless physiological signal monitoring system using ubiquitous healthcare services by virtual Internet of Things.

RESULTS: We showed an improvement in method of access and real time dynamic monitoring of physiological signal of this remote monitoring system using virtual Internet of thing approach. This remote monitoring and access system is evaluated in conventional value. This proposed system is envisioned to modern smart health care system by high utility and user friendly in clinical applications.

CONCLUSION: We claim that the proposed scheme significantly improves the accuracy of the remote monitoring system compared to the other wireless communication methods in clinical system.}, } @article {pmid29515815, year = {2018}, author = {Wu, JX and Huang, PT and Lin, CH and Li, CM}, title = {Blood leakage detection during dialysis therapy based on fog computing with array photocell sensors and heteroassociative memory model.}, journal = {Healthcare technology letters}, volume = {5}, number = {1}, pages = {38-44}, pmid = {29515815}, issn = {2053-3713}, abstract = {Blood leakage and blood loss are serious life-threatening complications occurring during dialysis therapy. These events have been of concerns to both healthcare givers and patients. More than 40% of adult blood volume can be lost in just a few minutes, resulting in morbidities and mortality. The authors intend to propose the design of a warning tool for the detection of blood leakage/blood loss during dialysis therapy based on fog computing with an array of photocell sensors and heteroassociative memory (HAM) model. Photocell sensors are arranged in an array on a flexible substrate to detect blood leakage via the resistance changes with illumination in the visible spectrum of 500-700 nm. The HAM model is implemented to design a virtual alarm unit using electricity changes in an embedded system. The proposed warning tool can indicate the risk level in both end-sensing units and remote monitor devices via a wireless network and fog/cloud computing. 
The animal experimental results (pig blood) will demonstrate the feasibility.}, } @article {pmid29515813, year = {2018}, author = {Putluri, S and Zia Ur Rahman, M and Fathima, SY}, title = {Cloud-based adaptive exon prediction for DNA analysis.}, journal = {Healthcare technology letters}, volume = {5}, number = {1}, pages = {25-30}, pmid = {29515813}, issn = {2053-3713}, abstract = {Cloud computing offers significant research and economic benefits to healthcare organisations. Cloud services provide a safe place for storing and managing large amounts of such sensitive data. Under conventional flow of gene information, gene sequence laboratories send out raw and inferred information via Internet to several sequence libraries. DNA sequencing storage costs will be minimised by use of cloud service. In this study, the authors put forward a novel genomic informatics system using Amazon Cloud Services, where genomic sequence information is stored and accessed for processing. True identification of exon regions in a DNA sequence is a key task in bioinformatics, which helps in disease identification and design drugs. Three base periodicity property of exons forms the basis of all exon identification techniques. Adaptive signal processing techniques found to be promising in comparison with several other methods. Several adaptive exon predictors (AEPs) are developed using variable normalised least mean square and its maximum normalised variants to reduce computational complexity. 
Finally, performance evaluation of various AEPs is done based on measures such as sensitivity, specificity and precision using various standard genomic datasets taken from National Center for Biotechnology Information genomic sequence database.}, } @article {pmid29504905, year = {2018}, author = {Ko, G and Kim, PG and Yoon, J and Han, G and Park, SJ and Song, W and Lee, B}, title = {Closha: bioinformatics workflow system for the analysis of massive sequencing data.}, journal = {BMC bioinformatics}, volume = {19}, number = {Suppl 1}, pages = {43}, pmid = {29504905}, issn = {1471-2105}, mesh = {Algorithms ; Cloud Computing ; Genomics/methods ; High-Throughput Nucleotide Sequencing/*methods ; *Software ; Workflow ; }, abstract = {BACKGROUND: While next-generation sequencing (NGS) costs have fallen in recent years, the cost and complexity of computation remain substantial obstacles to the use of NGS in bio-medical care and genomic research. The rapidly increasing amounts of data available from the new high-throughput methods have made data processing infeasible without automated pipelines. The integration of data and analytic resources into workflow systems provides a solution to the problem by simplifying the task of data analysis.

RESULTS: To address this challenge, we developed a cloud-based workflow management system, Closha, to provide fast and cost-effective analysis of massive genomic data. We implemented complex workflows making optimal use of high-performance computing clusters. Closha allows users to create multi-step analyses using drag and drop functionality and to modify the parameters of pipeline tools. Users can also import the Galaxy pipelines into Closha. Closha is a hybrid system that enables users to use both analysis programs providing traditional tools and MapReduce-based big data analysis programs simultaneously in a single pipeline. Thus, the execution of analytics algorithms can be parallelized, speeding up the whole process. We also developed a high-speed data transmission solution, KoDS, to transmit a large amount of data at a fast rate. KoDS has a file transfer speed of up to 10 times that of normal FTP and HTTP. The computer hardware for Closha is 660 CPU cores and 800 TB of disk storage, enabling 500 jobs to run at the same time.

CONCLUSIONS: Closha is a scalable, cost-effective, and publicly available web service for large-scale genomic data analysis. Closha supports the reliable and highly scalable execution of sequencing analysis workflows in a fully automated manner. Closha provides a user-friendly interface to all genomic scientists to try to derive accurate results from NGS platform data. The Closha cloud server is freely available for use from http://closha.kobic.re.kr/ .}, } @article {pmid29502320, year = {2018}, author = {Ogiela, U and Takizawa, M and Ogiela, L}, title = {Cognitive Approaches for Medicine in Cloud Computing.}, journal = {Journal of medical systems}, volume = {42}, number = {4}, pages = {70}, doi = {10.1007/s10916-018-0918-5}, pmid = {29502320}, issn = {1573-689X}, support = {DEC-2016/23/B/HS4/00616//Narodowe Centrum Nauki/ ; }, mesh = {*Algorithms ; *Cloud Computing ; Computer Security ; *Data Interpretation, Statistical ; Humans ; Semantic Web ; }, abstract = {UNLABELLED: This paper will present the application potential of the cognitive approach to data interpretation, with special reference to medical areas. The possibilities of using the meaning approach to data description and analysis will be proposed for data analysis tasks in Cloud Computing. The methods of cognitive data management in Cloud Computing are aimed to support the processes of protecting data against unauthorised takeover and they serve to enhance the data management processes. The accomplishment of the proposed tasks will be the definition of algorithms for the execution of meaning data interpretation processes in safe Cloud Computing.

HIGHLIGHTS: • We proposed a cognitive methods for data description. • Proposed a techniques for secure data in Cloud Computing. • Application of cognitive approaches for medicine was described.}, } @article {pmid29498641, year = {2018}, author = {Pinheiro, A and Dias Canedo, E and de Sousa Junior, RT and de Oliveira Albuquerque, R and García Villalba, LJ and Kim, TH}, title = {Security Architecture and Protocol for Trust Verifications Regarding the Integrity of Files Stored in Cloud Services.}, journal = {Sensors (Basel, Switzerland)}, volume = {18}, number = {3}, pages = {}, pmid = {29498641}, issn = {1424-8220}, abstract = {Cloud computing is considered an interesting paradigm due to its scalability, availability and virtually unlimited storage capacity. However, it is challenging to organize a cloud storage service (CSS) that is safe from the client point-of-view and to implement this CSS in public clouds since it is not advisable to blindly consider this configuration as fully trustworthy. Ideally, owners of large amounts of data should trust their data to be in the cloud for a long period of time, without the burden of keeping copies of the original data, nor of accessing the whole content for verifications regarding data preservation. Due to these requirements, integrity, availability, privacy and trust are still challenging issues for the adoption of cloud storage services, especially when losing or leaking information can bring significant damage, be it legal or business-related. With such concerns in mind, this paper proposes an architecture for periodically monitoring both the information stored in the cloud and the service provider behavior. The architecture operates with a proposed protocol based on trust and encryption concepts to ensure cloud data integrity without compromising confidentiality and without overloading storage services. 
Extensive tests and simulations of the proposed architecture and protocol validate their functional behavior and performance.}, } @article {pmid29496338, year = {2018}, author = {Wiemken, TL and Furmanek, SP and Mattingly, WA and Haas, J and Ramirez, JA and Carrico, RM}, title = {Googling your hand hygiene data: Using Google Forms, Google Sheets, and R to collect and automate analysis of hand hygiene compliance monitoring.}, journal = {American journal of infection control}, volume = {46}, number = {6}, pages = {617-619}, doi = {10.1016/j.ajic.2018.01.010}, pmid = {29496338}, issn = {1527-3296}, mesh = {*Data Collection ; Guideline Adherence/*statistics & numerical data ; Hand Hygiene/*statistics & numerical data ; Humans ; *Internet ; Patient Compliance/*statistics & numerical data ; }, abstract = {BACKGROUND: Hand hygiene is one of the most important interventions in the quest to eliminate healthcare-associated infections, and rates in healthcare facilities are markedly low. Since hand hygiene observation and feedback are critical to improve adherence, we created an easy-to-use, platform-independent hand hygiene data collection process and an automated, on-demand reporting engine.

METHODS: A 3-step approach was used for this project: 1) creation of a data collection form using Google Forms, 2) transfer of data from the form to a spreadsheet using Google Spreadsheets, and 3) creation of an automated, cloud-based analytics platform for report generation using R and RStudio Shiny software.

RESULTS: A video tutorial of all steps in the creation and use of this free tool can be found on our YouTube channel: https://www.youtube.com/watch?v=uFatMR1rXqU&t. The on-demand reporting tool can be accessed at: https://crsp.louisville.edu/shiny/handhygiene.

CONCLUSIONS: This data collection and automated analytics engine provides an easy-to-use environment for evaluating hand hygiene data; it also provides rapid feedback to healthcare workers. By reducing some of the data management workload required of the infection preventionist, more focused interventions may be instituted to increase global hand hygiene rates and reduce infection.}, } @article {pmid29495269, year = {2018}, author = {Huang, Q and Yang, Y and Shi, Y}, title = {SmartVeh: Secure and Efficient Message Access Control and Authentication for Vehicular Cloud Computing.}, journal = {Sensors (Basel, Switzerland)}, volume = {18}, number = {2}, pages = {}, pmid = {29495269}, issn = {1424-8220}, abstract = {With the growing number of vehicles and popularity of various services in vehicular cloud computing (VCC), message exchanging among vehicles under traffic conditions and in emergency situations is one of the most pressing demands, and has attracted significant attention. However, it is an important challenge to authenticate the legitimate sources of broadcast messages and achieve fine-grained message access control. In this work, we propose SmartVeh, a secure and efficient message access control and authentication scheme in VCC. A hierarchical, attribute-based encryption technique is utilized to achieve fine-grained and flexible message sharing, which ensures that vehicles whose persistent or dynamic attributes satisfy the access policies can access the broadcast message with equipped on-board units (OBUs). Message authentication is enforced by integrating an attribute-based signature, which achieves message authentication and maintains the anonymity of the vehicles. In order to reduce the computations of the OBUs in the vehicles, we outsource the heavy computations of encryption, decryption and signing to a cloud server and road-side units. 
The theoretical analysis and simulation results reveal that our secure and efficient scheme is suitable for VCC.}, } @article {pmid29492726, year = {2018}, author = {Ahmed, L and Georgiev, V and Capuccini, M and Toor, S and Schaal, W and Laure, E and Spjuth, O}, title = {Efficient iterative virtual screening with Apache Spark and conformal prediction.}, journal = {Journal of cheminformatics}, volume = {10}, number = {1}, pages = {8}, pmid = {29492726}, issn = {1758-2946}, abstract = {BACKGROUND: Docking and scoring large libraries of ligands against target proteins forms the basis of structure-based virtual screening. The problem is trivially parallelizable, and calculations are generally carried out on computer clusters or on large workstations in a brute force manner, by docking and scoring all available ligands.

CONTRIBUTION: In this study we propose a strategy that is based on iteratively docking a set of ligands to form a training set, training a ligand-based model on this set, and predicting the remainder of the ligands to exclude those predicted as 'low-scoring' ligands. Then, another set of ligands is docked, the model is retrained and the process is repeated until a certain model efficiency level is reached. Thereafter, the remaining ligands are docked or excluded based on this model. We use SVM and conformal prediction to deliver valid prediction intervals for ranking the predicted ligands, and Apache Spark to parallelize both the docking and the modeling.

RESULTS: We show on 4 different targets that conformal prediction based virtual screening (CPVS) is able to reduce the number of docked molecules by 62.61% while retaining an accuracy for the top 30 hits of 94% on average and a speedup of 3.7. The implementation is available as open source via GitHub (https://github.com/laeeq80/spark-cpvs) and can be run on high-performance computers as well as on cloud resources.}, } @article {pmid29482119, year = {2018}, author = {Bussery, J and Denis, LA and Guillon, B and Liu, P and Marchetti, G and Rahal, G}, title = {eTRIKS platform: Conception and operation of a highly scalable cloud-based platform for translational research and applications development.}, journal = {Computers in biology and medicine}, volume = {95}, number = {}, pages = {99-106}, doi = {10.1016/j.compbiomed.2018.02.006}, pmid = {29482119}, issn = {1879-0534}, mesh = {*Cloud Computing ; *Databases, Factual ; *Genomics ; Humans ; Translational Research, Biomedical/*methods ; }, abstract = {We describe the genesis, design and evolution of a computing platform designed and built to improve the success rate of biomedical translational research. The eTRIKS project platform was developed with the aim of building a platform that can securely host heterogeneous types of data and provide an optimal environment to run tranSMART analytical applications. Many types of data can now be hosted, including multi-OMICS data, preclinical laboratory data and clinical information, including longitudinal data sets. 
During the last two years, the platform has matured into a robust translational research knowledge management system that is able to host other data mining applications and support the development of new analytical tools.}, } @article {pmid29477433, year = {2018}, author = {Catal, C and Akbulut, A}, title = {Automatic energy expenditure measurement for health science.}, journal = {Computer methods and programs in biomedicine}, volume = {157}, number = {}, pages = {31-37}, doi = {10.1016/j.cmpb.2018.01.015}, pmid = {29477433}, issn = {1872-7565}, mesh = {Accelerometry/instrumentation ; Adult ; Algorithms ; Automation ; *Cloud Computing ; *Computer Simulation ; *Decision Trees ; *Energy Metabolism ; *Exercise ; Fitness Trackers ; Heart Rate ; Humans ; Internet ; Machine Learning ; Respiration ; User-Computer Interface ; }, abstract = {BACKGROUND AND OBJECTIVE: It is crucial to predict the human energy expenditure in any sports activity and health science application accurately to investigate the impact of the activity. However, measurement of the real energy expenditure is not a trivial task and involves complex steps. The objective of this work is to improve the performance of existing estimation models of energy expenditure by using machine learning algorithms and several data from different sensors and provide this estimation service in a cloud-based platform.

METHODS: In this study, we used input data such as breathing rate and heart rate from three sensors. Inputs are received from a web form and sent to the web service, which applies a regression model on the Azure cloud platform. During the experiments, we assessed several machine learning models based on regression methods.

RESULTS: Our experimental results showed that our novel model, which applies Boosted Decision Tree Regression in conjunction with the median aggregation technique, provides the best result among the other five regression algorithms.

CONCLUSIONS: This cloud-based energy expenditure system which uses a web service showed that cloud computing technology is a great opportunity to develop estimation systems and the new model which applies Boosted Decision Tree Regression with the median aggregation provides remarkable results.}, } @article {pmid29477428, year = {2018}, author = {Li, CT and Shih, DH and Wang, CC}, title = {Cloud-assisted mutual authentication and privacy preservation protocol for telecare medical information systems.}, journal = {Computer methods and programs in biomedicine}, volume = {157}, number = {}, pages = {191-203}, doi = {10.1016/j.cmpb.2018.02.002}, pmid = {29477428}, issn = {1872-7565}, mesh = {*Cloud Computing ; Computer Security/*standards ; *Confidentiality ; Delivery of Health Care/organization & administration ; Humans ; *Information Systems ; Telemedicine/*organization & administration ; }, abstract = {BACKGROUND AND OBJECTIVE: With the rapid development of wireless communication technologies and the growing prevalence of smart devices, telecare medical information system (TMIS) allows patients to receive medical treatments from the doctors via Internet technology without visiting hospitals in person. By adopting mobile device, cloud-assisted platform and wireless body area network, the patients can collect their physiological conditions and upload them to medical cloud via their mobile devices, enabling caregivers or doctors to provide patients with appropriate treatments at anytime and anywhere. In order to protect the medical privacy of the patient and guarantee reliability of the system, before accessing the TMIS, all system participants must be authenticated.

METHODS:  Mohit et al. recently suggested a lightweight authentication protocol for cloud-based health care system. They claimed their protocol ensures resilience of all well-known security attacks and has several important features such as mutual authentication and patient anonymity. In this paper, we demonstrate that Mohit et al.'s authentication protocol has various security flaws and we further introduce an enhanced version of their protocol for cloud-assisted TMIS, which can ensure patient anonymity and patient unlinkability and prevent the security threats of report revelation and report forgery attacks.

RESULTS:  The security analysis proves that our enhanced protocol is secure against various known attacks as well as those found in Mohit et al.'s protocol. Compared with existing related protocols, our enhanced protocol keeps the merits of all desirable security requirements and also maintains the efficiency in terms of computation costs for cloud-assisted TMIS.

CONCLUSIONS:  We propose a more secure mutual authentication and privacy preservation protocol for cloud-assisted TMIS, which fixes the mentioned security weaknesses found in Mohit et al.'s protocol. According to our analysis, our authentication protocol satisfies most functionality features for privacy preservation and effectively cope with cloud-assisted TMIS with better efficiency.}, } @article {pmid29440193, year = {2018}, author = {Kass-Hout, TA and Stevens, LM and Hall, JL}, title = {American Heart Association Precision Medicine Platform.}, journal = {Circulation}, volume = {137}, number = {7}, pages = {647-649}, pmid = {29440193}, issn = {1524-4539}, support = {T15 LM009451/LM/NLM NIH HHS/United States ; }, mesh = {*American Heart Association ; *Biomedical Research ; Humans ; *Precision Medicine ; United States ; }, abstract = {Integrating the open science movement with impactful discoveries in science, velocity of technology, and raw power of cloud computing has led to an unprecedented opportunity for scientific discovery. The American Heart Association recently established the Precision Medicine Platform through the efforts of multiple American Heart Association volunteers and a collaboration with Amazon Web Services. The cloud-based platform, powered by Amazon Web Services and available at https://precision.heart.org, was founded on the FAIR principles (findable, accessible, interoperable, and reusable) and includes secure collaboration areas (workspaces) and an open sharing area. The goals of the platform are to democratize data, to make it easy to search across orthogonal data sets, to provide a secure workspace to leverage the power of cloud computing, and to provide a forum for users to share insights. 
Multiple learning tools are available, including video tutorials, templates using open interactive programming framework, and a forum for interaction among community members.}, } @article {pmid29439442, year = {2018}, author = {Ma, X and Liang, J and Liu, R and Ni, W and Li, Y and Li, R and Ma, W and Qi, C}, title = {A Survey on Data Storage and Information Discovery in the WSANs-Based Edge Computing Systems.}, journal = {Sensors (Basel, Switzerland)}, volume = {18}, number = {2}, pages = {}, pmid = {29439442}, issn = {1424-8220}, abstract = {In the post-Cloud era, the proliferation of Internet of Things (IoT) has pushed the horizon of Edge computing, which is a new computing paradigm with data are processed at the edge of the network. As the important systems of Edge computing, wireless sensor and actuator networks (WSANs) play an important role in collecting and processing the sensing data from the surrounding environment as well as taking actions on the events happening in the environment. In WSANs, in-network data storage and information discovery schemes with high energy efficiency, high load balance and low latency are needed because of the limited resources of the sensor nodes and the real-time requirement of some specific applications, such as putting out a big fire in a forest. In this article, the existing schemes of WSANs on data storage and information discovery are surveyed with detailed analysis on their advancements and shortcomings, and possible solutions are proposed on how to achieve high efficiency, good load balance, and perfect real-time performances at the same time, hoping that it can provide a good reference for the future research of the WSANs-based Edge computing systems.}, } @article {pmid29430012, year = {2018}, author = {Langmead, B and Nellore, A}, title = {Cloud computing for genomic data analysis and collaboration.}, journal = {Nature reviews. 
Genetics}, volume = {19}, number = {5}, pages = {325}, pmid = {29430012}, issn = {1471-0064}, support = {R01 GM118568/GM/NIGMS NIH HHS/United States ; }, abstract = {This corrects the article DOI: 10.1038/nrg.2017.113.}, } @article {pmid29428417, year = {2018}, author = {Robinson, JT and Turner, D and Durand, NC and Thorvaldsdóttir, H and Mesirov, JP and Aiden, EL}, title = {Juicebox.js Provides a Cloud-Based Visualization System for Hi-C Data.}, journal = {Cell systems}, volume = {6}, number = {2}, pages = {256-258.e1}, pmid = {29428417}, issn = {2405-4712}, support = {U01 HL130010/HL/NHLBI NIH HHS/United States ; R01 GM074024/GM/NIGMS NIH HHS/United States ; UM1 HG009375/HG/NHGRI NIH HHS/United States ; DP2 OD008540/OD/NIH HHS/United States ; U24 CA210004/CA/NCI NIH HHS/United States ; R01 CA157304/CA/NCI NIH HHS/United States ; }, mesh = {Cloud Computing ; Computational Biology/*methods ; Computer Graphics ; Computers ; Data Analysis ; Genome/genetics ; Image Processing, Computer-Assisted/*methods ; Internet ; Reproducibility of Results ; Software ; }, abstract = {Contact mapping experiments such as Hi-C explore how genomes fold in 3D. Here, we introduce Juicebox.js, a cloud-based web application for exploring the resulting datasets. Like the original Juicebox application, Juicebox.js allows users to zoom in and out of such datasets using an interface similar to Google Earth. Juicebox.js also has many features designed to facilitate data reproducibility and sharing. Furthermore, Juicebox.js encodes the exact state of the browser in a shareable URL. Creating a public browser for a new Hi-C dataset does not require coding and can be accomplished in under a minute. The web app also makes it possible to create interactive figures online that can complement or replace ordinary journal figures. 
When combined with Juicer, this makes the entire process of data analysis transparent, insofar as every step from raw reads to published figure is publicly available as open source code.}, } @article {pmid29425378, year = {2018}, author = {Lebeda, FJ and Zalatoris, JJ and Scheerer, JB}, title = {Government Cloud Computing Policies: Potential Opportunities for Advancing Military Biomedical Research.}, journal = {Military medicine}, volume = {183}, number = {11-12}, pages = {e438-e447}, doi = {10.1093/milmed/usx114}, pmid = {29425378}, issn = {1930-613X}, mesh = {Biomedical Research/methods/trends ; Cloud Computing/legislation & jurisprudence/*trends ; Government Programs/*methods/trends ; Humans ; Military Medicine/methods/trends ; *Policy ; United States ; United States Department of Defense/organization & administration/statistics & numerical data ; }, abstract = {INTRODUCTION: This position paper summarizes the development and the present status of Department of Defense (DoD) and other government policies and guidances regarding cloud computing services. Due to the heterogeneous and growing biomedical big datasets, cloud computing services offer an opportunity to mitigate the associated storage and analysis requirements. Having on-demand network access to a shared pool of flexible computing resources creates a consolidated system that should reduce potential duplications of effort in military biomedical research.

METHODS: Interactive, online literature searches were performed with Google, at the Defense Technical Information Center, and at two National Institutes of Health research portfolio information sites. References cited within some of the collected documents also served as literature resources.

RESULTS: We gathered, selected, and reviewed DoD and other government cloud computing policies and guidances published from 2009 to 2017. These policies were intended to consolidate computer resources within the government and reduce costs by decreasing the number of federal data centers and by migrating electronic data to cloud systems. Initial White House Office of Management and Budget information technology guidelines were developed for cloud usage, followed by policies and other documents from the DoD, the Defense Health Agency, and the Armed Services. Security standards from the National Institute of Standards and Technology, the Government Services Administration, the DoD, and the Army were also developed. Government Services Administration and DoD Inspectors General monitored cloud usage by the DoD. A 2016 Government Accountability Office report characterized cloud computing as being economical, flexible and fast. A congressionally mandated independent study reported that the DoD was active in offering a wide selection of commercial cloud services in addition to its milCloud system. Our findings from the Department of Health and Human Services indicated that the security infrastructure in cloud services may be more compliant with the Health Insurance Portability and Accountability Act of 1996 regulations than traditional methods. To gauge the DoD's adoption of cloud technologies proposed metrics included cost factors, ease of use, automation, availability, accessibility, security, and policy compliance.

CONCLUSIONS: Since 2009, plans and policies were developed for the use of cloud technology to help consolidate and reduce the number of data centers which were expected to reduce costs, improve environmental factors, enhance information technology security, and maintain mission support for service members. Cloud technologies were also expected to improve employee efficiency and productivity. Federal cloud computing policies within the last decade also offered increased opportunities to advance military healthcare. It was assumed that these opportunities would benefit consumers of healthcare and health science data by allowing more access to centralized cloud computer facilities to store, analyze, search and share relevant data, to enhance standardization, and to reduce potential duplications of effort. We recommend that cloud computing be considered by DoD biomedical researchers for increasing connectivity, presumably by facilitating communications and data sharing, among the various intra- and extramural laboratories. We also recommend that policies and other guidances be updated to include developing additional metrics that will help stakeholders evaluate the above mentioned assumptions and expectations.}, } @article {pmid29423574, year = {2018}, author = {Yu, J and Lim, J and Lee, KS}, title = {Investigation of drought-vulnerable regions in North Korea using remote sensing and cloud computing climate data.}, journal = {Environmental monitoring and assessment}, volume = {190}, number = {3}, pages = {126}, pmid = {29423574}, issn = {1573-2959}, mesh = {*Climate ; *Cloud Computing ; Crops, Agricultural ; Democratic People's Republic of Korea ; Disasters ; *Droughts ; Environmental Monitoring/*methods ; Forests ; *Remote Sensing Technology ; Seasons ; }, abstract = {Drought is one of the most severe natural disasters in the world and leads to serious challenges that affect both the natural environment and human societies. 
North Korea (NK) has frequently suffered from severe and prolonged droughts since the second half of the twentieth century. These droughts affect the growing conditions of agricultural crops, which have led to food shortages in NK. However, it is not easy to obtain ground data because NK is one of the most closed-off societies in the world. In this situation, remote sensing (RS) techniques and cloud computing climate data (CCCD) can be used for drought monitoring in NK. RS-derived drought indices and CCCD were used to determine the drought-vulnerable regions in the spring season in NK. After the results were compared and discussed, the following conclusions were derived: (1) 10.0% of the total area of NK is estimated to be a drought-vulnerable region. The most susceptible regions to drought appear in the eastern and western coastal regions, far from BaekDu-DaeGan (BDDG), while fewer drought regions are found near BDDG and the Nahngrim Mountains. The drought-vulnerable regions are the coastal regions of South Hamgyong Province, North Hamgyong Province, South Pyongan Province, and South Hwanghae Province. The latter region is the food basket of NK. (2) In terms of land cover, the drought-vulnerable regions mainly consisted of croplands and mixed forest.}, } @article {pmid29415510, year = {2018}, author = {Chen, YS and Tsai, YT}, title = {A Mobility Management Using Follow-Me Cloud-Cloudlet in Fog-Computing-Based RANs for Smart Cities.}, journal = {Sensors (Basel, Switzerland)}, volume = {18}, number = {2}, pages = {}, pmid = {29415510}, issn = {1424-8220}, abstract = {Mobility management for supporting the location tracking and location-based service (LBS) is an important issue of smart city by providing the means for the smooth transportation of people and goods. The mobility is useful to contribute the innovation in both public and private transportation infrastructures for smart cities. 
With the assistance of edge/fog computing, this paper presents a fully new mobility management using the proposed follow-me cloud-cloudlet (FMCL) approach in fog-computing-based radio access networks (Fog-RANs) for smart cities. The proposed follow-me cloud-cloudlet approach is an integration strategy of follow-me cloud (FMC) and follow-me edge (FME) (or called cloudlet). A user equipment (UE) receives the data, transmitted from original cloud, into the original edge cloud before the handover operation. After the handover operation, an UE searches for a new cloud, called as a migrated cloud, and a new edge cloud, called as a migrated edge cloud near to UE, where the remaining data is migrated from the original cloud to the migrated cloud and all the remaining data are received in the new edge cloud. Existing FMC results do not have the property of the VM migration between cloudlets for the purpose of reducing the transmission latency, and existing FME results do not keep the property of the service migration between data centers for reducing the transmission latency. Our proposed FMCL approach can simultaneously keep the VM migration between cloudlets and service migration between data centers to significantly reduce the transmission latency. The new proposed mobility management using FMCL approach aims to reduce the total transmission time if some data packets are pre-scheduled and pre-stored into the cache of cloudlet if UE is switching from the previous Fog-RAN to the serving Fog-RAN. 
To illustrate the performance achievement, the mathematical analysis and simulation results are examined in terms of the total transmission time, the throughput, the probability of packet loss, and the number of control messages.}, } @article {pmid29415450, year = {2018}, author = {Ahmad, S and Hang, L and Kim, DH}, title = {Design and Implementation of Cloud-Centric Configuration Repository for DIY IoT Applications.}, journal = {Sensors (Basel, Switzerland)}, volume = {18}, number = {2}, pages = {}, pmid = {29415450}, issn = {1424-8220}, abstract = {The Do-It-Yourself (DIY) vision for the design of a smart and customizable IoT application demands the involvement of the general public in its development process. The general public lacks the technical knowledge for programming state-of-the-art prototyping and development kits. The latest IoT kits, for example, Raspberry Pi, are revolutionizing the DIY paradigm for IoT, and more than ever, a DIY intuitive programming interface is required to enable the masses to interact with and customize the behavior of remote IoT devices on the Internet. However, in most cases, these DIY toolkits store the resultant configuration data in local storage and, thus, cannot be accessed remotely. This paper presents the novel implementation of such a system, which not only enables the general public to customize the behavior of remote IoT devices through a visual interface, but also makes the configuration available everywhere and anytime by leveraging the power of cloud-based platforms. The interface enables the visualization of the resources exposed by remote embedded resources in the form of graphical virtual objects (VOs). These VOs are used to create the service design through simple operations like drag-and-drop and the setting of properties. The configuration created as a result is maintained as an XML document, which is ingested by the cloud platform, thus making it available to be used anywhere. 
We use the HTTP approach for the communication between the cloud and IoT toolbox and the cloud and real devices, but for communication between the toolbox and actual resources, CoAP is used. Finally, a smart home case study has been implemented and presented in order to assess the effectiveness of the proposed work.}, } @article {pmid29415444, year = {2018}, author = {Wang, S and Wan, J and Li, D and Liu, C}, title = {Knowledge Reasoning with Semantic Data for Real-Time Data Processing in Smart Factory.}, journal = {Sensors (Basel, Switzerland)}, volume = {18}, number = {2}, pages = {}, pmid = {29415444}, issn = {1424-8220}, abstract = {The application of high-bandwidth networks and cloud computing in manufacturing systems will be followed by mass data. Industrial data analysis plays important roles in condition monitoring, performance optimization, flexibility, and transparency of the manufacturing system. However, the currently existing architectures are mainly for offline data analysis, not suitable for real-time data processing. In this paper, we first define the smart factory as a cloud-assisted and self-organized manufacturing system in which physical entities such as machines, conveyors, and products organize production through intelligent negotiation and the cloud supervises this self-organized process for fault detection and troubleshooting based on data analysis. Then, we propose a scheme to integrate knowledge reasoning and semantic data where the reasoning engine processes the ontology model with real time semantic data coming from the production process. 
Based on these ideas, we build a benchmarking system for smart candy packing application that supports direct consumer customization and flexible hybrid production, and the data are collected and processed in real time for fault diagnosis and statistical analysis.}, } @article {pmid29401656, year = {2018}, author = {Tran, THG and Ressl, C and Pfeifer, N}, title = {Integrated Change Detection and Classification in Urban Areas Based on Airborne Laser Scanning Point Clouds.}, journal = {Sensors (Basel, Switzerland)}, volume = {18}, number = {2}, pages = {}, pmid = {29401656}, issn = {1424-8220}, abstract = {This paper suggests a new approach for change detection (CD) in 3D point clouds. It combines classification and CD in one step using machine learning. The point cloud data of both epochs are merged for computing features of four types: features describing the point distribution, a feature relating to relative terrain elevation, features specific for the multi-target capability of laser scanning, and features combining the point clouds of both epochs to identify the change. All these features are merged in the points and then training samples are acquired to create the model for supervised classification, which is then applied to the whole study area. 
The final results reach an overall accuracy of over 90% for both epochs of eight classes: lost tree, new tree, lost building, new building, changed ground, unchanged building, unchanged tree, and unchanged ground.}, } @article {pmid29400473, year = {2018}, author = {Collins, A and Jones, AR}, title = {phpMs: A PHP-Based Mass Spectrometry Utilities Library.}, journal = {Journal of proteome research}, volume = {17}, number = {3}, pages = {1309-1313}, doi = {10.1021/acs.jproteome.7b00783}, pmid = {29400473}, issn = {1535-3907}, support = {BB/L005239/1//Biotechnology and Biological Sciences Research Council/United Kingdom ; }, mesh = {Databases, Protein ; Humans ; Information Dissemination/*methods ; Information Storage and Retrieval/statistics & numerical data ; Internet ; Libraries, Special/*statistics & numerical data ; Mass Spectrometry/methods/*statistics & numerical data ; Proteomics/methods/statistics & numerical data ; Small Molecule Libraries/*supply & distribution ; *Software ; }, abstract = {The recent establishment of cloud computing, high-throughput networking, and more versatile web standards and browsers has led to a renewed interest in web-based applications. While traditionally big data has been the domain of optimized desktop and server applications, it is now possible to store vast amounts of data and perform the necessary calculations offsite in cloud storage and computing providers, with the results visualized in a high-quality cross-platform interface via a web browser. There are number of emerging platforms for cloud-based mass spectrometry data analysis; however, there is limited pre-existing code accessible to web developers, especially for those that are constrained to a shared hosting environment where Java and C applications are often forbidden from use by the hosting provider. To remedy this, we provide an open-source mass spectrometry library for one of the most commonly used web development languages, PHP. 
Our new library, phpMs, provides objects for storing and manipulating spectra and identification data as well as utilities for file reading, file writing, calculations, peptide fragmentation, and protein digestion as well as a software interface for controlling search engines. We provide a working demonstration of some of the capabilities at http://pgb.liv.ac.uk/phpMs .}, } @article {pmid29379135, year = {2018}, author = {Langmead, B and Nellore, A}, title = {Cloud computing for genomic data analysis and collaboration.}, journal = {Nature reviews. Genetics}, volume = {19}, number = {4}, pages = {208-219}, pmid = {29379135}, issn = {1471-0064}, support = {R01 GM118568/GM/NIGMS NIH HHS/United States ; }, mesh = {*Cloud Computing ; Computational Biology ; *Genomics ; *High-Throughput Nucleotide Sequencing ; Humans ; *Internet ; }, abstract = {Next-generation sequencing has made major strides in the past decade. Studies based on large sequencing data sets are growing in number, and public archives for raw sequencing data have been doubling in size every 18 months. Leveraging these data requires researchers to use large-scale computational resources. Cloud computing, a model whereby users rent computers and storage from large data centres, is a solution that is gaining traction in genomics research. 
Here, we describe how cloud computing is used in genomics for research and large-scale collaborations, and argue that its elasticity, reproducibility and privacy features make it ideally suited for the large-scale reanalysis of publicly available archived data, including privacy-protected data.}, } @article {pmid29374408, year = {2018}, author = {N, S and R, B and M, P}, title = {Cancer Diagnosis Epigenomics Scientific Workflow Scheduling in the Cloud Computing Environment Using an Improved PSO Algorithm.}, journal = {Asian Pacific journal of cancer prevention : APJCP}, volume = {19}, number = {1}, pages = {243-246}, pmid = {29374408}, issn = {2476-762X}, abstract = {Objective: Epigenetic modifications involving DNA methylation and histone statud are responsible for the stable maintenance of cellular phenotypes. Abnormalities may be causally involved in cancer development and therefore could have diagnostic potential. The field of epigenomics refers to all epigenetic modifications implicated in control of gene expression, with a focus on better understanding of human biology in both normal and pathological states. Epigenomics scientific workflow is essentially a data processing pipeline to automate the execution of various genome sequencing operations or tasks. Cloud platform is a popular computing platform for deploying large scale epigenomics scientific workflow. Its dynamic environment provides various resources to scientific users on a pay-per-use billing model. Scheduling epigenomics scientific workflow tasks is a complicated problem in cloud platform. We here focused on application of an improved particle swam optimization (IPSO) algorithm for this purpose. Methods: The IPSO algorithm was applied to find suitable resources and allocate epigenomics tasks so that the total cost was minimized for detection of epigenetic abnormalities of potential application for cancer diagnosis. 
Result: The results showed that IPSO based task to resource mapping reduced total cost by 6.83 percent as compared to the traditional PSO algorithm. Conclusion: The results for various cancer diagnosis tasks showed that IPSO based task to resource mapping can achieve better costs when compared to PSO based mapping for epigenomics scientific application workflow.}, } @article {pmid29364172, year = {2018}, author = {Rosário, D and Schimuneck, M and Camargo, J and Nobre, J and Both, C and Rochol, J and Gerla, M}, title = {Service Migration from Cloud to Multi-tier Fog Nodes for Multimedia Dissemination with QoE Support.}, journal = {Sensors (Basel, Switzerland)}, volume = {18}, number = {2}, pages = {}, pmid = {29364172}, issn = {1424-8220}, abstract = {A wide range of multimedia services is expected to be offered for mobile users via various wireless access networks. Even the integration of Cloud Computing in such networks does not support an adequate Quality of Experience (QoE) in areas with high demands for multimedia contents. Fog computing has been conceptualized to facilitate the deployment of new services that cloud computing cannot provide, particularly those demanding QoE guarantees. These services are provided using fog nodes located at the network edge, which is capable of virtualizing their functions/applications. Service migration from the cloud to fog nodes can be actuated by request patterns and the timing issues. To the best of our knowledge, existing works on fog computing focus on architecture and fog node deployment issues. In this article, we describe the operational impacts and benefits associated with service migration from the cloud to multi-tier fog computing for video distribution with QoE support. Besides that, we perform the evaluation of such service migration of video services. 
Finally, we present potential research challenges and trends.}, } @article {pmid29363427, year = {2018}, author = {Wang, Y and Li, G and Ma, M and He, F and Song, Z and Zhang, W and Wu, C}, title = {GT-WGS: an efficient and economic tool for large-scale WGS analyses based on the AWS cloud service.}, journal = {BMC genomics}, volume = {19}, number = {Suppl 1}, pages = {959}, pmid = {29363427}, issn = {1471-2164}, mesh = {Cloud Computing/*economics ; Cluster Analysis ; *Genome, Human ; Genomics/*methods ; Humans ; Sequence Analysis, DNA/economics/*methods ; *Software ; Whole Genome Sequencing/economics/*methods ; }, abstract = {BACKGROUND: Whole-genome sequencing (WGS) plays an increasingly important role in clinical practice and public health. Due to the big data size, WGS data analysis is usually compute-intensive and IO-intensive. Currently it usually takes 30 to 40 h to finish a 50× WGS analysis task, which is far from the ideal speed required by the industry. Furthermore, the high-end infrastructure required by WGS computing is costly in terms of time and money. In this paper, we aim to improve the time efficiency of WGS analysis and minimize the cost by elastic cloud computing.

RESULTS: We developed a distributed system, GT-WGS, for large-scale WGS analyses utilizing the Amazon Web Services (AWS). Our system won the first prize on the Wind and Cloud challenge held by Genomics and Cloud Technology Alliance conference (GCTA) committee. The system makes full use of the dynamic pricing mechanism of AWS. We evaluate the performance of GT-WGS with a 55× WGS dataset (400GB fastq) provided by the GCTA 2017 competition. In the best case, it only took 18.4 min to finish the analysis and the AWS cost of the whole process is only 16.5 US dollars. The accuracy of GT-WGS is 99.9% consistent with that of the Genome Analysis Toolkit (GATK) best practice. We also evaluated the performance of GT-WGS performance on a real-world dataset provided by the XiangYa hospital, which consists of 5× whole-genome dataset with 500 samples, and on average GT-WGS managed to finish one 5× WGS analysis task in 2.4 min at a cost of $3.6.

CONCLUSIONS: WGS is already playing an important role in guiding therapeutic intervention. However, its application is limited by the time cost and computing cost. GT-WGS excelled as an efficient and affordable WGS analyses tool to address this problem. The demo video and supplementary materials of GT-WGS can be accessed at https://github.com/Genetalks/wgs_analysis_demo .}, } @article {pmid29352282, year = {2018}, author = {Liu, J and Wu, Z and Dong, J and Wu, J and Wen, D}, title = {An energy-efficient failure detector for vehicular cloud computing.}, journal = {PloS one}, volume = {13}, number = {1}, pages = {e0191577}, pmid = {29352282}, issn = {1932-6203}, mesh = {Algorithms ; *Cloud Computing ; Computer Communication Networks ; Electric Power Supplies/*statistics & numerical data ; Equipment Failure/statistics & numerical data ; Humans ; Internet ; Motor Vehicles/*statistics & numerical data ; Solar Energy/statistics & numerical data ; Systems Integration ; }, abstract = {Failure detectors are one of the fundamental components for maintaining the high availability of vehicular cloud computing. In vehicular cloud computing, lots of RSUs are deployed along the road to improve the connectivity. Many of them are equipped with solar battery due to the unavailability or excess expense of wired electrical power. So it is important to reduce the battery consumption of RSU. However, the existing failure detection algorithms are not designed to save battery consumption RSU. To solve this problem, a new energy-efficient failure detector 2E-FD has been proposed specifically for vehicular cloud computing. 2E-FD does not only provide acceptable failure detection service, but also saves the battery consumption of RSU. 
Through the comparative experiments, the results show that our failure detector has better performance in terms of speed, accuracy and battery consumption.}, } @article {pmid29334350, year = {2017}, author = {Thomas, PBM}, title = {Bespoke automation of medical workforce rostering using Google's free cloud applications.}, journal = {Journal of innovation in health informatics}, volume = {24}, number = {4}, pages = {885}, doi = {10.14236/jhi.v24i4.885}, pmid = {29334350}, issn = {2058-4563}, mesh = {*Automation ; Cloud Computing/*statistics & numerical data ; Health Personnel/*organization & administration ; Hospitals ; Humans ; Internet ; Ophthalmology ; Personnel Staffing and Scheduling/*organization & administration ; Practice Management/*organization & administration ; Software ; }, abstract = {BACKGROUND: Providing safe and consistent care requires optimal deployment of medical staff. Ensuring this happens is a significant administrative burden due to complex working patterns.

OBJECTIVE: To describe a pilot feasibility study of the automation of medical duty rostering in a busy tertiary Ophthalmology department.

METHODS: A cloud based web application was created using Google's free cloud services. Users access the system via a website which hosts live rosters, and use electronic forms to submit requests which are automatically handled by Google App Scripts.

RESULTS: Over a 2-year period (8/2014-6/2016), the system processed 563 leave requests and 300 on call swaps automatically. 3,300 emails and 1,000 forms were automatically generated. User satisfaction was 100% (n=24).

DISCUSSION: Many time consuming aspects of roster management were automated with significant time savings to all parties, allowing increased clinical time for doctors involved in administration. Planning for safe staffing levels was supported.}, } @article {pmid29332985, year = {2017}, author = {Puechmaille, D and Styner, M and Prieto, JC}, title = {CIVILITY: Cloud based Interactive Visualization of Tractography Brain Connectome.}, journal = {Proceedings of SPIE--the International Society for Optical Engineering}, volume = {10137}, number = {}, pages = {}, pmid = {29332985}, issn = {0277-786X}, support = {R01 MH091351/MH/NIMH NIH HHS/United States ; R01 MH091645/MH/NIMH NIH HHS/United States ; U01 MH070890/MH/NIMH NIH HHS/United States ; U54 HD079124/HD/NICHD NIH HHS/United States ; }, abstract = {Cloud based Interactive Visualization of Tractography Brain Connectome (CIVILITY) is an interactive visualization tool of brain connectome in the cloud. This application submits tasks to remote computing grids were the CIVILITY-tractography pipeline is deployed. The application will list the running tasks for the user and once a task is completed the brain connectome is visualized using Hierarchical Edge Bundling. The analysis pipeline uses FSL tools (bedpostx and probtrackx2) to generate a triangular matrix indicating the connectivity strength between different regions in the brain. This work is motivated by medical applications in which expensive computational tasks such as brain connectivity is needed and to provide a state of the art visualization tool of Brain Connectome. This work does not contribute any novelty with respect to the visualization methodology, is rather a new resource for the neuroimaging community. This work is submitted to the SPIE Biomedical Applications in Molecular, Structural, and Functional Imaging conference. 
The source code of this application is available in NITRC.}, } @article {pmid29320418, year = {2018}, author = {Liao, Y and He, Y and Li, F and Jiang, S and Zhou, S}, title = {Analysis of an ABE Scheme with Verifiable Outsourced Decryption.}, journal = {Sensors (Basel, Switzerland)}, volume = {18}, number = {1}, pages = {}, pmid = {29320418}, issn = {1424-8220}, abstract = {Attribute-based encryption (ABE) is a popular cryptographic technology to protect the security of users' data in cloud computing. In order to reduce its decryption cost, outsourcing the decryption of ciphertexts is an available method, which enables users to outsource a large number of decryption operations to the cloud service provider. To guarantee the correctness of transformed ciphertexts computed by the cloud server via the outsourced decryption, it is necessary to check the correctness of the outsourced decryption to ensure security for the data of users. Recently, Li et al. proposed a full verifiability of the outsourced decryption of ABE scheme (ABE-VOD) for the authorized users and unauthorized users, which can simultaneously check the correctness of the transformed ciphertext for both them. However, in this paper we show that their ABE-VOD scheme cannot obtain the results which they had shown, such as finding out all invalid ciphertexts, and checking the correctness of the transformed ciphertext for the authorized user via checking it for the unauthorized user. We first construct some invalid ciphertexts which can pass the validity checking in the decryption algorithm. That means their "verify-then-decrypt" skill is unavailable. Next, we show that the method to check the validity of the outsourced decryption for the authorized users via checking it for the unauthorized users is not always correct. 
That is to say, there exist some invalid ciphertexts which can pass the validity checking for the unauthorized user, but cannot pass the validity checking for the authorized user.}, } @article {pmid29306539, year = {2018}, author = {Hosseini, MP and Pompili, D and Elisevich, K and Soltanian-Zadeh, H}, title = {Random ensemble learning for EEG classification.}, journal = {Artificial intelligence in medicine}, volume = {84}, number = {}, pages = {146-158}, doi = {10.1016/j.artmed.2017.12.004}, pmid = {29306539}, issn = {1873-2860}, mesh = {Automation ; Brain/*physiopathology ; Brain Mapping/*methods ; *Brain Waves ; Cloud Computing ; Electrocorticography ; *Electroencephalography ; False Negative Reactions ; False Positive Reactions ; Humans ; Neural Networks, Computer ; Predictive Value of Tests ; Reproducibility of Results ; Seizures/classification/*diagnosis/physiopathology ; *Signal Processing, Computer-Assisted ; *Support Vector Machine ; Time Factors ; Wavelet Analysis ; }, abstract = {Real-time detection of seizure activity in epilepsy patients is critical in averting seizure activity and improving patients' quality of life. Accurate evaluation, presurgical assessment, seizure prevention, and emergency alerts all depend on the rapid detection of seizure onset. A new method of feature selection and classification for rapid and precise seizure detection is discussed wherein informative components of electroencephalogram (EEG)-derived data are extracted and an automatic method is presented using infinite independent component analysis (I-ICA) to select independent features. The feature space is divided into subspaces via random selection and multichannel support vector machines (SVMs) are used to classify these subspaces. The result of each classifier is then combined by majority voting to establish the final output. 
In addition, a random subspace ensemble using a combination of SVM, multilayer perceptron (MLP) neural network and an extended k-nearest neighbors (k-NN), called extended nearest neighbor (ENN), is developed for the EEG and electrocorticography (ECoG) big data problem. To evaluate the solution, a benchmark ECoG of eight patients with temporal and extratemporal epilepsy was implemented in a distributed computing framework as a multitier cloud-computing architecture. Using leave-one-out cross-validation, the accuracy, sensitivity, specificity, and both false positive and false negative ratios of the proposed method were found to be 0.97, 0.98, 0.96, 0.04, and 0.02, respectively. Application of the solution to cases under investigation with ECoG has also been effected to demonstrate its utility.}, } @article {pmid29305754, year = {2018}, author = {Platt, RV and Manthos, D and Amos, J}, title = {Estimating the Creation and Removal Date of Fracking Ponds Using Trend Analysis of Landsat Imagery.}, journal = {Environmental management}, volume = {61}, number = {2}, pages = {310-320}, pmid = {29305754}, issn = {1432-1009}, mesh = {*Environmental Exposure ; Geographic Information Systems ; *Hydraulic Fracking ; Natural Gas ; Pennsylvania ; *Ponds ; Risk Assessment/*methods ; Wastewater ; Water Pollutants, Chemical/*adverse effects ; }, abstract = {Hydraulic fracturing, or fracking, is a process of introducing liquid at high pressure to create fractures in shale rock formations, thus releasing natural gas. Flowback and produced water from fracking operations is typically stored in temporary open-air earthen impoundments, or frack ponds. Unfortunately, in the United States there is no public record of the location of impoundments, or the dates that impoundments are created or removed. In this study we use a dataset of drilling-related impoundments in Pennsylvania identified through the FrackFinder project led by SkyTruth, an environmental non-profit. 
For each impoundment location, we compiled all low cloud Landsat imagery from 2000 to 2016 and created a monthly time series for three bands: red, near-infrared (NIR), and the Normalized Difference Vegetation Index (NDVI). We identified the approximate date of creation and removal of impoundments from sudden breaks in the time series. To verify our method, we compared the results to date ranges derived from photointerpretation of all available historical imagery on Google Earth for a subset of impoundments. Based on our analysis, we found that the number of impoundments built annually increased rapidly from 2006 to 2010, and then slowed from 2010 to 2013. Since newer impoundments tend to be larger, however, the total impoundment area has continued to increase. The methods described in this study would be appropriate for finding the creation and removal date of a variety of industrial land use changes at known locations.}, } @article {pmid29297296, year = {2017}, author = {Xing, Y and Li, G and Wang, Z and Feng, B and Song, Z and Wu, C}, title = {GTZ: a fast compression and cloud transmission tool optimized for FASTQ files.}, journal = {BMC bioinformatics}, volume = {18}, number = {Suppl 16}, pages = {549}, pmid = {29297296}, issn = {1471-2105}, mesh = {Cloud Computing/*standards ; Data Compression/*methods ; High-Throughput Nucleotide Sequencing/*methods ; Humans ; }, abstract = {BACKGROUND: The dramatic development of DNA sequencing technology is generating real big data, craving for more storage and bandwidth. To speed up data sharing and bring data to computing resource faster and cheaper, it is necessary to develop a compression tool than can support efficient compression and transmission of sequencing data onto the cloud storage.

RESULTS: This paper presents GTZ, a compression and transmission tool, optimized for FASTQ files. As a reference-free lossless FASTQ compressor, GTZ treats different lines of FASTQ separately, utilizes adaptive context modelling to estimate their characteristic probabilities, and compresses data blocks with arithmetic coding. GTZ can also be used to compress multiple files or directories at once. Furthermore, as a tool to be used in the cloud computing era, it is capable of saving compressed data locally or transmitting data directly into cloud by choice. We evaluated the performance of GTZ on some diverse FASTQ benchmarks. Results show that in most cases, it outperforms many other tools in terms of the compression ratio, speed and stability.

CONCLUSIONS: GTZ is a tool that enables efficient lossless FASTQ data compression and simultaneous data transmission onto the cloud.
These studies are well planned, well organized, and costly. However, people record health data themselves using different sensors, which are mostly unplanned, unorganized and inexpensive. Nevertheless, self-recorded data might be an important supplement to population studies. The question is how to access and use this data. In the seventh survey of the Tromsøcohort study, questionnaires and accelerometers were used to collect data on physical activity (PA). We now plan to collect historical PA data from these participants, using mobile sensor data already stored in the cloud. We will examine the feasibility of this approach and the quality of this data. Objectively measured historical data will provide valuable insights in the potential and limitations of mobile sensors as new data collection tools in medical research.}, } @article {pmid29295047, year = {2017}, author = {Cilliers, L and Wright, G}, title = {Electronic Health Records in the Cloud: Improving Primary Health Care Delivery in South Africa.}, journal = {Studies in health technology and informatics}, volume = {245}, number = {}, pages = {35-39}, pmid = {29295047}, issn = {1879-8365}, mesh = {*Delivery of Health Care ; *Electronic Health Records ; Humans ; Primary Health Care ; South Africa ; }, abstract = {In South Africa, the recording of health data is done manually in a paper-based file, while attempts to digitize healthcare records have had limited success. In many countries, Electronic Health Records (EHRs) has developed in silos, with little or no integration between different operational systems. Literature has provided evidence that the cloud can be used to 'leapfrog' some of these implementation issues, but the adoption of this technology in the public health care sector has been very limited. 
This paper aims to identify the major reasons why the cloud has not been used to implement EHRs for the South African public health care system, and to provide recommendations of how to overcome these challenges. From the literature, it is clear that there are technology, environmental and organisational challenges affecting the implementation of EHRs in the cloud. Four recommendations are provided that can be used by the National Department of Health to implement EHRs making use of the cloud.}, } @article {pmid32218876, year = {2018}, author = {Sareen, S and Sood, SK and Gupta, SK}, title = {IoT-based cloud framework to control Ebola virus outbreak.}, journal = {Journal of ambient intelligence and humanized computing}, volume = {9}, number = {3}, pages = {459-476}, pmid = {32218876}, issn = {1868-5137}, abstract = {Ebola is a deadly infectious virus that spreads very quickly through human-to-human transmission and sometimes death. The continuous detection and remote monitoring of infected patients are required in order to prevent the spread of Ebola virus disease (EVD). Healthcare services based on Internet of Things (IoT) and cloud computing technologies are emerging as a more effective and proactive solution which provides remote continuous monitoring of patients. A novel architecture based on Radio Frequency Identification Device (RFID), wearable sensor technology, and cloud computing infrastructure is proposed for the detection and monitoring of Ebola infected patients. The aim of this work is to prevent the spreading of the infection at the early stage of the outbreak. The J48 decision tree is used to evaluate the level of infection in a user depending on his symptoms. RFID is used to automatically sense the close proximity interactions (CPIs) between users. Temporal Network Analysis (TNA) is applied to describe and monitor the current state of the outbreak using the CPI data. 
The performance and accuracy of our proposed model are evaluated on Amazon EC2 cloud using synthetic data of two million users. Our proposed model provided 94 % accuracy for the classification and 92 % of the resource utilization.}, } @article {pmid32021700, year = {2018}, author = {Sazib, N and Mladenova, I and Bolten, J}, title = {Leveraging Google Earth Engine for Drought Assessment using Global Soil Moisture Data.}, journal = {Remote sensing}, volume = {10}, number = {8}, pages = {}, pmid = {32021700}, issn = {2072-4292}, support = {N-999999/ImNASA/Intramural NASA/United States ; }, abstract = {Soil moisture is considered a key variable to assess crop and drought conditions. However, readily available soil moisture datasets developed for monitoring agricultural drought conditions are uncommon. The aim of this work is to examine two global soil moisture data sets and a set of soil moisture web-based processing tools developed to demonstrate the value of the soil moisture data for drought monitoring and crop forecasting using Google Earth Engine (GEE). The two global soil moisture data sets discussed in the paper are generated by integrating Soil Moisture Ocean Salinity (SMOS) and Soil Moisture Active Passive (SMAP) satellite-derived observations into the modified two-layer Palmer model using a 1-D Ensemble Kalman Filter (EnKF) data assimilation approach. The web-based tools are designed to explore soil moisture variability as a function of land cover change and to easily estimate drought characteristics such as drought duration and intensity using soil moisture anomalies, and to inter-compare them against alternative drought indicators. To demonstrate the utility of these tools for agricultural drought monitoring, the soil moisture products, vegetation- and precipitation-based products are assessed over drought prone regions in South Africa and Ethiopia. 
Overall, the 3-month scale Standardized Precipitation Index (SPI) and Normalized Difference Vegetation Index (NDVI) showed higher agreement with the root zone soil moisture anomalies. Soil moisture anomalies exhibited lower drought duration but higher intensity compared to SPIs. Inclusion of the global soil moisture data into GEE data catalog and the development of the web-based tools described in the paper enable a vast diversity of users to quickly and easily assess the impact of drought and improve planning related to drought risk assessment and early warning. GEE also improves the accessibility and usability of the earth observation data and related tools by making them available to a wide range of researchers and the public. In particular, the cloud-based nature of GEE is useful for providing access to the soil moisture data and scripts to users in developing countries that lack adequate observational soil moisture data or the necessary computational resources required to develop them.}, } @article {pmid31061674, year = {2018}, author = {Heintz, D and Gryk, MR}, title = {Curating Scientific Workflows for Biomolecular Nuclear Magnetic Resonance Spectroscopy.}, journal = {International journal of digital curation}, volume = {13}, number = {1}, pages = {286-293}, pmid = {31061674}, issn = {1746-8256}, support = {P41 GM111135/GM/NIGMS NIH HHS/United States ; }, abstract = {This paper describes our recent and ongoing efforts for enhancing the curation of scientific workflows to improve reproducibility and reusability of biomolecular nuclear magnetic resonance (bioNMR) data. Our efforts have focused on both developing a workflow management system, called CONNJUR Workflow Builder (CWB), as well as refactoring our workflow data model to make use of the PREMIS model for digital preservation. This revised workflow management system will be available through the NMRbox cloud-computing platform for bioNMR. 
In addition, we are implementing a new file structure which bundles the original binary data files along with PREMIS XML records describing the provenance of the data. These are packaged together using a standardized file archive utility. In this manner, the provenance and data curation information is maintained together along with the scientific data. The benefits and limitations of these approaches as well as future directions are discussed.}, } @article {pmid29280752, year = {2018}, author = {Gorman, D and Kashner, TM}, title = {Medical Graduates, Truthful and Useful Analytics With Big Data, and the Art of Persuasion.}, journal = {Academic medicine : journal of the Association of American Medical Colleges}, volume = {93}, number = {8}, pages = {1113-1116}, doi = {10.1097/ACM.0000000000002109}, pmid = {29280752}, issn = {1938-808X}, mesh = {Benchmarking/methods ; *Big Data ; Clinical Competence/standards ; Evidence-Based Medicine/*education/methods ; Humans ; Needs Assessment ; Outcome Assessment, Health Care/standards ; Persuasive Communication ; }, abstract = {The authors propose that the provision of state-of-the-art, effective, safe, and affordable health care requires medical school graduates not only to be competent practitioners and scientists but also to be policy makers and professional leaders. To meet this challenge in the era of big data and cloud computing, these graduates must be able to understand and critically interpret analyses of large, observational datasets from electronic health records, third-party claims files, surveys, and epidemiologic health datasets.The authors contend that medical students need to be exposed to three components. First, students should be familiar with outcome metrics that not only are scientifically valid but also are robust, useful for the medical community, understandable to patients and relevant to their preferences and health goals, and persuasive to health administrators and policy decision makers. 
Next, students must interact with an inclusive set of analysts including biostatisticians, mathematical and computational statisticians, econometrists, psychometricians, epidemiologists, informaticians, and qualitative researchers. Last, students should learn in environments in which data analyses are not static with a "one-size-fits-all" solution but, rather, where mathematical and computer scientists provide new, innovative, and effective ways of solving predictable and commonplace data limitations such as missing data; make causal inferences from nonrandomized studies and/or those with selection biases; and estimate effect size when patient outcomes are heterogeneous and surveys have low response rates.}, } @article {pmid29279292, year = {2018}, author = {Kovacs, MD and Sheafor, DH and Thacker, PG and Hardie, AD and Costello, P}, title = {Metrix Matrix: A Cloud-Based System for Tracking Non-Relative Value Unit Value-Added Work Metrics.}, journal = {Journal of the American College of Radiology : JACR}, volume = {15}, number = {3 Pt A}, pages = {415-421}, doi = {10.1016/j.jacr.2017.10.028}, pmid = {29279292}, issn = {1558-349X}, mesh = {*Cloud Computing ; *Efficiency, Organizational ; Humans ; Radiologists/*statistics & numerical data ; *Relative Value Scales ; Workload/*statistics & numerical data ; }, abstract = {PURPOSE: In the era of value-based medicine, it will become increasingly important for radiologists to provide metrics that demonstrate their value beyond clinical productivity. In this article the authors describe their institution's development of an easy-to-use system for tracking value-added but non-relative value unit (RVU)-based activities.

METHODS: Metrix Matrix is an efficient cloud-based system for tracking value-added work. A password-protected home page contains links to web-based forms created using Google Forms, with collected data populating Google Sheets spreadsheets. Value-added work metrics selected for tracking included interdisciplinary conferences, hospital committee meetings, consulting on nonbilled outside studies, and practice-based quality improvement. Over a period of 4 months, value-added work data were collected for all clinical attending faculty members in a university-based radiology department (n = 39). Time required for data entry was analyzed for 2 faculty members over the same time period.

RESULTS: Thirty-nine faculty members (equivalent to 36.4 full-time equivalents) reported a total of 1,223.5 hours of value-added work time (VAWT). A formula was used to calculate "value-added RVUs" (vRVUs) from VAWT. VAWT amounted to 5,793.6 vRVUs or 6.0% of total work performed (vRVUs plus work RVUs [wRVUs]). Were vRVUs considered equivalent to wRVUs for staffing purposes, this would require an additional 2.3 full-time equivalents, on the basis of average wRVU calculations. Mean data entry time was 56.1 seconds per day per faculty member.

CONCLUSIONS: As health care reimbursement evolves with an emphasis on value-based medicine, it is imperative that radiologists demonstrate the value they add to patient care beyond wRVUs. This free and easy-to-use cloud-based system allows the efficient quantification of value-added work activities.}, } @article {pmid29278370, year = {2017}, author = {Agirre, A and Armentia, A and Estévez, E and Marcos, M}, title = {A Component-Based Approach for Securing Indoor Home Care Applications.}, journal = {Sensors (Basel, Switzerland)}, volume = {18}, number = {1}, pages = {}, pmid = {29278370}, issn = {1424-8220}, abstract = {eHealth systems have adopted recent advances on sensing technologies together with advances in information and communication technologies (ICT) in order to provide people-centered services that improve the quality of life of an increasingly elderly population. As these eHealth services are founded on the acquisition and processing of sensitive data (e.g., personal details, diagnosis, treatments and medical history), any security threat would damage the public's confidence in them. This paper proposes a solution for the design and runtime management of indoor eHealth applications with security requirements. The proposal allows applications definition customized to patient particularities, including the early detection of health deterioration and suitable reaction (events) as well as security needs. At runtime, security support is twofold. A secured component-based platform supervises applications execution and provides events management, whilst the security of the communications among application components is also guaranteed. Additionally, the proposed event management scheme adopts the fog computing paradigm to enable local event related data storage and processing, thus saving communication bandwidth when communicating with the cloud. 
As a proof of concept, this proposal has been validated through the monitoring of the health status in diabetic patients at a nursing home.}, } @article {pmid29256144, year = {2017}, author = {Sarinho, VT and Mota, AO and Silva, EP}, title = {Towards an e-Health Cloud Solution for Remote Regions at Bahia-Brazil.}, journal = {Journal of medical systems}, volume = {42}, number = {2}, pages = {23}, pmid = {29256144}, issn = {1573-689X}, mesh = {Brazil ; *Cloud Computing ; *Health Services Administration ; Humans ; Information Systems/*organization & administration ; Management Information Systems ; }, abstract = {This paper presents CloudMedic, an e-Health Cloud solution that manages health care services in remote regions of Bahia-Brazil. For that, six main modules: Clinic, Hospital, Supply, Administrative, Billing and Health Business Intelligence, were developed to control the health flow among health actors at health institutions. They provided database model and procedures for health business rules, a standard gateway for data maintenance between web views and database layer, and a multi-front-end framework based on web views and web commands configurations. These resources were used by 2042 health actors in 261 health posts covering health demands from 118 municipalities at Bahia state. They also managed approximately 2.4 million health service orders and approximately 13.5 million health exams for more than 1.3 million registered patients. As a result, a collection of health functionalities available in a cloud infrastructure was successfully developed, deployed and validated in more than 28% of Bahia municipalities. 
A viable e-Health Cloud solution that, despite municipality limitations in remote regions, decentralized and improved the access to health care services at Bahia state.}, } @article {pmid29231378, year = {2016}, author = {Long, Y and Liu, B and Mao, S and Ma, L}, title = {[Review and prospect of the standardization of acupuncture and moxibustion in China].}, journal = {Zhongguo zhen jiu = Chinese acupuncture & moxibustion}, volume = {36}, number = {12}, pages = {1337-1340}, doi = {10.13703/j.0255-2930.2016.12.031}, pmid = {29231378}, issn = {0255-2930}, mesh = {Acupuncture Therapy/*standards ; China ; Humans ; Moxibustion/*standards ; }, abstract = {Literature, achievements and other materials on acupuncture-moxibustion standardization research in recent years were collected and summarized domestically and overseas. The process of acupuncture-moxibustion standardization in China was reviewed through two phases. Also, we compared domestic acupuncture-moxibustion standardization study with the foreign one. It is considered that its domestic development is sound but challenged by world competition in its internationalization. Going forward, acupuncture-moxibustion standardization system is needed to be completed and become international. 
Contemporary techniques should be used including big data, artificial intelligence, internet of things, and cloud computing, etc., and its application engineering research will be improved so as to drive Chinese medicine modernization.}, } @article {pmid29228186, year = {2018}, author = {Luber, JM and Tierney, BT and Cofer, EM and Patel, CJ and Kostic, AD}, title = {Aether: leveraging linear programming for optimal cloud computing in genomics.}, journal = {Bioinformatics (Oxford, England)}, volume = {34}, number = {9}, pages = {1565-1567}, pmid = {29228186}, issn = {1367-4811}, support = {P30 DK036836/DK/NIDDK NIH HHS/United States ; R00 ES023504/ES/NIEHS NIH HHS/United States ; R21 ES025052/ES/NIEHS NIH HHS/United States ; T32 HG002295/HG/NHGRI NIH HHS/United States ; }, mesh = {*Cloud Computing ; Genomics/*methods ; *Programming, Linear ; *Software ; }, abstract = {MOTIVATION: Across biology, we are seeing rapid developments in scale of data production without a corresponding increase in data analysis capabilities.

RESULTS: Here, we present Aether (http://aether.kosticlab.org), an intuitive, easy-to-use, cost-effective and scalable framework that uses linear programming to optimally bid on and deploy combinations of underutilized cloud computing resources. Our approach simultaneously minimizes the cost of data analysis and provides an easy transition from users' existing HPC pipelines.

Data utilized are available at https://pubs.broadinstitute.org/diabimmune and with EBI SRA accession ERP005989. Source code is available at (https://github.com/kosticlab/aether). Examples, documentation and a tutorial are available at http://aether.kosticlab.org.

CONTACT: chirag_patel@hms.harvard.edu or aleksandar.kostic@joslin.harvard.edu.

SUPPLEMENTARY INFORMATION: Supplementary data are available at Bioinformatics online.}, } @article {pmid29220078, year = {2017}, author = {Malhotra, R and Seth, I and Lehnert, E and Zhao, J and Kaushik, G and Williams, EH and Sethi, A and Davis-Dusenbery, BN}, title = {Using the Seven Bridges Cancer Genomics Cloud to Access and Analyze Petabytes of Cancer Data.}, journal = {Current protocols in bioinformatics}, volume = {60}, number = {}, pages = {11.16.1-11.16.32}, pmid = {29220078}, issn = {1934-340X}, support = {HHSN261201400008C/CA/NCI NIH HHS/United States ; }, mesh = {Cloud Computing ; Computational Biology ; Data Interpretation, Statistical ; Databases, Genetic/*statistics & numerical data ; Genomics ; High-Throughput Nucleotide Sequencing ; Humans ; Metadata ; Neoplasms/*genetics ; Pilot Projects ; }, abstract = {Next-generation sequencing has produced petabytes of data, but accessing and analyzing these data remain challenging. Traditionally, researchers investigating public datasets like The Cancer Genome Atlas (TCGA) would download the data to a high-performance cluster, which could take several weeks even with a highly optimized network connection. The National Cancer Institute (NCI) initiated the Cancer Genomics Cloud Pilots program to provide researchers with the resources to process data with cloud computational resources. We present protocols using one of these Cloud Pilots, the Seven Bridges Cancer Genomics Cloud (CGC), to find and query public datasets, bring your own data to the CGC, analyze data using standard or custom workflows, and benchmark tools for accuracy with interactive analysis features. These protocols demonstrate that the CGC is a data-analysis ecosystem that fully empowers researchers with a variety of areas of expertise and interests to collaborate in the analysis of petabytes of data. 
© 2017 by John Wiley & Sons, Inc.}, } @article {pmid29218873, year = {2018}, author = {Verma, SS and Verma, A and Basile, AO and Bishop, MB and Darabos, C}, title = {Session Introduction: Challenges of Pattern Recognition in Biomedical Data.}, journal = {Pacific Symposium on Biocomputing. Pacific Symposium on Biocomputing}, volume = {23}, number = {}, pages = {104-110}, pmid = {29218873}, issn = {2335-6936}, abstract = {The analysis of large biomedical data often presents with various challenges related to not just the size of the data, but also to data quality issues such as heterogeneity, multidimensionality, noisiness, and incompleteness of the data. The data-intensive nature of computational genomics problems in biomedical informatics warrants the development and use of massive computer infrastructure and advanced software tools and platforms, including but not limited to the use of cloud computing. Our session aims to address these challenges in handling big data for designing a study, performing analysis, and interpreting outcomes of these analyses. These challenges have been prevalent in many studies including those which focus on the identification of novel genetic variant-phenotype associations using data from sources like Electronic Health Records (EHRs) or multi-omic data. One of the biggest challenges to focus on is the imperfect nature of the biomedical data where a lot of noise and sparseness is observed. 
In our session, we will present research articles that can help in identifying innovative ways to recognize and overcome newly arising challenges associated with pattern recognition in biomedical data.}, } @article {pmid29208055, year = {2018}, author = {Mellerup, E and Jørgensen, MB and Dam, H and Møller, GL}, title = {Combinations of SNP genotypes from the Wellcome Trust Case Control Study of bipolar patients.}, journal = {Acta neuropsychiatrica}, volume = {30}, number = {2}, pages = {106-110}, doi = {10.1017/neu.2017.36}, pmid = {29208055}, issn = {1601-5215}, mesh = {Bipolar Disorder/*genetics ; Case-Control Studies ; Cluster Analysis ; Data Mining ; *Genetic Predisposition to Disease ; Genotype ; Humans ; *Polymorphism, Single Nucleotide ; Risk Factors ; }, abstract = {OBJECTIVES: Combinations of genetic variants are the basis for polygenic disorders. We examined combinations of SNP genotypes taken from the 446 729 SNPs in The Wellcome Trust Case Control Study of bipolar patients.

METHODS: Parallel computing by graphics processing units, cloud computing, and data mining tools were used to scan The Wellcome Trust data set for combinations.

RESULTS: Two clusters of combinations were significantly associated with bipolar disorder. One cluster contained 68 combinations, each of which included five SNP genotypes. Of the 1998 patients, 305 had combinations from this cluster in their genome, but none of the 1500 controls had any of these combinations in their genome. The other cluster contained six combinations, each of which included five SNP genotypes. Of the 1998 patients, 515 had combinations from the cluster in their genome, but none of the 1500 controls had any of these combinations in their genome.

CONCLUSION: Clusters of combinations of genetic variants can be considered general risk factors for polygenic disorders, whereas accumulation of combinations from the clusters in the genome of a patient can be considered a personal risk factor.}, } @article {pmid29207509, year = {2017}, author = {Lin, TH and Tsung, CK and Lee, TF and Wang, ZB}, title = {A Round-Efficient Authenticated Key Agreement Scheme Based on Extended Chaotic Maps for Group Cloud Meeting.}, journal = {Sensors (Basel, Switzerland)}, volume = {17}, number = {12}, pages = {}, pmid = {29207509}, issn = {1424-8220}, abstract = {The security is a critical issue for business purposes. For example, the cloud meeting must consider strong security to maintain the communication privacy. Considering the scenario with cloud meeting, we apply extended chaotic map to present passwordless group authentication key agreement, termed as Passwordless Group Authentication Key Agreement (PL-GAKA). PL-GAKA improves the computation efficiency for the simple group password-based authenticated key agreement (SGPAKE) proposed by Lee et al. in terms of computing the session key. Since the extended chaotic map has equivalent security level to the Diffie-Hellman key exchange scheme applied by SGPAKE, the security of PL-GAKA is not sacrificed when improving the computation efficiency. Moreover, PL-GAKA is a passwordless scheme, so the password maintenance is not necessary. Short-term authentication is considered, hence the communication security is stronger than other protocols by dynamically generating session key in each cloud meeting. In our analysis, we first prove that each meeting member can get the correct information during the meeting. We analyze common security issues for the proposed PL-GAKA in terms of session key security, mutual authentication, perfect forward security, and data integrity. 
Moreover, we also demonstrate that communicating in PL-GAKA is secure when suffering replay attacks, impersonation attacks, privileged insider attacks, and stolen-verifier attacks. Eventually, an overall comparison is given to show the performance between PL-GAKA, SGPAKE and related solutions.}, } @article {pmid29202689, year = {2017}, author = {Jalili, V and Matteucci, M and Masseroli, M and Ceri, S}, title = {Explorative visual analytics on interval-based genomic data and their metadata.}, journal = {BMC bioinformatics}, volume = {18}, number = {1}, pages = {536}, pmid = {29202689}, issn = {1471-2105}, mesh = {A549 Cells ; *Databases, Genetic ; Dexamethasone/pharmacology ; Ethanol/pharmacology ; Genomics/*methods ; Humans ; *Metadata ; Models, Theoretical ; Pattern Recognition, Automated ; Protein Interaction Mapping ; Software ; }, abstract = {BACKGROUND: With the wide-spreading of public repositories of NGS processed data, the availability of user-friendly and effective tools for data exploration, analysis and visualization is becoming very relevant. These tools enable interactive analytics, an exploratory approach for the seamless "sense-making" of data through on-the-fly integration of analysis and visualization phases, suggested not only for evaluating processing results, but also for designing and adapting NGS data analysis pipelines.

RESULTS: This paper presents abstractions for supporting the early analysis of NGS processed data and their implementation in an associated tool, named GenoMetric Space Explorer (GeMSE). This tool serves the needs of the GenoMetric Query Language, an innovative cloud-based system for computing complex queries over heterogeneous processed data. It can also be used starting from any text files in standard BED, BroadPeak, NarrowPeak, GTF, or general tab-delimited format, containing numerical features of genomic regions; metadata can be provided as text files in tab-delimited attribute-value format. GeMSE allows interactive analytics, consisting of on-the-fly cycling among steps of data exploration, analysis and visualization that help biologists and bioinformaticians in making sense of heterogeneous genomic datasets. By means of an explorative interaction support, users can trace past activities and quickly recover their results, seamlessly going backward and forward in the analysis steps and comparative visualizations of heatmaps.

CONCLUSIONS: GeMSE effective application and practical usefulness is demonstrated through significant use cases of biological interest. GeMSE is available at http://www.bioinformatics.deib.polimi.it/GeMSE/ , and its source code is available at https://github.com/Genometric/GeMSE under GPLv3 open-source license.}, } @article {pmid29194413, year = {2017}, author = {Yang, X and Wu, C and Lu, K and Fang, L and Zhang, Y and Li, S and Guo, G and Du, Y}, title = {An Interface for Biomedical Big Data Processing on the Tianhe-2 Supercomputer.}, journal = {Molecules (Basel, Switzerland)}, volume = {22}, number = {12}, pages = {}, pmid = {29194413}, issn = {1420-3049}, mesh = {Cloud Computing ; Computers ; *Computing Methodologies ; Humans ; *Software ; }, abstract = {Big data, cloud computing, and high-performance computing (HPC) are at the verge of convergence. Cloud computing is already playing an active part in big data processing with the help of big data frameworks like Hadoop and Spark. The recent upsurge of high-performance computing in China provides extra possibilities and capacity to address the challenges associated with big data. In this paper, we propose Orion-a big data interface on the Tianhe-2 supercomputer-to enable big data applications to run on Tianhe-2 via a single command or a shell script. Orion supports multiple users, and each user can launch multiple tasks. It minimizes the effort needed to initiate big data applications on the Tianhe-2 supercomputer via automated configuration. Orion follows the "allocate-when-needed" paradigm, and it avoids the idle occupation of computational resources. We tested the utility and performance of Orion using a big genomic dataset and achieved a satisfactory performance on Tianhe-2 with very few modifications to existing applications that were implemented in Hadoop/Spark. 
In summary, Orion provides a practical and economical interface for big data processing on Tianhe-2.}, } @article {pmid31069047, year = {2017}, author = {Agafonov, A and Mattila, K and Tuan, CD and Tiede, L and Raknes, IA and Bongo, LA}, title = {META-pipe cloud setup and execution.}, journal = {F1000Research}, volume = {6}, number = {}, pages = {}, pmid = {31069047}, issn = {2046-1402}, abstract = {META-pipe is a complete service for the analysis of marine metagenomic data. It provides assembly of high-throughput sequence data, functional annotation of predicted genes, and taxonomic profiling. The functional annotation is computationally demanding and is therefore currently run on a high-performance computing cluster in Norway. However, additional compute resources are necessary to open the service to all ELIXIR users. We describe our approach for setting up and executing the functional analysis of META-pipe on additional academic and commercial clouds. Our goal is to provide a powerful analysis service that is easy to use and to maintain. Our design therefore uses a distributed architecture where we combine central servers with multiple distributed backends that execute the computationally intensive jobs. 
We believe our experiences developing and operating META-pipe provides a useful model for others that plan to provide a portal based data analysis service in ELIXIR and other organizations with geographically distributed compute and storage resources.}, } @article {pmid29175431, year = {2018}, author = {Liu, L and Liu, L and Fu, X and Huang, Q and Zhang, X and Zhang, Y}, title = {A cloud-based framework for large-scale traditional Chinese medical record retrieval.}, journal = {Journal of biomedical informatics}, volume = {77}, number = {}, pages = {21-33}, doi = {10.1016/j.jbi.2017.11.013}, pmid = {29175431}, issn = {1532-0480}, mesh = {Algorithms ; China ; *Cloud Computing ; *Electronic Health Records ; Humans ; Information Storage and Retrieval/*methods ; Medical Informatics/methods ; Semantics ; }, abstract = {INTRODUCTION: Electronic medical records are increasingly common in medical practice. The secondary use of medical records has become increasingly important. It relies on the ability to retrieve the complete information about desired patient populations. How to effectively and accurately retrieve relevant medical records from large-scale medical big data is becoming a big challenge. Therefore, we propose an efficient and robust framework based on cloud for large-scale Traditional Chinese Medical Records (TCMRs) retrieval.

METHODS: We propose a parallel index building method and build a distributed search cluster, the former is used to improve the performance of index building, and the latter is used to provide high concurrent online TCMRs retrieval. Then, a real-time multi-indexing model is proposed to ensure the latest relevant TCMRs are indexed and retrieved in real-time, and a semantics-based query expansion method and a multi-factor ranking model are proposed to improve retrieval quality. Third, we implement a template-based visualization method for displaying medical reports.

RESULTS: The proposed parallel indexing method and distributed search cluster can improve the performance of index building and provide high concurrent online TCMRs retrieval. The multi-indexing model can ensure the latest relevant TCMRs are indexed and retrieved in real-time. The semantics expansion method and the multi-factor ranking model can enhance retrieval quality. The template-based visualization method can enhance the availability and universality, where the medical reports are displayed via friendly web interface.

CONCLUSIONS: In conclusion, compared with the current medical record retrieval systems, our system provides some advantages that are useful in improving the secondary use of large-scale traditional Chinese medical records in cloud environment. The proposed system is more easily integrated with existing clinical systems and be used in various scenarios.}, } @article {pmid29164544, year = {2017}, author = {Chao, LF and Huang, HP and Ni, LF and Tsai, CL and Huang, TY}, title = {[Construction and Application of Innovative Education Technology Strategies in Nursing].}, journal = {Hu li za zhi The journal of nursing}, volume = {64}, number = {6}, pages = {26-33}, doi = {10.6224/JN.000080}, pmid = {29164544}, issn = {0047-262X}, mesh = {Computer-Assisted Instruction ; *Education, Nursing ; *Educational Technology ; Humans ; Virtual Reality ; }, abstract = {The evolution of information and communication technologies has deeply impacted education reform, promoted the development of digital-learning models, and stimulated the development of diverse nursing education strategies in order to better fulfill needs and expand in new directions. The present paper introduces the intelligent-learning resources that are available for basic medical science education, problem-based learning, nursing scenario-based learning, objective structured clinical examinations, and other similar activities in the Department of Nursing at Chang Gung University of Science and Technology. The program is offered in two parts: specialized classroom facilities and cloud computing / mobile-learning. The latter includes high-fidelity simulation classrooms, online e-books, and virtual interactive simulation and augmented reality mobile-learning materials, which are provided through multimedia technology development, learning management systems, web-certificated examinations, and automated teaching and learning feedback mechanisms. 
It is expected that the teaching experiences that are shared in this article may be used as a reference for applying professional wisdom teaching models into nursing education.}, } @article {pmid29164340, year = {2017}, author = {Ahmad, A and Asif, A and Rajpoot, N and Arif, M and Minhas, FUAA}, title = {Correlation Filters for Detection of Cellular Nuclei in Histopathology Images.}, journal = {Journal of medical systems}, volume = {42}, number = {1}, pages = {7}, pmid = {29164340}, issn = {1573-689X}, mesh = {Cell Nucleus/*pathology ; Fourier Analysis ; Humans ; Image Interpretation, Computer-Assisted/*methods ; *Machine Learning ; }, abstract = {UNLABELLED: Nuclei detection in histology images is an essential part of computer aided diagnosis of cancers and tumors. It is a challenging task due to diverse and complicated structures of cells. In this work, we present an automated technique for detection of cellular nuclei in hematoxylin and eosin stained histopathology images. Our proposed approach is based on kernelized correlation filters. Correlation filters have been widely used in object detection and tracking applications but their strength has not been explored in the medical imaging domain up till now. Our experimental results show that the proposed scheme gives state of the art accuracy and can learn complex nuclear morphologies. Like deep learning approaches, the proposed filters do not require engineering of image features as they can operate directly on histopathology images without significant preprocessing. However, unlike deep learning methods, the large-margin correlation filters developed in this work are interpretable, computationally efficient and do not require specialized or expensive computing hardware.

AVAILABILITY: A cloud based webserver of the proposed method and its python implementation can be accessed at the following URL: http://faculty.pieas.edu.pk/fayyaz/software.html#corehist .}, } @article {pmid29163119, year = {2017}, author = {Madhyastha, TM and Koh, N and Day, TKM and Hernández-Fernández, M and Kelley, A and Peterson, DJ and Rajan, S and Woelfer, KA and Wolf, J and Grabowski, TJ}, title = {Running Neuroimaging Applications on Amazon Web Services: How, When, and at What Cost?.}, journal = {Frontiers in neuroinformatics}, volume = {11}, number = {}, pages = {63}, pmid = {29163119}, issn = {1662-5196}, support = {P50 AG005136/AG/NIA NIH HHS/United States ; P50 NS062684/NS/NINDS NIH HHS/United States ; U54 HD083091/HD/NICHD NIH HHS/United States ; }, abstract = {The contribution of this paper is to identify and describe current best practices for using Amazon Web Services (AWS) to execute neuroimaging workflows "in the cloud." Neuroimaging offers a vast set of techniques by which to interrogate the structure and function of the living brain. However, many of the scientists for whom neuroimaging is an extremely important tool have limited training in parallel computation. At the same time, the field is experiencing a surge in computational demands, driven by a combination of data-sharing efforts, improvements in scanner technology that allow acquisition of images with higher image resolution, and by the desire to use statistical techniques that stress processing requirements. Most neuroimaging workflows can be executed as independent parallel jobs and are therefore excellent candidates for running on AWS, but the overhead of learning to do so and determining whether it is worth the cost can be prohibitive. In this paper we describe how to identify neuroimaging workloads that are appropriate for running on AWS, how to benchmark execution time, and how to estimate cost of running on AWS. 
By benchmarking common neuroimaging applications, we show that cloud computing can be a viable alternative to on-premises hardware. We present guidelines that neuroimaging labs can use to provide a cluster-on-demand type of service that should be familiar to users, and scripts to estimate cost and create such a cluster.}, } @article {pmid29043062, year = {2017}, author = {van Rijswijk, M and Beirnaert, C and Caron, C and Cascante, M and Dominguez, V and Dunn, WB and Ebbels, TMD and Giacomoni, F and Gonzalez-Beltran, A and Hankemeier, T and Haug, K and Izquierdo-Garcia, JL and Jimenez, RC and Jourdan, F and Kale, N and Klapa, MI and Kohlbacher, O and Koort, K and Kultima, K and Le Corguillé, G and Moreno, P and Moschonas, NK and Neumann, S and O'Donovan, C and Reczko, M and Rocca-Serra, P and Rosato, A and Salek, RM and Sansone, SA and Satagopam, V and Schober, D and Shimmo, R and Spicer, RA and Spjuth, O and Thévenot, EA and Viant, MR and Weber, RJM and Willighagen, EL and Zanetti, G and Steinbeck, C}, title = {The future of metabolomics in ELIXIR.}, journal = {F1000Research}, volume = {6}, number = {}, pages = {}, pmid = {29043062}, issn = {2046-1402}, support = {BB/H024921/1/BB_/Biotechnology and Biological Sciences Research Council/United Kingdom ; BB/I000771/1/BB_/Biotechnology and Biological Sciences Research Council/United Kingdom ; MR/M009157/1/MRC_/Medical Research Council/United Kingdom ; }, abstract = {Metabolomics, the youngest of the major omics technologies, is supported by an active community of researchers and infrastructure developers across Europe. To coordinate and focus efforts around infrastructure building for metabolomics within Europe, a workshop on the "Future of metabolomics in ELIXIR" was organised at Frankfurt Airport in Germany. 
This one-day strategic workshop involved representatives of ELIXIR Nodes, members of the PhenoMeNal consortium developing an e-infrastructure that supports workflow-based metabolomics analysis pipelines, and experts from the international metabolomics community. The workshop established metabolite identification as the critical area, where a maximal impact of computational metabolomics and data management on other fields could be achieved. In particular, the existing four ELIXIR Use Cases, where the metabolomics community - both industry and academia - would benefit most, and which could be exhaustively mapped onto the current five ELIXIR Platforms were discussed. This opinion article is a call for support for a new ELIXIR metabolomics Use Case, which aligns with and complements the existing and planned ELIXIR Platforms and Use Cases.}, } @article {pmid29131508, year = {2018}, author = {Ahlstrand, E and Buetti-Dinh, A and Friedman, R}, title = {An interactive computer lab of the galvanic cell for students in biochemistry.}, journal = {Biochemistry and molecular biology education : a bimonthly publication of the International Union of Biochemistry and Molecular Biology}, volume = {46}, number = {1}, pages = {58-65}, doi = {10.1002/bmb.21091}, pmid = {29131508}, issn = {1539-3429}, mesh = {Biochemistry/*education ; Electrochemical Techniques/*instrumentation ; Humans ; *Laboratories ; Learning ; *Software ; *Students ; Thermodynamics ; }, abstract = {We describe an interactive module that can be used to teach basic concepts in electrochemistry and thermodynamics to first year natural science students. The module is used together with an experimental laboratory and improves the students' understanding of thermodynamic quantities such as Δr G, Δr H, and Δr S that are calculated but not directly measured in the lab. We also discuss how new technologies can substitute some parts of experimental chemistry courses, and improve accessibility to course material. 
Cloud computing platforms such as CoCalc facilitate the distribution of computer codes and allow students to access and apply interactive course tools beyond the course's scope. Despite some limitations imposed by cloud computing, the students appreciated the approach and the enhanced opportunities to discuss study questions with their classmates and instructor as facilitated by the interactive tools. © 2017 by The International Union of Biochemistry and Molecular Biology, 46(1):58-65, 2018.}, } @article {pmid29126246, year = {2018}, author = {Polanski, K and Gao, B and Mason, SA and Brown, P and Ott, S and Denby, KJ and Wild, DL}, title = {Bringing numerous methods for expression and promoter analysis to a public cloud computing service.}, journal = {Bioinformatics (Oxford, England)}, volume = {34}, number = {5}, pages = {884-886}, pmid = {29126246}, issn = {1367-4811}, support = {BB/M018431/1//Biotechnology and Biological Sciences Research Council/United Kingdom ; }, mesh = {Algorithms ; *Cloud Computing ; Computational Biology/*methods ; Gene Expression Profiling/methods ; *Gene Expression Regulation ; *Promoter Regions, Genetic ; Sequence Analysis, DNA/methods ; Sequence Analysis, RNA/methods ; *Software ; }, abstract = {SUMMARY: Every year, a large number of novel algorithms are introduced to the scientific community for a myriad of applications, but using these across different research groups is often troublesome, due to suboptimal implementations and specific dependency requirements. This does not have to be the case, as public cloud computing services can easily house tractable implementations within self-contained dependency environments, making the methods easily accessible to a wider public. 
We have taken 14 popular methods, the majority related to expression data or promoter analysis, developed these up to a good implementation standard and housed the tools in isolated Docker containers which we integrated into the CyVerse Discovery Environment, making these easily usable for a wide community as part of the CyVerse UK project.

The integrated apps can be found at http://www.cyverse.org/discovery-environment, while the raw code is available at https://github.com/cyversewarwick and the corresponding Docker images are housed at https://hub.docker.com/r/cyversewarwick/.

CONTACT: info@cyverse.warwick.ac.uk or D.L.Wild@warwick.ac.uk.

SUPPLEMENTARY INFORMATION: Supplementary data are available at Bioinformatics online.}, } @article {pmid29123534, year = {2017}, author = {Jones, S and Baizan-Edge, A and MacFarlane, S and Torrance, L}, title = {Viral Diagnostics in Plants Using Next Generation Sequencing: Computational Analysis in Practice.}, journal = {Frontiers in plant science}, volume = {8}, number = {}, pages = {1770}, pmid = {29123534}, issn = {1664-462X}, abstract = {Viruses cause significant yield and quality losses in a wide variety of cultivated crops. Hence, the detection and identification of viruses is a crucial facet of successful crop production and of great significance in terms of world food security. Whilst the adoption of molecular techniques such as RT-PCR has increased the speed and accuracy of viral diagnostics, such techniques only allow the detection of known viruses, i.e., each test is specific to one or a small number of related viruses. Therefore, unknown viruses can be missed and testing can be slow and expensive if molecular tests are unavailable. Methods for simultaneous detection of multiple viruses have been developed, and next generation sequencing (NGS) is now a principal focus of this area, as it enables unbiased and hypothesis-free testing of plant samples. The development of NGS protocols capable of detecting multiple known and emergent viruses present in infected material is proving to be a major advance for crops, nuclear stocks or imported plants and germplasm, in which disease symptoms are absent, unspecific or only triggered by multiple viruses. Researchers want to answer the question "how many different viruses are present in this crop plant?" without knowing what they are looking for: RNA-sequencing (RNA-seq) of plant material allows this question to be addressed. As well as needing efficient nucleic acid extraction and enrichment protocols, virus detection using RNA-seq requires fast and robust bioinformatics methods to enable host sequence removal and virus classification.
In this review recent studies that use RNA-seq for virus detection in a variety of crop plants are discussed with specific emphasis on the computational methods implemented. The main features of a number of specific bioinformatics workflows developed for virus detection from NGS data are also outlined and possible reasons why these have not yet been widely adopted are discussed. The review concludes by discussing the future directions of this field, including the use of bioinformatics tools for virus detection deployed in analytical environments using cloud computing.}, } @article {pmid29106455, year = {2018}, author = {Maarala, AI and Bzhalava, Z and Dillner, J and Heljanko, K and Bzhalava, D}, title = {ViraPipe: scalable parallel pipeline for viral metagenome analysis from next generation sequencing reads.}, journal = {Bioinformatics (Oxford, England)}, volume = {34}, number = {6}, pages = {928-935}, doi = {10.1093/bioinformatics/btx702}, pmid = {29106455}, issn = {1367-4811}, mesh = {Algorithms ; Computers ; *Genome, Viral ; High-Throughput Nucleotide Sequencing/*methods ; Humans ; Metagenome ; Metagenomics/*methods ; Microbiota/genetics ; Sequence Analysis, DNA/methods ; Sequence Analysis, RNA/methods ; *Software ; Viruses/*genetics ; }, abstract = {MOTIVATION: Next Generation Sequencing (NGS) technology enables identification of microbial genomes from massive amount of human microbiomes more rapidly and cheaper than ever before. However, the traditional sequential genome analysis algorithms, tools, and platforms are inefficient for performing large-scale metagenomic studies on ever-growing sample data volumes. Currently, there is an urgent need for scalable analysis pipelines that enable harnessing all the power of parallel computation in computing clusters and in cloud computing environments. We propose ViraPipe, a scalable metagenome analysis pipeline that is able to analyze thousands of human microbiomes in parallel in tolerable time. 
The pipeline is tuned for analyzing viral metagenomes and the software is applicable for other metagenomic analyses as well. ViraPipe integrates parallel BWA-MEM read aligner, MegaHit De novo assembler, and BLAST and HMMER3 sequence search tools. We show the scalability of ViraPipe by running experiments on mining virus related genomes from NGS datasets in a distributed Spark computing cluster.

RESULTS: ViraPipe analyses 768 human samples in 210 minutes on a Spark computing cluster comprising 23 nodes and 1288 cores in total. The speedup of ViraPipe executed on 23 nodes was 11x compared to the sequential analysis pipeline executed on a single node. The whole process includes parallel decompression, read interleaving, BWA-MEM read alignment, filtering and normalizing of non-human reads, De novo contigs assembling, and searching of sequences with BLAST and HMMER3 tools.

CONTACT: ilari.maarala@aalto.fi.

https://github.com/NGSeq/ViraPipe.}, } @article {pmid29100109, year = {2018}, author = {Baldwin, PR and Tan, YZ and Eng, ET and Rice, WJ and Noble, AJ and Negro, CJ and Cianfrocco, MA and Potter, CS and Carragher, B}, title = {Big data in cryoEM: automated collection, processing and accessibility of EM data.}, journal = {Current opinion in microbiology}, volume = {43}, number = {}, pages = {1-8}, pmid = {29100109}, issn = {1879-0364}, support = {P41 GM103310/GM/NIGMS NIH HHS/United States ; R01 GM099678/GM/NIGMS NIH HHS/United States ; }, mesh = {Automation, Laboratory/instrumentation ; *Big Data ; Cloud Computing ; Cryoelectron Microscopy/*methods/statistics & numerical data ; }, abstract = {The scope and complexity of cryogenic electron microscopy (cryoEM) data has greatly increased, and will continue to do so, due to recent and ongoing technical breakthroughs that have led to much improved resolutions for macromolecular structures solved using this method. This big data explosion includes single particle data as well as tomographic tilt series, both generally acquired as direct detector movies of ∼10-100 frames per image or per tilt-series. 
We provide a brief survey of the developments leading to the current status, and describe existing cryoEM pipelines, with an emphasis on the scope of data acquisition, methods for automation, and use of cloud storage and computing.}, } @article {pmid29092928, year = {2017}, author = {Reynolds, SM and Miller, M and Lee, P and Leinonen, K and Paquette, SM and Rodebaugh, Z and Hahn, A and Gibbs, DL and Slagel, J and Longabaugh, WJ and Dhankani, V and Reyes, M and Pihl, T and Backus, M and Bookman, M and Deflaux, N and Bingham, J and Pot, D and Shmulevich, I}, title = {The ISB Cancer Genomics Cloud: A Flexible Cloud-Based Platform for Cancer Genomics Research.}, journal = {Cancer research}, volume = {77}, number = {21}, pages = {e7-e10}, pmid = {29092928}, issn = {1538-7445}, support = {HHSN261201400007C/CA/NCI NIH HHS/United States ; }, mesh = {*Cloud Computing ; *Computational Biology ; Datasets as Topic ; Genome, Human ; *Genomics ; Humans ; Internet ; National Cancer Institute (U.S.) ; Neoplasms/*genetics ; Research/trends ; Software ; United States ; }, abstract = {The ISB Cancer Genomics Cloud (ISB-CGC) is one of three pilot projects funded by the National Cancer Institute to explore new approaches to computing on large cancer datasets in a cloud environment. With a focus on Data as a Service, the ISB-CGC offers multiple avenues for accessing and analyzing The Cancer Genome Atlas, TARGET, and other important references such as GENCODE and COSMIC using the Google Cloud Platform. The open approach allows researchers to choose approaches best suited to the task at hand: from analyzing terabytes of data using complex workflows to developing new analysis methods in common languages such as Python, R, and SQL; to using an interactive web application to create synthetic patient cohorts and to explore the wealth of available genomic data. Links to resources and documentation can be found at www.isb-cgc.org Cancer Res; 77(21); e7-10. 
©2017 AACR.}, } @article {pmid29092927, year = {2017}, author = {Lau, JW and Lehnert, E and Sethi, A and Malhotra, R and Kaushik, G and Onder, Z and Groves-Kirkby, N and Mihajlovic, A and DiGiovanna, J and Srdic, M and Bajcic, D and Radenkovic, J and Mladenovic, V and Krstanovic, D and Arsenijevic, V and Klisic, D and Mitrovic, M and Bogicevic, I and Kural, D and Davis-Dusenbery, B and , }, title = {The Cancer Genomics Cloud: Collaborative, Reproducible, and Democratized-A New Paradigm in Large-Scale Computational Research.}, journal = {Cancer research}, volume = {77}, number = {21}, pages = {e3-e6}, pmid = {29092927}, issn = {1538-7445}, support = {HHSN261201400008C/CA/NCI NIH HHS/United States ; }, mesh = {*Computational Biology ; Genome, Human ; *Genomics ; Humans ; Internet ; Neoplasms/*genetics ; Research ; Software ; }, abstract = {The Seven Bridges Cancer Genomics Cloud (CGC; www.cancergenomicscloud.org) enables researchers to rapidly access and collaborate on massive public cancer genomic datasets, including The Cancer Genome Atlas. It provides secure on-demand access to data, analysis tools, and computing resources. Researchers from diverse backgrounds can easily visualize, query, and explore cancer genomic datasets visually or programmatically. Data of interest can be immediately analyzed in the cloud using more than 200 preinstalled, curated bioinformatics tools and workflows. Researchers can also extend the functionality of the platform by adding their own data and tools via an intuitive software development kit. By colocalizing these resources in the cloud, the CGC enables scalable, reproducible analyses. Researchers worldwide can use the CGC to investigate key questions in cancer genomics. Cancer Res; 77(21); e3-6. 
©2017 AACR.}, } @article {pmid29086154, year = {2017}, author = {Hanwell, MD and de Jong, WA and Harris, CJ}, title = {Open chemistry: RESTful web APIs, JSON, NWChem and the modern web application.}, journal = {Journal of cheminformatics}, volume = {9}, number = {1}, pages = {55}, pmid = {29086154}, issn = {1758-2946}, abstract = {An end-to-end platform for chemical science research has been developed that integrates data from computational and experimental approaches through a modern web-based interface. The platform offers an interactive visualization and analytics environment that functions well on mobile, laptop and desktop devices. It offers pragmatic solutions to ensure that large and complex data sets are more accessible. Existing desktop applications/frameworks were extended to integrate with high-performance computing resources, and offer command-line tools to automate interaction-connecting distributed teams to this software platform on their own terms. The platform was developed openly, and all source code hosted on the GitHub platform with automated deployment possible using Ansible coupled with standard Ubuntu-based machine images deployed to cloud machines. The platform is designed to enable teams to reap the benefits of the connected web-going beyond what conventional search and analytics platforms offer in this area. It also has the goal of offering federated instances, that can be customized to the sites/research performed. Data gets stored using JSON, extending upon previous approaches using XML, building structures that support computational chemistry calculations. 
These structures were developed to make it easy to process data across different languages, and send data to a JavaScript-based web client.}, } @article {pmid29060385, year = {2017}, author = {Jiang, Yizhou and Qin, Yajie and Kim, IkHwan and Wang, Yuanyuan}, title = {Towards an IoT-based upper limb rehabilitation assessment system.}, journal = {Annual International Conference of the IEEE Engineering in Medicine and Biology Society. IEEE Engineering in Medicine and Biology Society. Annual International Conference}, volume = {2017}, number = {}, pages = {2414-2417}, doi = {10.1109/EMBC.2017.8037343}, pmid = {29060385}, issn = {2694-0604}, mesh = {Humans ; Software ; Stroke ; Stroke Rehabilitation ; Survivors ; *Upper Extremity ; }, abstract = {Rehabilitation of stroke survivors has been increasing in importance in recent years with increase in the occurrence of stroke. However, current clinical classification assessment is time-consuming while the result is not accurate and varies across physicians. This paper introduces an IoT-based upper limb rehabilitation assessment system for stroke survivors based on wireless sensing sub-system, data cloud, computing cloud and software based on Android platform. The system can automatically perform objective assessment. It is designed for home rehabilitation as well as for the concept of graded rehabilitation therapy.}, } @article {pmid29060372, year = {2017}, author = {Kumari, P and Lopez-Benitez, M and Lee, Gyu Myoung and Kim, Tae-Seong and Minhas, AS}, title = {Wearable Internet of Things - from human activity tracking to clinical integration.}, journal = {Annual International Conference of the IEEE Engineering in Medicine and Biology Society. IEEE Engineering in Medicine and Biology Society. 
Annual International Conference}, volume = {2017}, number = {}, pages = {2361-2364}, doi = {10.1109/EMBC.2017.8037330}, pmid = {29060372}, issn = {2694-0604}, mesh = {Cloud Computing ; Human Activities ; Humans ; Internet ; Smartphone ; *Wearable Electronic Devices ; }, abstract = {Wearable devices for human activity tracking have been emerging rapidly. Most of them are capable of sending health statistics to smartphones, smartwatches or smart bands. However, they only provide the data for individual analysis and their data is not integrated into clinical practice. Leveraging on the Internet of Things (IoT), edge and cloud computing technologies, we propose an architecture which is capable of providing cloud based clinical services using human activity data. Such services could supplement the shortage of staff in primary healthcare centers thereby reducing the burden on healthcare service providers. The enormous amount of data created from such services could also be utilized for planning future therapies by studying recovery cycles of existing patients. We provide a prototype based on our architecture and discuss its salient features. We also provide use cases of our system in personalized and home based healthcare services. We propose an International Telecommunication Union based standardization (ITU-T) for our design and discuss future directions in wearable IoT.}, } @article {pmid29060091, year = {2017}, author = {Kanakatte, A and Subramanya, R and Delampady, A and Nayak, R and Purushothaman, B and Gubbi, J}, title = {Cloud solution for histopathological image analysis using region of interest based compression.}, journal = {Annual International Conference of the IEEE Engineering in Medicine and Biology Society. IEEE Engineering in Medicine and Biology Society. 
Annual International Conference}, volume = {2017}, number = {}, pages = {1202-1205}, doi = {10.1109/EMBC.2017.8037046}, pmid = {29060091}, issn = {2694-0604}, mesh = {Algorithms ; *Data Compression ; Microscopy ; }, abstract = {Recent technological gains have led to the adoption of innovative cloud based solutions in medical imaging field. Once the medical image is acquired, it can be viewed, modified, annotated and shared on many devices. This advancement is mainly due to the introduction of Cloud computing in medical domain. Tissue pathology images are complex and are normally collected at different focal lengths using a microscope. The single whole slide image contains many multi resolution images stored in a pyramidal structure with the highest resolution image at the base and the smallest thumbnail image at the top of the pyramid. Highest resolution image will be used for tissue pathology diagnosis and analysis. Transferring and storing such huge images is a big challenge. Compression is a very useful and effective technique to reduce the size of these images. As pathology images are used for diagnosis, no information can be lost during compression (lossless compression). A novel method of extracting the tissue region and applying lossless compression on this region and lossy compression on the empty regions has been proposed in this paper. 
The resulting compression ratio along with lossless compression on tissue region is in acceptable range allowing efficient storage and transmission to and from the Cloud.}, } @article {pmid29058212, year = {2018}, author = {Pardoe, HR and Kuzniecky, R}, title = {NAPR: a Cloud-Based Framework for Neuroanatomical Age Prediction.}, journal = {Neuroinformatics}, volume = {16}, number = {1}, pages = {43-49}, pmid = {29058212}, issn = {1559-0089}, mesh = {Adolescent ; Adult ; Aged ; Aged, 80 and over ; *Aging/pathology/physiology ; Cerebral Cortex/cytology/*diagnostic imaging/physiology ; Child ; *Cloud Computing/trends ; *Databases, Factual/trends ; Forecasting ; Humans ; Magnetic Resonance Imaging/trends ; Middle Aged ; Young Adult ; }, abstract = {The availability of cloud computing services has enabled the widespread adoption of the "software as a service" (SaaS) approach for software distribution, which utilizes network-based access to applications running on centralized servers. In this paper we apply the SaaS approach to neuroimaging-based age prediction. Our system, named "NAPR" (Neuroanatomical Age Prediction using R), provides access to predictive modeling software running on a persistent cloud-based Amazon Web Services (AWS) compute instance. The NAPR framework allows external users to estimate the age of individual subjects using cortical thickness maps derived from their own locally processed T1-weighted whole brain MRI scans. As a demonstration of the NAPR approach, we have developed two age prediction models that were trained using healthy control data from the ABIDE, CoRR, DLBS and NKI Rockland neuroimaging datasets (total N = 2367, age range 6-89 years). The provided age prediction models were trained using (i) relevance vector machines and (ii) Gaussian processes machine learning methods applied to cortical thickness surfaces obtained using Freesurfer v5.3. 
We believe that this transparent approach to out-of-sample evaluation and comparison of neuroimaging age prediction models will facilitate the development of improved age prediction models and allow for robust evaluation of the clinical utility of these methods.}, } @article {pmid29040639, year = {2018}, author = {Peisert, S and Dart, E and Barnett, W and Balas, E and Cuff, J and Grossman, RL and Berman, A and Shankar, A and Tierney, B}, title = {The medical science DMZ: a network design pattern for data-intensive medical science.}, journal = {Journal of the American Medical Informatics Association : JAMIA}, volume = {25}, number = {3}, pages = {267-274}, pmid = {29040639}, issn = {1527-974X}, abstract = {OBJECTIVE: We describe a detailed solution for maintaining high-capacity, data-intensive network flows (eg, 10, 40, 100 Gbps+) in a scientific, medical context while still adhering to security and privacy laws and regulations.

MATERIALS AND METHODS: High-end networking, packet-filter firewalls, network intrusion-detection systems.

RESULTS: We describe a "Medical Science DMZ" concept as an option for secure, high-volume transport of large, sensitive datasets between research institutions over national research networks, and give 3 detailed descriptions of implemented Medical Science DMZs.

DISCUSSION: The exponentially increasing amounts of "omics" data, high-quality imaging, and other rapidly growing clinical datasets have resulted in the rise of biomedical research "Big Data." The storage, analysis, and network resources required to process these data and integrate them into patient diagnoses and treatments have grown to scales that strain the capabilities of academic health centers. Some data are not generated locally and cannot be sustained locally, and shared data repositories such as those provided by the National Library of Medicine, the National Cancer Institute, and international partners such as the European Bioinformatics Institute are rapidly growing. The ability to store and compute using these data must therefore be addressed by a combination of local, national, and industry resources that exchange large datasets. Maintaining data-intensive flows that comply with the Health Insurance Portability and Accountability Act (HIPAA) and other regulations presents a new challenge for biomedical research. We describe a strategy that marries performance and security by borrowing from and redefining the concept of a Science DMZ, a framework that is used in physical sciences and engineering research to manage high-capacity data flows.

CONCLUSION: By implementing a Medical Science DMZ architecture, biomedical researchers can leverage the scale provided by high-performance computer and cloud storage facilities and national high-speed research networks while preserving privacy and meeting regulatory requirements.}, } @article {pmid29039371, year = {2017}, author = {Crisan-Vida, M and Lupse, OS and Stoicu-Tivadar, L and Salvari, D and Catanet, R and Bernad, E}, title = {Regional Monitoring of Cervical Cancer.}, journal = {Studies in health technology and informatics}, volume = {244}, number = {}, pages = {28-32}, pmid = {29039371}, issn = {1879-8365}, mesh = {*Cloud Computing ; Delivery of Health Care ; Female ; Humans ; *Information Systems ; Romania ; Uterine Cervical Neoplasms/*diagnosis ; }, abstract = {Cervical cancer is one of the most important causes of death in women in fertile age in Romania. In order to discover high-risk situations in the first stages of the disease it is important to enhance prevention actions, and ICT, respectively cloud computing and Big Data currently support such activities. The national screening program uses an information system that based on data from different medical units gives feedback related to the women healthcare status and provides statistics and reports. In order to ensure the continuity of care it is updated with HL7 CDA support and cloud computing. 
The current paper presents the solution and several results.}, } @article {pmid29029689, year = {2017}, author = {Wu, CH and Chiu, RK and Yeh, HM and Wang, DW}, title = {Implementation of a cloud-based electronic medical record exchange system in compliance with the integrating healthcare enterprise's cross-enterprise document sharing integration profile.}, journal = {International journal of medical informatics}, volume = {107}, number = {}, pages = {30-39}, doi = {10.1016/j.ijmedinf.2017.09.001}, pmid = {29029689}, issn = {1872-8243}, mesh = {*Cloud Computing ; Computer Systems ; Delivery of Health Care/*standards ; Electronic Health Records/*standards ; Female ; *Health Plan Implementation ; Hospital Information Systems/*standards ; Hospitals/*standards ; Humans ; Male ; *Systems Integration ; Taiwan ; }, abstract = {In 2011, the Ministry of Health and Welfare of Taiwan established the National Electronic Medical Record Exchange Center (EEC) to permit the sharing of medical resources among hospitals. This system can presently exchange electronic medical records (EMRs) among hospitals, in the form of medical imaging reports, laboratory test reports, discharge summaries, outpatient records, and outpatient medication records. Hospitals can send or retrieve EMRs over the virtual private network by connecting to the EEC through a gateway. International standards should be adopted in the EEC to allow users with those standards to take advantage of this exchange service. In this study, a cloud-based EMR-exchange prototyping system was implemented on the basis of the Integrating the Healthcare Enterprise's Cross-Enterprise Document Sharing integration profile and the existing EMR exchange system. RESTful services were used to implement the proposed prototyping system on the Microsoft Azure cloud-computing platform. Four scenarios were created in Microsoft Azure to determine the feasibility and effectiveness of the proposed system. 
The experimental results demonstrated that the proposed system successfully completed EMR exchange under the four scenarios created in Microsoft Azure. Additional experiments were conducted to compare the efficiency of the EMR-exchanging mechanisms of the proposed system with those of the existing EEC system. The experimental results suggest that the proposed RESTful service approach is superior to the Simple Object Access Protocol method currently implemented in the EEC system, according to their respective response times under the four experimental scenarios.}, } @article {pmid29029688, year = {2017}, author = {Chung, CJ and Kuo, YC and Hsieh, YY and Li, TC and Lin, CC and Liang, WM and Liao, LN and Li, CI and Lin, HC}, title = {Subject-enabled analytics model on measurement statistics in health risk expert system for public health informatics.}, journal = {International journal of medical informatics}, volume = {107}, number = {}, pages = {18-29}, doi = {10.1016/j.ijmedinf.2017.08.011}, pmid = {29029688}, issn = {1872-8243}, mesh = {Aged ; *Cloud Computing ; *Disease ; *Expert Systems ; Female ; Health Status ; Humans ; Internet/*statistics & numerical data ; Male ; *Models, Theoretical ; *Public Health Informatics ; *Software ; }, abstract = {PURPOSE: This study applied open source technology to establish a subject-enabled analytics model that can enhance measurement statistics of case studies with the public health data in cloud computing.

METHODS: The infrastructure of the proposed model comprises three domains: 1) the health measurement data warehouse (HMDW) for the case study repository, 2) the self-developed modules of online health risk information statistics (HRIStat) for cloud computing, and 3) the prototype of a Web-based process automation system in statistics (PASIS) for the health risk assessment of case studies with subject-enabled evaluation. The system design employed freeware including Java applications, MySQL, and R packages to drive a health risk expert system (HRES). In the design, the HRIStat modules enforce the typical analytics methods for biomedical statistics, and the PASIS interfaces enable process automation of the HRES for cloud computing. The Web-based model supports both modes, step-by-step analysis and auto-computing process, respectively for preliminary evaluation and real time computation.

RESULTS: The proposed model was evaluated by computing prior researches in relation to the epidemiological measurement of diseases that were caused by either heavy metal exposures in the environment or clinical complications in hospital. The simulation validity was approved by the commercial statistics software. The model was installed in a stand-alone computer and in a cloud-server workstation to verify computing performance for a data amount of more than 230K sets. Both setups reached efficiency of about 10[5] sets per second.

CONCLUSIONS: The Web-based PASIS interface can be used for cloud computing, and the HRIStat module can be flexibly expanded with advanced subjects for measurement statistics. The analytics procedure of the HRES prototype is capable of providing assessment criteria prior to estimating the potential risk to public health.}, } @article {pmid29028892, year = {2018}, author = {Weber, N and Liou, D and Dommer, J and MacMenamin, P and Quiñones, M and Misner, I and Oler, AJ and Wan, J and Kim, L and Coakley McCarthy, M and Ezeji, S and Noble, K and Hurt, DE}, title = {Nephele: a cloud platform for simplified, standardized and reproducible microbiome data analysis.}, journal = {Bioinformatics (Oxford, England)}, volume = {34}, number = {8}, pages = {1411-1413}, pmid = {29028892}, issn = {1367-4811}, mesh = {*Cloud Computing ; Computational Biology/*methods ; Humans ; Metagenomics/methods ; Microbiota/*genetics ; Sequence Analysis, DNA/methods ; Sequence Analysis, RNA ; *Software ; }, abstract = {MOTIVATION: Widespread interest in the study of the microbiome has resulted in data proliferation and the development of powerful computational tools. However, many scientific researchers lack the time, training, or infrastructure to work with large datasets or to install and use command line tools.

RESULTS: The National Institute of Allergy and Infectious Diseases (NIAID) has created Nephele, a cloud-based microbiome data analysis platform with standardized pipelines and a simple web interface for transforming raw data into biological insights. Nephele integrates common microbiome analysis tools as well as valuable reference datasets like the healthy human subjects cohort of the Human Microbiome Project (HMP). Nephele is built on the Amazon Web Services cloud, which provides centralized and automated storage and compute capacity, thereby reducing the burden on researchers and their institutions.

AVAILABILITY AND IMPLEMENTATION: https://nephele.niaid.nih.gov and https://github.com/niaid/Nephele.

CONTACT: darrell.hurt@nih.gov.}, } @article {pmid29020744, year = {2017}, author = {Hung, LH and Shi, K and Wu, M and Young, WC and Raftery, AE and Yeung, KY}, title = {fastBMA: scalable network inference and transitive reduction.}, journal = {GigaScience}, volume = {6}, number = {10}, pages = {1-10}, pmid = {29020744}, issn = {2047-217X}, support = {R01 HD054511/HD/NICHD NIH HHS/United States ; R01 HD070936/HD/NICHD NIH HHS/United States ; U54 HL127624/HL/NHLBI NIH HHS/United States ; }, mesh = {*Algorithms ; Bayes Theorem ; Gene Expression ; *Gene Regulatory Networks ; *Genome, Fungal ; *Genome, Human ; Humans ; Models, Statistical ; Saccharomyces cerevisiae ; }, abstract = {Inferring genetic networks from genome-wide expression data is extremely demanding computationally. We have developed fastBMA, a distributed, parallel, and scalable implementation of Bayesian model averaging (BMA) for this purpose. fastBMA also includes a computationally efficient module for eliminating redundant indirect edges in the network by mapping the transitive reduction to an easily solved shortest-path problem. We evaluated the performance of fastBMA on synthetic data and experimental genome-wide time series yeast and human datasets. When using a single CPU core, fastBMA is up to 100 times faster than the next fastest method, LASSO, with increased accuracy. It is a memory-efficient, parallel, and distributed application that scales to human genome-wide expression data. A 10 000-gene regulation network can be obtained in a matter of hours using a 32-core cloud cluster (2 nodes of 16 cores). fastBMA is a significant improvement over its predecessor ScanBMA. It is more accurate and orders of magnitude faster than other fast network inference methods such as the one based on LASSO. The improved scalability allows it to calculate networks from genome scale data in a reasonable time frame. The transitive reduction method can improve accuracy in denser networks. 
fastBMA is available as code (M.I.T. license) from GitHub (https://github.com/lhhunghimself/fastBMA), as part of the updated networkBMA Bioconductor package (https://www.bioconductor.org/packages/release/bioc/html/networkBMA.html) and as ready-to-deploy Docker images (https://hub.docker.com/r/biodepot/fastbma/).}, } @article {pmid28994719, year = {2017}, author = {Amaxilatis, D and Akrivopoulos, O and Mylonas, G and Chatzigiannakis, I}, title = {An IoT-Based Solution for Monitoring a Fleet of Educational Buildings Focusing on Energy Efficiency.}, journal = {Sensors (Basel, Switzerland)}, volume = {17}, number = {10}, pages = {}, pmid = {28994719}, issn = {1424-8220}, abstract = {Raising awareness among young people and changing their behaviour and habits concerning energy usage is key to achieving sustained energy saving. Additionally, young people are very sensitive to environmental protection so raising awareness among children is much easier than with any other group of citizens. This work examines ways to create an innovative Information & Communication Technologies (ICT) ecosystem (including web-based, mobile, social and sensing elements) tailored specifically for school environments, taking into account both the users (faculty, staff, students, parents) and school buildings, thus motivating and supporting young citizens' behavioural change to achieve greater energy efficiency. A mixture of open-source IoT hardware and proprietary platforms on the infrastructure level, are currently being utilized for monitoring a fleet of 18 educational buildings across 3 countries, comprising over 700 IoT monitoring points. Hereon presented is the system's high-level architecture, as well as several aspects of its implementation, related to the application domain of educational building monitoring and energy efficiency. 
The system is developed based on open-source technologies and services in order to make it capable of providing open IT-infrastructure and support from different commercial hardware/sensor vendors as well as open-source solutions. The system presented can be used to develop and offer new app-based solutions that can be used either for educational purposes or for managing the energy efficiency of the building. The system is replicable and adaptable to settings that may be different than the scenarios envisioned here (e.g., targeting different climate zones), different IT infrastructures and can be easily extended to accommodate integration with other systems. The overall performance of the system is evaluated in real-world environment in terms of scalability, responsiveness and simplicity.}, } @article {pmid28983870, year = {2017}, author = {Wang, S and Chi, H and Yuan, H and Geng, J}, title = {Extraction and representation of common feature from uncertain facial expressions with cloud model.}, journal = {Environmental science and pollution research international}, volume = {24}, number = {36}, pages = {27778-27787}, pmid = {28983870}, issn = {1614-7499}, support = {61472039//National Natural Science Fund of China/ ; 2016YFC0803000 and 2016YFB0502603//National Key Research and Development Program/ ; 2016CX11006//Frontier and interdisciplinary innovation program of Beijing Institute of Technology/ ; }, mesh = {*Algorithms ; *Communication ; *Emotions ; *Facial Expression ; Female ; Humans ; *Uncertainty ; }, abstract = {Human facial expressions are key ingredient to convert an individual's innate emotion in communication. However, the variation of facial expressions affects the reliable identification of human emotions. In this paper, we present a cloud model to extract facial features for representing human emotion. First, the uncertainties in facial expression are analyzed in the context of cloud model. 
The feature extraction and representation algorithm is established under cloud generators. With forward cloud generator, facial expression images can be re-generated as many as we like for visually representing the extracted three features, and each feature shows different roles. The effectiveness of the computing model is tested on Japanese Female Facial Expression database. Three common features are extracted from seven facial expression images. Finally, the paper is concluded and remarked.}, } @article {pmid28983800, year = {2018}, author = {Higashihara, E and Horie, S and Kinoshita, M and Harris, PC and Okegawa, T and Tanbo, M and Hara, H and Yamaguchi, T and Shigemori, K and Kawano, H and Miyazaki, I and Kaname, S and Nutahara, K}, title = {A potentially crucial role of the PKD1 C-terminal tail in renal prognosis.}, journal = {Clinical and experimental nephrology}, volume = {22}, number = {2}, pages = {395-404}, pmid = {28983800}, issn = {1437-7799}, support = {P30 DK090728/DK/NIDDK NIH HHS/United States ; }, mesh = {Disease Progression ; Female ; Genetic Association Studies ; Genetic Predisposition to Disease ; Humans ; Kaplan-Meier Estimate ; Kidney/physiopathology ; Male ; *Mutation ; Mutation Rate ; Phenotype ; Polycystic Kidney, Autosomal Dominant/*genetics/mortality/physiopathology/therapy ; Prognosis ; Proportional Hazards Models ; Protein Domains ; Renal Insufficiency/*genetics/mortality/physiopathology/therapy ; Renal Replacement Therapy ; Risk Factors ; TRPP Cation Channels/chemistry/*genetics ; Tokyo ; }, abstract = {BACKGROUND: Autosomal dominant polycystic disease (ADPKD) often results in renal failure. Recently, allelic influences of PKD1 mutation types on renal survival were extensively investigated. Here, we analyzed integrated influences of PKD1 mutation types and positions on renal survival.

METHODS: We included 338 (82 pedigrees) and 72 (12 pedigrees) patients with PKD1 and PKD2 mutations, respectively, identified through comprehensive gene analysis of 101 probands with ADPKD. Genetic testing was performed using next-generation sequencing, long-range PCR, and multiplex ligation-dependent probe amplification. Pathogenic mutations were identified by a software package-integrated seven databases and provided access to five cloud-based computing systems.

RESULTS: Mean renal survivals of carriers with PKD1 non-truncating-type mutations at positions upstream of G-protein-coupled receptor proteolytic site (GPS-upstream domain), transmembrane domain, or cytoplasmic C-terminal tail (CTT) domain were 70.2, 67.0, and 50.1 years, respectively (P < 0.0001); renal survival was shorter for mutation positions closer to CTT domain, suggesting its crucial role in renal prognosis. Furthermore, in truncating-type mutations, strong inactivation is anticipated on nucleotides downstream from the mutation site, implying CTT domain inactivation irrespective of mutation site. Shorter mean renal survival was found for PKD1 truncating-type than non-truncating-type mutation carriers (P = 0.0348); mean renal survival was not different between PKD1 3'- and 5'-region truncating-type mutation carriers (P = 0.4375), but was shorter in PKD1 3'-region than in 5'-region non-truncating-type mutation carriers (P = 0.0014). Variable strength of CTT domain inactivation might account for these results.

CONCLUSIONS: Aforementioned findings indicate that CTT domain's crucial role in renal prognosis needs further investigation by larger studies (ClinicalTrials.gov; NCT02322385).}, } @article {pmid28971278, year = {2017}, author = {Ray, PP and Dash, D and De, D}, title = {A Systematic Review of Wearable Systems for Cancer Detection: Current State and Challenges.}, journal = {Journal of medical systems}, volume = {41}, number = {11}, pages = {180}, pmid = {28971278}, issn = {1573-689X}, mesh = {Artificial Intelligence ; Humans ; *Neoplasms ; }, abstract = {Rapid growth of sensor and computing platforms have introduced the wearable systems. In recent years, wearable systems have led to new applications across all medical fields. The aim of this review is to present current state-of-the-art approach in the field of wearable system based cancer detection and identify key challenges that resist it from clinical adoption. A total of 472 records were screened and 11 were finally included in this study. Two types of records were studied in this context that includes 45% research articles and 55% manufactured products. The review was performed per PRISMA guidelines where considerations was given to records that were published or reported between 2009 and 2017. The identified records included 4 cancer detecting wearable systems such as breast cancer (36.3%), skin cancer (36.3%), prostate cancer (18.1%), and multi-type cancer (9%). Most works involved sensor based smart systems comprising of microcontroller, Bluetooth module, and smart phone. Few demonstrated Ultra-Wide Band (i.e. UWB) antenna based wearable systems. Skin cancer detecting wearable systems were most comprehensible ones. The current works are gradually progressing with seamless integration of sensory units along with smart networking. However, they lack in cloud computing and long-range communication paradigms. 
Artificial intelligence and machine learning are key ports that need to be attached with current wearable systems. Further, clinical inertia, lack of awareness, and high cost are altogether pulling back the actual growth of such system. It is well comprehended that upon sincere orientation of all identified challenges, wearable systems would emerge as vital alternative to futuristic cancer detection.}, } @article {pmid28968834, year = {2017}, author = {Noguera-Julian, M and Edgil, D and Harrigan, PR and Sandstrom, P and Godfrey, C and Paredes, R}, title = {Next-Generation Human Immunodeficiency Virus Sequencing for Patient Management and Drug Resistance Surveillance.}, journal = {The Journal of infectious diseases}, volume = {216}, number = {suppl_9}, pages = {S829-S833}, pmid = {28968834}, issn = {1537-6613}, mesh = {Anti-HIV Agents/*therapeutic use ; Cloud Computing ; Drug Resistance, Viral/genetics ; HIV/drug effects/*genetics ; HIV Infections/drug therapy/*virology ; High-Throughput Nucleotide Sequencing/*methods ; Humans ; }, abstract = {High-quality, simplified, and low-cost human immunodeficiency virus (HIV) drug resistance tests that are able to provide timely actionable HIV resistance data at individual, population, and programmatic levels are needed to confront the emerging drug-resistant HIV epidemic. Next-generation sequencing technologies embedded in automated cloud-computing analysis environments are ideally suited for such endeavor. Whereas NGS can reduce costs over Sanger sequencing, automated analysis pipelines make NGS accessible to molecular laboratories regardless of the available bioinformatic skills. They can also produce highly structured, high-quality data that could be examined by healthcare officials and program managers on a real-time basis to allow timely public health action. 
Here we discuss the opportunities and challenges of such an approach.}, } @article {pmid32287550, year = {2017}, author = {Sood, SK and Mahajan, I}, title = {Wearable IoT sensor based healthcare system for identifying and controlling chikungunya virus.}, journal = {Computers in industry}, volume = {91}, number = {}, pages = {33-44}, pmid = {32287550}, issn = {1872-6194}, abstract = {Chikungunya is a vector borne disease that spreads quickly in geographically affected areas. Its outbreak results in acute illness that may lead to chronic phase. Chikungunya virus (CHV) diagnosis solutions are not easily accessible and affordable in developing countries. Also old approaches are very slow in identifying and controlling the spread of CHV outbreak. The sudden development and advancement of wearable internet of things (IoT) sensors, fog computing, mobile technology, cloud computing and better internet coverage have enhanced the quality of remote healthcare services. IoT assisted fog health monitoring system can be used to identify possibly infected users from CHV in an early phase of their illness so that the outbreak of CHV can be controlled. Fog computing provides many benefits such as low latency, minimum response time, high mobility, enhanced service quality, location awareness and notification service itself at the edge of the network. In this paper, IoT and fog based healthcare system is proposed to identify and control the outbreak of CHV. Fuzzy-C means (FCM) is used to diagnose the possibly infected users and immediately generate diagnostic and emergency alerts to users from fog layer. Furthermore on cloud server, social network analysis (SNA) is used to represent the state of CHV outbreak. Outbreak role index is calculated from SNA graph which represents the probability of any user to receive or spread the infection. It also generates warning alerts to government and healthcare agencies to control the outbreak of CHV in risk prone or infected regions. 
The experimental results highlight the advantages of using both fog computing and cloud computing services together for achieving network bandwidth efficiency, high quality of service and minimum response time in generation of real time notification as compared to a cloud only model.}, } @article {pmid28961771, year = {2017}, author = {Pan, C and McInnes, G and Deflaux, N and Snyder, M and Bingham, J and Datta, S and Tsao, PS}, title = {Cloud-based interactive analytics for terabytes of genomic variants data.}, journal = {Bioinformatics (Oxford, England)}, volume = {33}, number = {23}, pages = {3709-3715}, pmid = {28961771}, issn = {1367-4811}, support = {R01 HL101388/HL/NHLBI NIH HHS/United States ; P50 HG007735/HG/NHGRI NIH HHS/United States ; P50 HL083800/HL/NHLBI NIH HHS/United States ; P30 DK116074/DK/NIDDK NIH HHS/United States ; T32 LM012409/LM/NLM NIH HHS/United States ; }, mesh = {Data Compression ; Databases, Nucleic Acid ; Gene Frequency ; *Genetic Variation ; Genome, Human ; Genomics/*methods ; Genotype ; Humans ; Software ; Web Browser ; }, abstract = {MOTIVATION: Large scale genomic sequencing is now widely used to decipher questions in diverse realms such as biological function, human diseases, evolution, ecosystems, and agriculture. With the quantity and diversity these data harbor, a robust and scalable data handling and analysis solution is desired.

RESULTS: We present interactive analytics using a cloud-based columnar database built on Dremel to perform information compression, comprehensive quality controls, and biological information retrieval in large volumes of genomic data. We demonstrate such Big Data computing paradigms can provide orders of magnitude faster turnaround for common genomic analyses, transforming long-running batch jobs submitted via a Linux shell into questions that can be asked from a web browser in seconds. Using this method, we assessed a study population of 475 deeply sequenced human genomes for genomic call rate, genotype and allele frequency distribution, variant density across the genome, and pharmacogenomic information.

AVAILABILITY AND IMPLEMENTATION: Our analysis framework is implemented in Google Cloud Platform and BigQuery. Codes are available at https://github.com/StanfordBioinformatics/mvp_aaa_codelabs.

CONTACT: cuiping@stanford.edu or ptsao@stanford.edu.

SUPPLEMENTARY INFORMATION: Supplementary data are available at Bioinformatics online.}, } @article {pmid28953943, year = {2017}, author = {Midekisa, A and Holl, F and Savory, DJ and Andrade-Pacheco, R and Gething, PW and Bennett, A and Sturrock, HJW}, title = {Mapping land cover change over continental Africa using Landsat and Google Earth Engine cloud computing.}, journal = {PloS one}, volume = {12}, number = {9}, pages = {e0184926}, pmid = {28953943}, issn = {1932-6203}, support = {MR/K00669X/1/MRC_/Medical Research Council/United Kingdom ; }, mesh = {Africa ; *Cloud Computing ; *Earth, Planet ; *Geographic Information Systems ; Models, Theoretical ; Spacecraft ; }, abstract = {Quantifying and monitoring the spatial and temporal dynamics of the global land cover is critical for better understanding many of the Earth's land surface processes. However, the lack of regularly updated, continental-scale, and high spatial resolution (30 m) land cover data limit our ability to better understand the spatial extent and the temporal dynamics of land surface changes. Despite the free availability of high spatial resolution Landsat satellite data, continental-scale land cover mapping using high resolution Landsat satellite data was not feasible until now due to the need for high-performance computing to store, process, and analyze this large volume of high resolution satellite data. In this study, we present an approach to quantify continental land cover and impervious surface changes over a long period of time (15 years) using high resolution Landsat satellite observations and Google Earth Engine cloud computing platform. 
The approach applied here to overcome the computational challenges of handling big earth observation data by using cloud computing can help scientists and practitioners who lack high-performance computational resources.}, } @article {pmid28951384, year = {2017}, author = {Chen, YH and Hung, CS and Huang, CC and Hung, YC and Hwang, JJ and Ho, YL}, title = {Atrial Fibrillation Screening in Nonmetropolitan Areas Using a Telehealth Surveillance System With an Embedded Cloud-Computing Algorithm: Prospective Pilot Study.}, journal = {JMIR mHealth and uHealth}, volume = {5}, number = {9}, pages = {e135}, pmid = {28951384}, issn = {2291-5222}, abstract = {BACKGROUND: Atrial fibrillation (AF) is a common form of arrhythmia that is associated with increased risk of stroke and mortality. Detecting AF before the first complication occurs is a recognized priority. No previous studies have examined the feasibility of undertaking AF screening using a telehealth surveillance system with an embedded cloud-computing algorithm; we address this issue in this study.

OBJECTIVE: The objective of this study was to evaluate the feasibility of AF screening in nonmetropolitan areas using a telehealth surveillance system with an embedded cloud-computing algorithm.

METHODS: We conducted a prospective AF screening study in a nonmetropolitan area using a single-lead electrocardiogram (ECG) recorder. All ECG measurements were reviewed on the telehealth surveillance system and interpreted by the cloud-computing algorithm and a cardiologist. The process of AF screening was evaluated with a satisfaction questionnaire.

RESULTS: Between March 11, 2016 and August 31, 2016, 967 ECGs were recorded from 922 residents in nonmetropolitan areas. A total of 22 (2.4%, 22/922) residents with AF were identified by the physician's ECG interpretation, and only 0.2% (2/967) of ECGs contained significant artifacts. The novel cloud-computing algorithm for AF detection had a sensitivity of 95.5% (95% CI 77.2%-99.9%) and specificity of 97.7% (95% CI 96.5%-98.5%). The overall satisfaction score for the process of AF screening was 92.1%.

CONCLUSIONS: AF screening in nonmetropolitan areas using a telehealth surveillance system with an embedded cloud-computing algorithm is feasible.}, } @article {pmid28949702, year = {2017}, author = {Huang, HL and Zhao, Q and Ma, X and Liu, C and Su, ZE and Wang, XL and Li, L and Liu, NL and Sanders, BC and Lu, CY and Pan, JW}, title = {Experimental Blind Quantum Computing for a Classical Client.}, journal = {Physical review letters}, volume = {119}, number = {5}, pages = {050503}, doi = {10.1103/PhysRevLett.119.050503}, pmid = {28949702}, issn = {1079-7114}, abstract = {To date, blind quantum computing demonstrations require clients to have weak quantum devices. Here we implement a proof-of-principle experiment for completely classical clients. Via classically interacting with two quantum servers that share entanglement, the client accomplishes the task of having the number 15 factorized by servers who are denied information about the computation itself. This concealment is accompanied by a verification protocol that tests servers' honesty and correctness. Our demonstration shows the feasibility of completely classical clients and thus is a key milestone towards secure cloud quantum computing.}, } @article {pmid28934126, year = {2017}, author = {Huang, D and Xu, C and Zhao, D and Song, W and He, Q}, title = {A Multi-Objective Partition Method for Marine Sensor Networks Based on Degree of Event Correlation.}, journal = {Sensors (Basel, Switzerland)}, volume = {17}, number = {10}, pages = {}, pmid = {28934126}, issn = {1424-8220}, abstract = {Existing marine sensor networks acquire data from sea areas that are geographically divided, and store the data independently in their affiliated sea area data centers. In the case of marine events across multiple sea areas, the current network structure needs to retrieve data from multiple data centers, and thus severely affects real-time decision making. 
In this study, in order to provide a fast data retrieval service for a marine sensor network, we use all the marine sensors as the vertices, establish the edge based on marine events, and abstract the marine sensor network as a graph. Then, we construct a multi-objective balanced partition method to partition the abstract graph into multiple regions and store them in the cloud computing platform. This method effectively increases the correlation of the sensors and decreases the retrieval cost. On this basis, an incremental optimization strategy is designed to dynamically optimize existing partitions when new sensors are added into the network. Experimental results show that the proposed method can achieve the optimal layout for distributed storage in the process of disaster data retrieval in the China Sea area, and effectively optimize the result of partitions when new buoys are deployed, which eventually will provide efficient data access service for marine events.}, } @article {pmid28932174, year = {2017}, author = {Chow, JCL}, title = {Internet-based computer technology on radiotherapy.}, journal = {Reports of practical oncology and radiotherapy : journal of Greatpoland Cancer Center in Poznan and Polish Society of Radiation Oncology}, volume = {22}, number = {6}, pages = {455-462}, pmid = {28932174}, issn = {1507-1367}, abstract = {Recent rapid development of Internet-based computer technologies has made possible many novel applications in radiation dose delivery. However, translational speed of applying these new technologies in radiotherapy could hardly catch up due to the complex commissioning process and quality assurance protocol. Implementing novel Internet-based technology in radiotherapy requires corresponding design of algorithm and infrastructure of the application, set up of related clinical policies, purchase and development of software and hardware, computer programming and debugging, and national to international collaboration. 
Although such implementation processes are time consuming, some recent computer advancements in the radiation dose delivery are still noticeable. In this review, we will present the background and concept of some recent Internet-based computer technologies such as cloud computing, big data processing and machine learning, followed by their potential applications in radiotherapy, such as treatment planning and dose delivery. We will also discuss the current progress of these applications and their impacts on radiotherapy. We will explore and evaluate the expected benefits and challenges in implementation as well.}, } @article {pmid28868134, year = {2017}, author = {Colombo, AR and J Triche, T and Ramsingh, G}, title = {Arkas: Rapid reproducible RNAseq analysis.}, journal = {F1000Research}, volume = {6}, number = {}, pages = {586}, pmid = {28868134}, issn = {2046-1402}, abstract = {The recently introduced Kallisto pseudoaligner has radically simplified the quantification of transcripts in RNA-sequencing experiments. We offer cloud-scale RNAseq pipelines Arkas-Quantification, and Arkas-Analysis available within Illumina's BaseSpace cloud application platform which expedites Kallisto preparatory routines, reliably calculates differential expression, and performs gene-set enrichment of REACTOME pathways. Due to inherent inefficiencies of scale, Illumina's BaseSpace computing platform offers a massively parallel distributive environment improving data management services and data importing. Arkas-Quantification deploys Kallisto for parallel cloud computations and is conveniently integrated downstream from the BaseSpace Sequence Read Archive (SRA) import/conversion application titled SRA Import. Arkas-Analysis annotates the Kallisto results by extracting structured information directly from source FASTA files with per-contig metadata, calculates the differential expression and gene-set enrichment analysis on both coding genes and transcripts. 
The Arkas cloud pipeline supports ENSEMBL transcriptomes and can be used downstream from the SRA Import facilitating raw sequencing importing, SRA FASTQ conversion, RNA quantification and analysis steps.}, } @article {pmid28916508, year = {2017}, author = {Yu, H and Gao, F and Jiang, L and Ma, S}, title = {Development of a Whole Slide Imaging System on Smartphones and Evaluation With Frozen Section Samples.}, journal = {JMIR mHealth and uHealth}, volume = {5}, number = {9}, pages = {e132}, pmid = {28916508}, issn = {2291-5222}, abstract = {BACKGROUND: The aim was to develop scalable Whole Slide Imaging (sWSI), a WSI system based on mainstream smartphones coupled with regular optical microscopes. This ultra-low-cost solution should offer diagnostic-ready imaging quality on par with standalone scanners, supporting both oil and dry objective lenses of different magnifications, and reasonably high throughput. These performance metrics should be evaluated by expert pathologists and match those of high-end scanners.

OBJECTIVE: The aim was to develop scalable Whole Slide Imaging (sWSI), a whole slide imaging system based on smartphones coupled with optical microscopes. This ultra-low-cost solution should offer diagnostic-ready imaging quality on par with standalone scanners, supporting both oil and dry objective lenses of different magnifications. All performance metrics should be evaluated by expert pathologists and match those of high-end scanners.

METHODS: In the sWSI design, the digitization process is split asynchronously between light-weight clients on smartphones and powerful cloud servers. The client apps automatically capture FoVs at up to 12-megapixel resolution and process them in real-time to track the operation of users, then give instant feedback of guidance. The servers first restitch each pair of FoVs, then automatically correct the unknown nonlinear distortion introduced by the lens of the smartphone on the fly, based on pair-wise stitching, before finally combining all FoVs into one gigapixel VS for each scan. These VSs can be viewed using Internet browsers anywhere. In the evaluation experiment, 100 frozen section slides from patients randomly selected among in-patients of the participating hospital were scanned by both a high-end Leica scanner and sWSI. All VSs were examined by senior pathologists whose diagnoses were compared against those made using optical microscopy as ground truth to evaluate the image quality.

RESULTS: The sWSI system is developed for both Android and iPhone smartphones and is currently being offered to the public. The image quality is reliable and throughput is approximately 1 FoV per second, yielding a 15-by-15 mm slide under 20X object lens in approximately 30-35 minutes, with little training required for the operator. The expected cost for setup is approximately US $100 and scanning each slide costs between US $1 and $10, making sWSI highly cost-effective for infrequent or low-throughput usage. In the clinical evaluation of sample-wise diagnostic reliability, average accuracy scores achieved by sWSI-scan-based diagnoses were as follows: 0.78 for breast, 0.88 for uterine corpus, 0.68 for thyroid, and 0.50 for lung samples. The respective low-sensitivity rates were 0.05, 0.05, 0.13, and 0.25 while the respective low-specificity rates were 0.18, 0.08, 0.20, and 0.25. The participating pathologists agreed that the overall quality of sWSI was generally on par with that produced by high-end scanners, and did not affect diagnosis in most cases. Pathologists confirmed that sWSI is reliable enough for standard diagnoses of most tissue categories, while it can be used for quick screening of difficult cases.

CONCLUSIONS: As an ultra-low-cost alternative to whole slide scanners, diagnosis-ready VS quality and robustness for commercial usage is achieved in the sWSI solution. Operated on main-stream smartphones installed on normal optical microscopes, sWSI readily offers affordable and reliable WSI to resource-limited or infrequent clinical users.}, } @article {pmid28900621, year = {2017}, author = {Ho, CK and Chen, FC and Chen, YL and Wang, HT and Lee, CH and Chung, WJ and Lin, CJ and Hsueh, SK and Hung, SC and Wu, KH and Liu, CF and Kung, CT and Cheng, CI}, title = {Using a Cloud Computing System to Reduce Door-to-Balloon Time in Acute ST-Elevation Myocardial Infarction Patients Transferred for Percutaneous Coronary Intervention.}, journal = {BioMed research international}, volume = {2017}, number = {}, pages = {2963172}, pmid = {28900621}, issn = {2314-6141}, mesh = {Aged ; Angioplasty, Balloon, Coronary/*methods ; Cloud Computing ; Electrocardiography ; *Emergency Service, Hospital ; Female ; Humans ; Male ; Middle Aged ; Percutaneous Coronary Intervention/*methods ; ST Elevation Myocardial Infarction/physiopathology/*therapy ; Time Factors ; Time-to-Treatment ; Treatment Outcome ; }, abstract = {BACKGROUND: This study evaluated the impact on clinical outcomes using a cloud computing system to reduce percutaneous coronary intervention hospital door-to-balloon (DTB) time for ST segment elevation myocardial infarction (STEMI).

METHODS: A total of 369 patients before and after implementation of the transfer protocol were enrolled. Of these patients, 262 were transferred through the protocol while the other 107 patients were transferred through the traditional referral process.

RESULTS: There were no significant differences in DTB time, pain to door of STEMI receiving center arrival time, and pain to balloon time between the two groups. Pain to electrocardiography time in patients with Killip I/II and catheterization laboratory to balloon time in patients with Killip III/IV were significantly reduced in the transfer protocol group compared to the traditional referral process group (both p < 0.05). There were also no remarkable differences in the complication rate and 30-day mortality between the two groups. The multivariate analysis revealed that the independent predictors of 30-day mortality were elderly patients, advanced Killip score, and higher level of troponin-I.

CONCLUSIONS: This study showed that patients transferred through our present protocol could reduce pain to electrocardiography and catheterization laboratory to balloon time in Killip I/II and III/IV patients separately. However, this study showed that using a cloud computing system in our present protocol did not reduce DTB time.}, } @article {pmid28884169, year = {2017}, author = {Bao, S and Plassard, AJ and Landman, BA and Gokhale, A}, title = {Cloud Engineering Principles and Technology Enablers for Medical Image Processing-as-a-Service.}, journal = {Proceedings of the IEEE International Conference on Cloud Engineering. IEEE International Conference on Cloud Engineering}, volume = {2017}, number = {}, pages = {127-137}, pmid = {28884169}, support = {T32 LM012412/LM/NLM NIH HHS/United States ; UL1 RR024975/RR/NCRR NIH HHS/United States ; UL1 TR000445/TR/NCATS NIH HHS/United States ; }, abstract = {Traditional in-house, laboratory-based medical imaging studies use hierarchical data structures (e.g., NFS file stores) or databases (e.g., COINS, XNAT) for storage and retrieval. The resulting performance from these approaches is, however, impeded by standard network switches since they can saturate network bandwidth during transfer from storage to processing nodes for even moderate-sized studies. To that end, a cloud-based "medical image processing-as-a-service" offers promise in utilizing the ecosystem of Apache Hadoop, which is a flexible framework providing distributed, scalable, fault tolerant storage and parallel computational modules, and HBase, which is a NoSQL database built atop Hadoop's distributed file system. Despite this promise, HBase's load distribution strategy of region split and merge is detrimental to the hierarchical organization of imaging data (e.g., project, subject, session, scan, slice). 
This paper makes two contributions to address these concerns by describing key cloud engineering principles and technology enhancements we made to the Apache Hadoop ecosystem for medical imaging applications. First, we propose a row-key design for HBase, which is a necessary step that is driven by the hierarchical organization of imaging data. Second, we propose a novel data allocation policy within HBase to strongly enforce collocation of hierarchically related imaging data. The proposed enhancements accelerate data processing by minimizing network usage and localizing processing to machines where the data already exist. Moreover, our approach is amenable to the traditional scan, subject, and project-level analysis procedures, and is compatible with standard command line/scriptable image processing software. Experimental results for an illustrative sample of imaging data reveals that our new HBase policy results in a three-fold time improvement in conversion of classic DICOM to NiFTI file formats when compared with the default HBase region split policy, and nearly a six-fold improvement over a commonly available network file system (NFS) approach even for relatively small file sets. 
Moreover, file access latency is lower than network attached storage.}, } @article {pmid28861861, year = {2017}, author = {Liu, Y and Tang, Y and Gao, X}, title = {GATE Monte Carlo simulation of dose distribution using MapReduce in a cloud computing environment.}, journal = {Australasian physical & engineering sciences in medicine}, volume = {40}, number = {4}, pages = {777-783}, doi = {10.1007/s13246-017-0580-0}, pmid = {28861861}, issn = {1879-5447}, support = {81571772//National Natural Science Foundation of China/ ; YZ201342//Research Equipment Development Program of Chinese Academy of Sciences/ ; SYG201501//Science and Technology Program of Suzhou/ ; }, mesh = {*Cloud Computing ; *Computer Simulation ; Dose-Response Relationship, Radiation ; *Monte Carlo Method ; Photons ; Time Factors ; }, abstract = {The GATE Monte Carlo simulation platform has good application prospects of treatment planning and quality assurance. However, accurate dose calculation using GATE is time consuming. The purpose of this study is to implement a novel cloud computing method for accurate GATE Monte Carlo simulation of dose distribution using MapReduce. An Amazon Machine Image installed with Hadoop and GATE is created to set up Hadoop clusters on Amazon Elastic Compute Cloud (EC2). Macros, the input files for GATE, are split into a number of self-contained sub-macros. Through Hadoop Streaming, the sub-macros are executed by GATE in Map tasks and the sub-results are aggregated into final outputs in Reduce tasks. As an evaluation, GATE simulations were performed in a cubical water phantom for X-ray photons of 6 and 18 MeV. The parallel simulation on the cloud computing platform is as accurate as the single-threaded simulation on a local server and the simulation correctness is not affected by the failure of some worker nodes. The cloud-based simulation time is approximately inversely proportional to the number of worker nodes. 
For the simulation of 10 million photons on a cluster with 64 worker nodes, time decreases of 41× and 32× were achieved compared to the single worker node case and the single-threaded case, respectively. The test of Hadoop's fault tolerance showed that the simulation correctness was not affected by the failure of some worker nodes. The results verify that the proposed method provides a feasible cloud computing solution for GATE.}, } @article {pmid28854616, year = {2017}, author = {Kim, B and Ali, T and Lijeron, C and Afgan, E and Krampis, K}, title = {Bio-Docklets: virtualization containers for single-step execution of NGS pipelines.}, journal = {GigaScience}, volume = {6}, number = {8}, pages = {1-7}, pmid = {28854616}, issn = {2047-217X}, mesh = {Chromatin Immunoprecipitation ; Computational Biology/*methods ; High-Throughput Nucleotide Sequencing ; Humans ; Reproducibility of Results ; Sequence Analysis, DNA/methods ; Sequence Analysis, RNA/methods ; *Software ; User-Computer Interface ; Web Browser ; Workflow ; }, abstract = {Processing of next-generation sequencing (NGS) data requires significant technical skills, involving installation, configuration, and execution of bioinformatics data pipelines, in addition to specialized postanalysis visualization and data mining software. In order to address some of these challenges, developers have leveraged virtualization containers toward seamless deployment of preconfigured bioinformatics software and pipelines on any computational platform. We present an approach for abstracting the complex data operations of multistep, bioinformatics pipelines for NGS data analysis. As examples, we have deployed 2 pipelines for RNA sequencing and chromatin immunoprecipitation sequencing, preconfigured within Docker virtualization containers we call Bio-Docklets. Each Bio-Docklet exposes a single data input and output endpoint and from a user perspective, running the pipelines as simply as running a single bioinformatics tool. 
This is achieved using a "meta-script" that automatically starts the Bio-Docklets and controls the pipeline execution through the BioBlend software library and the Galaxy Application Programming Interface. The pipeline output is postprocessed by integration with the Visual Omics Explorer framework, providing interactive data visualizations that users can access through a web browser. Our goal is to enable easy access to NGS data analysis pipelines for nonbioinformatics experts on any computing environment, whether a laboratory workstation, university computer cluster, or a cloud service provider. Beyond end users, the Bio-Docklets also enables developers to programmatically deploy and run a large number of pipeline instances for concurrent analysis of multiple datasets.}, } @article {pmid28850104, year = {2017}, author = {Suárez-Albela, M and Fernández-Caramés, TM and Fraga-Lamas, P and Castedo, L}, title = {A Practical Evaluation of a High-Security Energy-Efficient Gateway for IoT Fog Computing Applications.}, journal = {Sensors (Basel, Switzerland)}, volume = {17}, number = {9}, pages = {}, pmid = {28850104}, issn = {1424-8220}, abstract = {Fog computing extends cloud computing to the edge of a network enabling new Internet of Things (IoT) applications and services, which may involve critical data that require privacy and security. In an IoT fog computing system, three elements can be distinguished: IoT nodes that collect data, the cloud, and interconnected IoT gateways that exchange messages with the IoT nodes and with the cloud. This article focuses on securing IoT gateways, which are assumed to be constrained in terms of computational resources, but that are able to offload some processing from the cloud and to reduce the latency in the responses to the IoT nodes. 
However, it is usually taken for granted that IoT gateways have direct access to the electrical grid, which is not always the case: in mission-critical applications like natural disaster relief or environmental monitoring, it is common to deploy IoT nodes and gateways in large areas where electricity comes from solar or wind energy that charge the batteries that power every device. In this article, how to secure IoT gateway communications while minimizing power consumption is analyzed. The throughput and power consumption of Rivest-Shamir-Adleman (RSA) and Elliptic Curve Cryptography (ECC) are considered, since they are really popular, but have not been thoroughly analyzed when applied to IoT scenarios. Moreover, the most widespread Transport Layer Security (TLS) cipher suites use RSA as the main public key-exchange algorithm, but the key sizes needed are not practical for most IoT devices and cannot be scaled to high security levels. In contrast, ECC represents a much lighter and scalable alternative. Thus, RSA and ECC are compared for equivalent security levels, and power consumption and data throughput are measured using a testbed of IoT gateways. The measurements obtained indicate that, in the specific fog computing scenario proposed, ECC is clearly a much better alternative than RSA, obtaining energy consumption reductions of up to 50% and a data throughput that doubles RSA in most scenarios. These conclusions are then corroborated by a frame temporal analysis of Ethernet packets. 
In addition, current data compression algorithms are evaluated, concluding that, when dealing with the small payloads related to IoT applications, they do not pay off in terms of real data throughput and power consumption.}, } @article {pmid28837856, year = {2017}, author = {Bailey, SF and Scheible, MK and Williams, C and Silva, DSBS and Hoggan, M and Eichman, C and Faith, SA}, title = {Secure and robust cloud computing for high-throughput forensic microsatellite sequence analysis and databasing.}, journal = {Forensic science international. Genetics}, volume = {31}, number = {}, pages = {40-47}, doi = {10.1016/j.fsigen.2017.08.008}, pmid = {28837856}, issn = {1878-0326}, mesh = {*Cloud Computing ; Computer Security ; *Databases, Nucleic Acid ; Electronic Data Processing ; *High-Throughput Nucleotide Sequencing ; Humans ; *Microsatellite Repeats ; *Sequence Analysis, DNA ; User-Computer Interface ; }, abstract = {Next-generation Sequencing (NGS) is a rapidly evolving technology with demonstrated benefits for forensic genetic applications, and the strategies to analyze and manage the massive NGS datasets are currently in development. Here, the computing, data storage, connectivity, and security resources of the Cloud were evaluated as a model for forensic laboratory systems that produce NGS data. A complete front-to-end Cloud system was developed to upload, process, and interpret raw NGS data using a web browser dashboard. The system was extensible, demonstrating analysis capabilities of autosomal and Y-STRs from a variety of NGS instrumentation (Illumina MiniSeq and MiSeq, and Oxford Nanopore MinION). NGS data for STRs were concordant with standard reference materials previously characterized with capillary electrophoresis and Sanger sequencing. The computing power of the Cloud was implemented with on-demand auto-scaling to allow multiple file analysis in tandem. 
The system was designed to store resulting data in a relational database, amenable to downstream sample interpretations and databasing applications following the most recent guidelines in nomenclature for sequenced alleles. Lastly, a multi-layered Cloud security architecture was tested and showed that industry standards for securing data and computing resources were readily applied to the NGS system without disadvantageous effects for bioinformatic analysis, connectivity or data storage/retrieval. The results of this study demonstrate the feasibility of using Cloud-based systems for secured NGS data analysis, storage, databasing, and multi-user distributed connectivity.}, } @article {pmid28826509, year = {2017}, author = {Bruder, J}, title = {Infrastructural intelligence: Contemporary entanglements between neuroscience and AI.}, journal = {Progress in brain research}, volume = {233}, number = {}, pages = {101-128}, doi = {10.1016/bs.pbr.2017.06.004}, pmid = {28826509}, issn = {1875-7855}, mesh = {*Artificial Intelligence ; Brain/*physiology ; *Cognition ; Creativity ; Empathy ; Humans ; Neurosciences ; }, abstract = {In this chapter, I reflect on contemporary entanglements between artificial intelligence and the neurosciences by tracing the development of Google's recent DeepMind algorithms back to their roots in neuroscientific studies of episodic memory and imagination. Google promotes a new form of "infrastructural intelligence," which excels by constantly reassessing its cognitive architecture in exchange with a cloud of data that surrounds it, and exhibits putatively human capacities such as intuition. I argue that such (re)alignments of biological and artificial intelligence have been enabled by a paradigmatic infrastructuralization of the brain in contemporary neuroscience. 
This infrastructuralization is based in methodologies that epistemically liken the brain to complex systems of an entirely different scale (i.e., global logistics) and has given rise to diverse research efforts that target the neuronal infrastructures of higher cognitive functions such as empathy and creativity. What is at stake in this process is no less than the shape of brains to come and a revised understanding of the intelligent and creative social subject.}, } @article {pmid28822042, year = {2017}, author = {Yang, CT and Liu, JC and Chen, ST and Lu, HW}, title = {Implementation of a Big Data Accessing and Processing Platform for Medical Records in Cloud.}, journal = {Journal of medical systems}, volume = {41}, number = {10}, pages = {149}, pmid = {28822042}, issn = {1573-689X}, mesh = {Cloud Computing ; *Medical Records ; Software ; }, abstract = {Big Data analysis has become a key factor of being innovative and competitive. Along with population growth worldwide and the trend aging of population in developed countries, the rate of the national medical care usage has been increasing. Due to the fact that individual medical data are usually scattered in different institutions and their data formats are varied, to integrate those data that continue increasing is challenging. In order to have scalable load capacity for these data platforms, we must build them in good platform architecture. Some issues must be considered in order to use the cloud computing to quickly integrate big medical data into database for easy analyzing, searching, and filtering big data to obtain valuable information.This work builds a cloud storage system with HBase of Hadoop for storing and analyzing big data of medical records and improves the performance of importing data into database. The data of medical records are stored in HBase database platform for big data analysis. 
This system performs distributed computing on medical records data processing through Hadoop MapReduce programming, and to provide functions, including keyword search, data filtering, and basic statistics for HBase database. This system uses the Put with the single-threaded method and the CompleteBulkload mechanism to import medical data. From the experimental results, we find that when the file size is less than 300MB, the Put with single-threaded method is used and when the file size is larger than 300MB, the CompleteBulkload mechanism is used to improve the performance of data import into database. This system provides a web interface that allows users to search data, filter out meaningful information through the web, and analyze and convert data in suitable forms that will be helpful for medical staff and institutions.}, } @article {pmid28812228, year = {2018}, author = {Kolhar, M}, title = {E-commerce Review System to Detect False Reviews.}, journal = {Science and engineering ethics}, volume = {24}, number = {5}, pages = {1577-1588}, doi = {10.1007/s11948-017-9959-2}, pmid = {28812228}, issn = {1471-5546}, support = {2017/01/7977.//Deanship of Scientific Research at Prince Sattam Bin Abdulaziz University/International ; }, mesh = {*Commerce ; *Feedback ; Fraud/*prevention & control ; Humans ; *Internet ; }, abstract = {E-commerce sites have been doing profitable business since their induction in high-speed and secured networks. Moreover, they continue to influence consumers through various methods. One of the most effective methods is the e-commerce review rating system, in which consumers provide review ratings for the products used. However, almost all e-commerce review rating systems are unable to provide cumulative review ratings. Furthermore, review ratings are influenced by positive and negative malicious feedback ratings, collectively called false reviews. 
In this paper, we proposed an e-commerce review system framework developed using the cumulative sum method to detect and remove malicious review ratings.}, } @article {pmid28805797, year = {2017}, author = {Carrasco-Ramiro, F and Peiró-Pastor, R and Aguado, B}, title = {Human genomics projects and precision medicine.}, journal = {Gene therapy}, volume = {24}, number = {9}, pages = {551-561}, doi = {10.1038/gt.2017.77}, pmid = {28805797}, issn = {1476-5462}, mesh = {Genomics/*methods ; *Human Genome Project ; Precision Medicine/*methods/trends ; }, abstract = {The completion of the Human Genome Project (HGP) in 2001 opened the floodgates to a deeper understanding of medicine. There are dozens of HGP-like projects which involve from a few tens to several million genomes currently in progress, which vary from having specialized goals or a more general approach. However, data generation, storage, management and analysis in public and private cloud computing platforms have raised concerns about privacy and security. The knowledge gained from further research has changed the field of genomics and is now slowly permeating into clinical medicine. The new precision (personalized) medicine, where genome sequencing and data analysis are essential components, allows tailored diagnosis and treatment according to the information from the patient's own genome and specific environmental factors. P4 (predictive, preventive, personalized and participatory) medicine is introducing new concepts, challenges and opportunities. 
This review summarizes current sequencing technologies, concentrates on ongoing human genomics projects, and provides some examples in which precision medicine has already demonstrated clinical impact in diagnosis and/or treatment.}, } @article {pmid28800380, year = {2017}, author = {Choi, TM and Lambert, JH}, title = {Advances in Risk Analysis with Big Data.}, journal = {Risk analysis : an official publication of the Society for Risk Analysis}, volume = {37}, number = {8}, pages = {1435-1442}, doi = {10.1111/risa.12859}, pmid = {28800380}, issn = {1539-6924}, abstract = {With cloud computing, Internet-of-things, wireless sensors, social media, fast storage and retrieval, etc., organizations and enterprises have access to unprecedented amounts and varieties of data. Current risk analysis methodology and applications are experiencing related advances and breakthroughs. For example, highway operations data are readily available, and making use of them reduces risks of traffic crashes and travel delays. Massive data of financial and enterprise systems support decision making under risk by individuals, industries, regulators, etc. In this introductory article, we first discuss the meaning of big data for risk analysis. We then examine recent advances in risk analysis with big data in several topic areas. For each area, we identify and introduce the relevant articles that are featured in the special issue. We conclude with a discussion on future research opportunities.}, } @article {pmid28794902, year = {2017}, author = {Hasan, S and Valli Kumari, V}, title = {Generic-distributed framework for cloud services marketplace based on unified ontology.}, journal = {Journal of advanced research}, volume = {8}, number = {6}, pages = {569-576}, pmid = {28794902}, issn = {2090-1232}, abstract = {Cloud computing is a pattern for delivering ubiquitous and on demand computing resources based on pay-as-you-use financial model. 
Typically, cloud providers advertise cloud service descriptions in various formats on the Internet. On the other hand, cloud consumers use available search engines (Google and Yahoo) to explore cloud service descriptions and find the adequate service. Unfortunately, general purpose search engines are not designed to provide a small and complete set of results, which makes the process a big challenge. This paper presents a generic-distributed framework for cloud services marketplace to automate cloud services discovery and selection process, and remove the barriers between service providers and consumers. Additionally, this work implements two instances of generic framework by adopting two different matching algorithms; namely dominant and recessive attributes algorithm borrowed from gene science and semantic similarity algorithm based on unified cloud service ontology. Finally, this paper presents unified cloud services ontology and models the real-life cloud services according to the proposed ontology. To the best of the authors' knowledge, this is the first attempt to build a cloud services marketplace where cloud providers and cloud consumers can trade cloud services as utilities. In comparison with existing work, semantic approach reduced the execution time by 20% and maintained the same values for all other parameters. On the other hand, dominant and recessive attributes approach reduced the execution time by 57% but showed lower value for recall.}, } @article {pmid28794828, year = {2017}, author = {Yang, A and Troup, M and Ho, JWK}, title = {Scalability and Validation of Big Data Bioinformatics Software.}, journal = {Computational and structural biotechnology journal}, volume = {15}, number = {}, pages = {379-386}, pmid = {28794828}, issn = {2001-0370}, abstract = {This review examines two important aspects that are central to modern big data bioinformatics analysis - software scalability and validity. 
We argue that not only are the issues of scalability and validation common to all big data bioinformatics analyses, they can be tackled by conceptually related methodological approaches, namely divide-and-conquer (scalability) and multiple executions (validation). Scalability is defined as the ability for a program to scale based on workload. It has always been an important consideration when developing bioinformatics algorithms and programs. Nonetheless the surge of volume and variety of biological and biomedical data has posed new challenges. We discuss how modern cloud computing and big data programming frameworks such as MapReduce and Spark are being used to effectively implement divide-and-conquer in a distributed computing environment. Validation of software is another important issue in big data bioinformatics that is often ignored. Software validation is the process of determining whether the program under test fulfils the task for which it was designed. Determining the correctness of the computational output of big data bioinformatics software is especially difficult due to the large input space and complex algorithms involved. We discuss how state-of-the-art software testing techniques that are based on the idea of multiple executions, such as metamorphic testing, can be used to implement an effective bioinformatics quality assurance strategy. 
We hope this review will raise awareness of these critical issues in bioinformatics.}, } @article {pmid28792476, year = {2017}, author = {Mora, H and Gilart-Iglesias, V and Pérez-Del Hoyo, R and Andújar-Montoya, MD}, title = {A Comprehensive System for Monitoring Urban Accessibility in Smart Cities.}, journal = {Sensors (Basel, Switzerland)}, volume = {17}, number = {8}, pages = {}, pmid = {28792476}, issn = {1424-8220}, abstract = {The present work discusses the possibilities offered by the evolution of Information and Communication Technologies with the aim of designing a system to dynamically obtain knowledge of accessibility issues in urban environments. This system is facilitated by technology to analyse the urban user experience and movement accessibility, which enables accurate identification of urban barriers and monitoring of their effectiveness over time. Therefore, the main purpose of the system is to meet the real needs and requirements of people with movement disabilities. The information obtained can be provided as a support service for decision-making to be used by city government, institutions, researchers, professionals and other individuals of society in general to improve the liveability and quality of the lives of citizens. The proposed system is a means of social awareness that makes the most vulnerable groups of citizens visible by involving them as active participants. To perform and implement the system, the latest communication and positioning technologies for smart sensing have been used, as well as the cloud computing paradigm. 
Finally, to validate the proposal, a case study has been presented using the university environment as a pre-deployment step in urban environments.}, } @article {pmid28786365, year = {2017}, author = {Chen, F and Wang, C and Dai, W and Jiang, X and Mohammed, N and Al Aziz, MM and Sadat, MN and Sahinalp, C and Lauter, K and Wang, S}, title = {PRESAGE: PRivacy-preserving gEnetic testing via SoftwAre Guard Extension.}, journal = {BMC medical genomics}, volume = {10}, number = {Suppl 2}, pages = {48}, pmid = {28786365}, issn = {1755-8794}, support = {R21 LM012060/LM/NLM NIH HHS/United States ; U01 EB023685/EB/NIBIB NIH HHS/United States ; R01 GM118574/GM/NIGMS NIH HHS/United States ; R00 HG008175/HG/NHGRI NIH HHS/United States ; R01 GM118609/GM/NIGMS NIH HHS/United States ; }, mesh = {Cloud Computing ; *Computer Security ; *Genetic Testing ; Outsourced Services ; *Sequence Analysis, DNA ; *Software ; }, abstract = {BACKGROUND: Advances in DNA sequencing technologies have prompted a wide range of genomic applications to improve healthcare and facilitate biomedical research. However, privacy and security concerns have emerged as a challenge for utilizing cloud computing to handle sensitive genomic data.

METHODS: We present one of the first implementations of Software Guard Extension (SGX) based securely outsourced genetic testing framework, which leverages multiple cryptographic protocols and minimal perfect hash scheme to enable efficient and secure data storage and computation outsourcing.

RESULTS: We compared the performance of the proposed PRESAGE framework with the state-of-the-art homomorphic encryption scheme, as well as the plaintext implementation. The experimental results demonstrated significant performance gains over the homomorphic encryption methods and a small computational overhead in comparison to the plaintext implementation.

CONCLUSIONS: The proposed PRESAGE provides an alternative solution for secure and efficient genomic data outsourcing in an untrusted cloud by using a hybrid framework that combines secure hardware and multiple crypto protocols.}, } @article {pmid28786363, year = {2017}, author = {Sousa, JS and Lefebvre, C and Huang, Z and Raisaro, JL and Aguilar-Melchor, C and Killijian, MO and Hubaux, JP}, title = {Efficient and secure outsourcing of genomic data storage.}, journal = {BMC medical genomics}, volume = {10}, number = {Suppl 2}, pages = {46}, pmid = {28786363}, issn = {1755-8794}, mesh = {Cloud Computing ; *Computer Security ; *Genomics ; Information Storage and Retrieval/*methods ; Models, Theoretical ; Outsourced Services/*methods ; }, abstract = {BACKGROUND: Cloud computing is becoming the preferred solution for efficiently dealing with the increasing amount of genomic data. Yet, outsourcing storage and processing sensitive information, such as genomic data, comes with important concerns related to privacy and security. This calls for new sophisticated techniques that ensure data protection from untrusted cloud providers and that still enable researchers to obtain useful information.

METHODS: We present a novel privacy-preserving algorithm for fully outsourcing the storage of large genomic data files to a public cloud and enabling researchers to efficiently search for variants of interest. In order to protect data and query confidentiality from possible leakage, our solution exploits optimal encoding for genomic variants and combines it with homomorphic encryption and private information retrieval. Our proposed algorithm is implemented in C++ and was evaluated on real data as part of the 2016 iDash Genome Privacy-Protection Challenge.

RESULTS: Results show that our solution outperforms the state-of-the-art solutions and enables researchers to search over millions of encrypted variants in a few seconds.

CONCLUSIONS: As opposed to prior beliefs that sophisticated privacy-enhancing technologies (PETs) are unpractical for real operational settings, our solution demonstrates that, in the case of genomic data, PETs are very efficient enablers.}, } @article {pmid28785418, year = {2016}, author = {Connor, TR and Loman, NJ and Thompson, S and Smith, A and Southgate, J and Poplawski, R and Bull, MJ and Richardson, E and Ismail, M and Thompson, SE and Kitchen, C and Guest, M and Bakke, M and Sheppard, SK and Pallen, MJ}, title = {CLIMB (the Cloud Infrastructure for Microbial Bioinformatics): an online resource for the medical microbiology community.}, journal = {Microbial genomics}, volume = {2}, number = {9}, pages = {e000086}, pmid = {28785418}, issn = {2057-5858}, support = {MR/L015080/1/MRC_/Medical Research Council/United Kingdom ; MR/M501621/1/MRC_/Medical Research Council/United Kingdom ; }, mesh = {*Cloud Computing ; Computational Biology/*methods ; Genome, Microbial ; Genomics ; High-Throughput Nucleotide Sequencing ; *Internet ; Microbiological Techniques/*methods ; *Software ; }, abstract = {The increasing availability and decreasing cost of high-throughput sequencing has transformed academic medical microbiology, delivering an explosion in available genomes while also driving advances in bioinformatics. However, many microbiologists are unable to exploit the resulting large genomics datasets because they do not have access to relevant computational resources and to an appropriate bioinformatics infrastructure. 
Here, we present the Cloud Infrastructure for Microbial Bioinformatics (CLIMB) facility, a shared computing infrastructure that has been designed from the ground up to provide an environment where microbiologists can share and reuse methods and data.}, } @article {pmid28775987, year = {2017}, author = {Durr, PA and Graham, K and van Klinken, RD}, title = {Sellers' Revisited: A Big Data Reassessment of Historical Outbreaks of Bluetongue and African Horse Sickness due to the Long-Distance Wind Dispersion of Culicoides Midges.}, journal = {Frontiers in veterinary science}, volume = {4}, number = {}, pages = {98}, pmid = {28775987}, issn = {2297-1769}, abstract = {The possibility that outbreaks of bluetongue (BT) and African horse sickness (AHS) might occur via long-distance wind dispersion (LDWD) of their insect vector (Culicoides spp.) was proposed by R. F. Sellers in a series of papers published between 1977 and 1991. These investigated the role of LDWD by means of visual examination of the wind direction of synoptic weather charts. Based on the hypothesis that simple wind direction analysis, which does not allow for wind speed, might have led to spurious conclusions, we reanalyzed six of the outbreak scenarios described in Sellers' papers. For this reanalysis, we used a custom-built Big Data application ("TAPPAS") which couples a user-friendly web-interface with an established atmospheric dispersal model ("HYSPLIT"), thus enabling more sophisticated modeling than was possible when Sellers undertook his analyzes. For the two AHS outbreaks, there was strong support from our reanalysis of the role of LDWD for that in Spain (1966), and to a lesser degree, for the outbreak in Cyprus (1960). However, for the BT outbreaks, the reassessments were more complex, and for one of these (western Turkey, 1977) we could discount LDWD as the means of direct introduction of the virus. 
By contrast, while the outbreak in Cyprus (1977) showed LDWD was a possible means of introduction, there is an apparent inconsistency in that the outbreaks were localized while the dispersion events covered much of the island. For Portugal (1956), LDWD from Morocco on the dates suggested by Sellers is very unlikely to have been the pathway for introduction, and for the detection of serotype 2 in Florida (1982), LDWD from Cuba would require an assumption of a lengthy survival time of the midges in the air column. Except for western Turkey, the BT reanalyses show the limitation of LDWD modeling when used by itself, and indicates the need to integrate susceptible host population distribution (and other covariate) data into the modeling process. A further refinement, which will become increasingly important to assess LDWD, will be the use of virus and vector genome sequence data collected from potential source and the incursion sites.}, } @article {pmid28772739, year = {2017}, author = {Watson, E and Steinhauser, MO}, title = {Discrete Particle Method for Simulating Hypervelocity Impact Phenomena.}, journal = {Materials (Basel, Switzerland)}, volume = {10}, number = {4}, pages = {}, pmid = {28772739}, issn = {1996-1944}, abstract = {In this paper, we introduce a computational model for the simulation of hypervelocity impact (HVI) phenomena which is based on the Discrete Element Method (DEM). Our paper constitutes the first application of DEM to the modeling and simulating of impact events for velocities beyond 5 kms[-1]. We present here the results of a systematic numerical study on HVI of solids. For modeling the solids, we use discrete spherical particles that interact with each other via potentials. In our numerical investigations we are particularly interested in the dynamics of material fragmentation upon impact. We model a typical HVI experiment configuration where a sphere strikes a thin plate and investigate the properties of the resulting debris cloud. 
We provide a quantitative computational analysis of the resulting debris cloud caused by impact and a comprehensive parameter study by varying key parameters of our model. We compare our findings from the simulations with recent HVI experiments performed at our institute. Our findings are that the DEM method leads to very stable, energy-conserving simulations of HVI scenarios that map the experimental setup where a sphere strikes a thin plate at hypervelocity speed. Our chosen interaction model works particularly well in the velocity range where the local stresses caused by impact shock waves markedly exceed the ultimate material strength.}, } @article {pmid28770487, year = {2017}, author = {Stockton, DB and Santamaria, F}, title = {Integrating the Allen Brain Institute Cell Types Database into Automated Neuroscience Workflow.}, journal = {Neuroinformatics}, volume = {15}, number = {4}, pages = {333-342}, pmid = {28770487}, issn = {1559-0089}, support = {G12 MD007591/MD/NIMHD NIH HHS/United States ; }, mesh = {Animals ; Automation ; Brain/*physiology ; Databases, Factual ; Information Storage and Retrieval/*methods ; Mice ; Neurosciences/*methods ; Software ; Workflow ; }, abstract = {We developed software tools to download, extract features, and organize the Cell Types Database from the Allen Brain Institute (ABI) in order to integrate its whole cell patch clamp characterization data into the automated modeling/data analysis cycle. To expand the potential user base we employed both Python and MATLAB. The basic set of tools downloads selected raw data and extracts cell, sweep, and spike features, using ABI's feature extraction code. To facilitate data manipulation we added a tool to build a local specialized database of raw data plus extracted features. Finally, to maximize automation, we extended our NeuroManager workflow automation suite to include these tools plus a separate investigation database. 
The extended suite allows the user to integrate ABI experimental and modeling data into an automated workflow deployed on heterogeneous computer infrastructures, from local servers, to high performance computing environments, to the cloud. Since our approach is focused on workflow procedures our tools can be modified to interact with the increasing number of neuroscience databases being developed to cover all scales and properties of the nervous system.}, } @article {pmid28770014, year = {2017}, author = {Tien, KW and Kulvatunyou, B and Jung, K and Prabhu, V}, title = {An Investigation to Manufacturing Analytical Services Composition using the Analytical Target Cascading Method.}, journal = {IFIP advances in information and communication technology}, volume = {IFIP International Conference on Advances in Production Management Systems}, number = {APMS 2016}, pages = {469-477}, pmid = {28770014}, issn = {1868-4238}, support = {9999-NIST//Intramural NIST DOC/United States ; }, abstract = {As cloud computing is increasingly adopted, the trend is to offer software functions as modular services and compose them into larger, more meaningful ones. The trend is attractive to analytical problems in the manufacturing system design and performance improvement domain because 1) finding a global optimization for the system is a complex problem; and 2) sub-problems are typically compartmentalized by the organizational structure. However, solving sub-problems by independent services can result in a sub-optimal solution at the system level. This paper investigates the technique called Analytical Target Cascading (ATC) to coordinate the optimization of loosely-coupled sub-problems, each may be modularly formulated by differing departments and be solved by modular analytical services. 
The result demonstrates that ATC is a promising method in that it offers system-level optimal solutions that can scale up by exploiting distributed and modular executions while allowing easier management of the problem formulation.}, } @article {pmid28758943, year = {2017}, author = {Wu, J and Su, Z and Wang, S and Li, J}, title = {Crowd Sensing-Enabling Security Service Recommendation for Social Fog Computing Systems.}, journal = {Sensors (Basel, Switzerland)}, volume = {17}, number = {8}, pages = {}, pmid = {28758943}, issn = {1424-8220}, abstract = {Fog computing, shifting intelligence and resources from the remote cloud to edge networks, has the potential of providing low-latency for the communication from sensing data sources to users. For the objects from the Internet of Things (IoT) to the cloud, it is a new trend that the objects establish social-like relationships with each other, which efficiently brings the benefits of developed sociality to a complex environment. As fog service become more sophisticated, it will become more convenient for fog users to share their own services, resources, and data via social networks. Meanwhile, the efficient social organization can enable more flexible, secure, and collaborative networking. Aforementioned advantages make the social network a potential architecture for fog computing systems. In this paper, we design an architecture for social fog computing, in which the services of fog are provisioned based on "friend" relationships. To the best of our knowledge, this is the first attempt at an organized fog computing system-based social model. Meanwhile, social networking enhances the complexity and security risks of fog computing services, creating difficulties of security service recommendations in social fog computing. To address this, we propose a novel crowd sensing-enabling security service provisioning method to recommend security services accurately in social fog computing systems. 
Simulation results show the feasibilities and efficiency of the crowd sensing-enabling security service recommendation method for social fog computing systems.}, } @article {pmid28749322, year = {2017}, author = {Gofine, M and Clark, S}, title = {Integration of Slack, a cloud-based team collaboration application, into research coordination.}, journal = {Journal of innovation in health informatics}, volume = {24}, number = {2}, pages = {936}, doi = {10.14236/jhi.v24i2.936}, pmid = {28749322}, issn = {2058-4563}, mesh = {Academic Medical Centers ; Biomedical Research ; *Communication ; *Cooperative Behavior ; Epidemiology/*organization & administration ; Female ; Humans ; Information Dissemination ; Male ; Social Media/*statistics & numerical data ; Surveys and Questionnaires ; }, abstract = {Practitioners of epidemiology require efficient real-time communication and shared access to numerous documents in order to effectively manage a study. Much of this communication involves study logistics and does not require use of Protected Health Information. Slack is a team collaboration app; it archives all direct messages and group conversations, hosts documents internally, and integrates with the Google Docs application. Slack has both desktop and mobile applications, allowing users to communicate in real-time without the need to find email addresses or phone numbers or create contact lists. METHOD: We piloted the integration of Slack into our research team of one faculty member, one research coordinator, and approximately 20 research assistants. Statistics describing the app's usage were calculated twelve months after its implementation. RESULTS: Results indicating heavy usage by both research professionals and assistants are presented. Our Slack group included a cumulative 51 users. Between October 2015 and November 2016, approximately 10,600 messages were sent through Slack; 53% were sent by RA's and 47% were sent by us. 
Of the 106 files stored on Slack, 82% were uploaded by research staff. In a January 2016 survey, 100% of RA's agreed or strongly agreed that Slack improved communication within the team. CONCLUSION: We demonstrate a model for integration of communication technology into academic activities by research teams. Slack is easily integrated into the workflow at an urban, academic medical center and is adopted by users as a highly effective tool for meeting research teams' communication and document management needs.}, } @article {pmid28748430, year = {2017}, author = {Singh, H and Yadav, G and Mallaiah, R and Joshi, P and Joshi, V and Kaur, R and Bansal, S and Brahmachari, SK}, title = {iNICU - Integrated Neonatal Care Unit: Capturing Neonatal Journey in an Intelligent Data Way.}, journal = {Journal of medical systems}, volume = {41}, number = {8}, pages = {132}, pmid = {28748430}, issn = {1573-689X}, mesh = {Humans ; India ; Infant, Newborn ; Infant, Premature ; *Intensive Care Units, Neonatal ; Rural Population ; Workflow ; }, abstract = {Neonatal period represents first 28 days of life, which is the most vulnerable time for a child's survival especially for the preterm babies. High neonatal mortality is a prominent and persistent problem across the globe. Non-availability of trained staff and infrastructure are the major recognized hurdles in the quality care of these neonates. Hourly progress growth charts and reports are still maintained manually by nurses along with continuous calculation of drug dosage and nutrition as per the changing weight of the baby. iNICU (integrated Neonatology Intensive Care Unit) leverages Beaglebone and Intel Edison based IoT integration with biomedical devices in NICU i.e. monitor, ventilator and blood gas machine. iNICU is hosted on IBM Softlayer based cloud computing infrastructure and map NICU workflow in Java based responsive web application to provide translational research informatics support to the clinicians. 
iNICU captures real time vital parameters i.e. respiration rate, heart rate, lab data and PACS amounting for millions of data points per day per child. Stream of data is sent to Apache Kafka layer which stores the same in Apache Cassandra NoSQL. iNICU also captures clinical data like feed intake, urine output, and daily assessment of child in PostgreSQL database. It acts as first Big Data hub (of both structured and unstructured data) of neonates across India offering temporal (longitudinal) data of their stay in NICU and allow clinicians in evaluating efficacy of their interventions. iNICU leverages drools based clinical rule based engine and deep learning based big data analytical model coded in R and PMML. iNICU solution aims to improve care time, fills skill gap, enable remote monitoring of neonates in rural regions, assists in identifying the early onset of disease, and reduction in neonatal mortality.}, } @article {pmid28745086, year = {2017}, author = {Klonoff, DC}, title = {Fog Computing and Edge Computing Architectures for Processing Data From Diabetes Devices Connected to the Medical Internet of Things.}, journal = {Journal of diabetes science and technology}, volume = {11}, number = {4}, pages = {647-652}, pmid = {28745086}, issn = {1932-2968}, mesh = {Blood Glucose Self-Monitoring/instrumentation/trends ; *Computing Methodologies ; Diabetes Mellitus, Type 1/*blood ; Humans ; Insulin Infusion Systems/*trends ; *Internet ; }, abstract = {The Internet of Things (IoT) is generating an immense volume of data. With cloud computing, medical sensor and actuator data can be stored and analyzed remotely by distributed servers. The results can then be delivered via the Internet. The number of devices in IoT includes such wireless diabetes devices as blood glucose monitors, continuous glucose monitors, insulin pens, insulin pumps, and closed-loop systems. 
The cloud model for data storage and analysis is increasingly unable to process the data avalanche, and processing is being pushed out to the edge of the network closer to where the data-generating devices are. Fog computing and edge computing are two architectures for data handling that can offload data from the cloud, process it nearby the patient, and transmit information machine-to-machine or machine-to-human in milliseconds or seconds. Sensor data can be processed near the sensing and actuating devices with fog computing (with local nodes) and with edge computing (within the sensing devices). Compared to cloud computing, fog computing and edge computing offer five advantages: (1) greater data transmission speed, (2) less dependence on limited bandwidths, (3) greater privacy and security, (4) greater control over data generated in foreign countries where laws may limit use or permit unwanted governmental access, and (5) lower costs because more sensor-derived data are used locally and less data are transmitted remotely. Connected diabetes devices almost all use fog computing or edge computing because diabetes patients require a very rapid response to sensor input and cannot tolerate delays for cloud computing.}, } @article {pmid28738824, year = {2017}, author = {Azman, SK and Anwar, MZ and Henschel, A}, title = {Visibiome: an efficient microbiome search engine based on a scalable, distributed architecture.}, journal = {BMC bioinformatics}, volume = {18}, number = {1}, pages = {353}, pmid = {28738824}, issn = {1471-2105}, mesh = {Algorithms ; Databases, Factual ; *Microbiota ; Phylogeny ; Principal Component Analysis ; RNA, Ribosomal, 16S/chemistry/classification/metabolism ; *Search Engine ; }, abstract = {BACKGROUND: Given the current influx of 16S rRNA profiles of microbiota samples, it is conceivable that large amounts of them eventually are available for search, comparison and contextualization with respect to novel samples. 
This process facilitates the identification of similar compositional features in microbiota elsewhere and therefore can help to understand driving factors for microbial community assembly.

RESULTS: We present Visibiome, a microbiome search engine that can perform exhaustive, phylogeny based similarity search and contextualization of user-provided samples against a comprehensive dataset of 16S rRNA profiles from diverse environments, while tackling several computational challenges. In order to scale to high demands, we developed a distributed system that combines web framework technology, task queueing and scheduling, cloud computing and a dedicated database server. To further ensure speed and efficiency, we have deployed Nearest Neighbor search algorithms, capable of sublinear searches in high-dimensional metric spaces in combination with an optimized Earth Mover Distance based implementation of weighted UniFrac. The search also incorporates pairwise (adaptive) rarefaction and optionally, 16S rRNA copy number correction. The result of a query microbiome sample is the contextualization against a comprehensive database of microbiome samples from a diverse range of environments, visualized through a rich set of interactive figures and diagrams, including barchart-based compositional comparisons and ranking of the closest matches in the database.

CONCLUSIONS: Visibiome is a convenient, scalable and efficient framework to search microbiomes against a comprehensive database of environmental samples. The search engine leverages a popular but computationally expensive, phylogeny based distance metric, while providing numerous advantages over the current state of the art tool.}, } @article {pmid28737733, year = {2017}, author = {Fan, K and Wang, J and Wang, X and Li, H and Yang, Y}, title = {A Secure and Verifiable Outsourced Access Control Scheme in Fog-Cloud Computing.}, journal = {Sensors (Basel, Switzerland)}, volume = {17}, number = {7}, pages = {}, pmid = {28737733}, issn = {1424-8220}, abstract = {With the rapid development of big data and Internet of things (IOT), the number of networking devices and data volume are increasing dramatically. Fog computing, which extends cloud computing to the edge of the network can effectively solve the bottleneck problems of data transmission and data storage. However, security and privacy challenges are also arising in the fog-cloud computing environment. Ciphertext-policy attribute-based encryption (CP-ABE) can be adopted to realize data access control in fog-cloud computing systems. In this paper, we propose a verifiable outsourced multi-authority access control scheme, named VO-MAACS. In our construction, most encryption and decryption computations are outsourced to fog devices and the computation results can be verified by using our verification method. Meanwhile, to address the revocation issue, we design an efficient user and attribute revocation method for it. 
Finally, analysis and simulation results show that our scheme is both secure and highly efficient.}, } @article {pmid28732109, year = {2017}, author = {Hayley, K}, title = {The Present State and Future Application of Cloud Computing for Numerical Groundwater Modeling.}, journal = {Ground water}, volume = {55}, number = {5}, pages = {678-682}, doi = {10.1111/gwat.12555}, pmid = {28732109}, issn = {1745-6584}, mesh = {*Cloud Computing ; *Groundwater ; Internet ; Software ; }, } @article {pmid28727557, year = {2017}, author = {Huang, PT and Jong, TL and Li, CM and Chen, WL and Lin, CH}, title = {Integrating Flexible Sensor and Virtual Self-Organizing DC Grid Model With Cloud Computing for Blood Leakage Detection During Hemodialysis.}, journal = {IEEE transactions on biomedical circuits and systems}, volume = {11}, number = {4}, pages = {784-793}, doi = {10.1109/TBCAS.2017.2695798}, pmid = {28727557}, issn = {1940-9990}, mesh = {Algorithms ; *Cloud Computing ; Computer Systems ; Hemorrhage/*diagnosis ; Humans ; Monitoring, Physiologic/*instrumentation ; *Renal Dialysis ; *Wireless Technology ; }, abstract = {Blood leakage and blood loss are serious complications during hemodialysis. From the hemodialysis survey reports, these life-threatening events occur to attract nephrology nurses and patients themselves. When the venous needle and blood line are disconnected, it takes only a few minutes for an adult patient to lose over 40% of his / her blood, which is a sufficient amount of blood loss to cause the patient to die. Therefore, we propose integrating a flexible sensor and self-organizing algorithm to design a cloud computing-based warning device for blood leakage detection. The flexible sensor is fabricated via a screen-printing technique using metallic materials on a soft substrate in an array configuration. The self-organizing algorithm constructs a virtual direct current grid-based alarm unit in an embedded system. 
This warning device is employed to identify blood leakage levels via a wireless network and cloud computing. It has been validated experimentally, and the experimental results suggest specifications for its commercial designs. The proposed model can also be implemented in an embedded system.}, } @article {pmid28715268, year = {2017}, author = {Wiederhold, BK}, title = {Living in Fragments: The Necessity of Cloud Computing and Virtual Reality.}, journal = {Cyberpsychology, behavior and social networking}, volume = {20}, number = {7}, pages = {405-406}, doi = {10.1089/cyber.2017.29077.bkw}, pmid = {28715268}, issn = {2152-2723}, } @article {pmid28711183, year = {2017}, author = {Calvo-Ortega, JF and Pozo, M and Moragues, S and Casals, J}, title = {Fast protocol for radiochromic film dosimetry using a cloud computing web application.}, journal = {Physica medica : PM : an international journal devoted to the applications of physics to medicine and biology : official journal of the Italian Association of Biomedical Physics (AIFB)}, volume = {39}, number = {}, pages = {1-8}, doi = {10.1016/j.ejmp.2017.05.072}, pmid = {28711183}, issn = {1724-191X}, mesh = {Calibration ; *Cloud Computing ; Film Dosimetry/*methods ; Humans ; Radiometry ; *Radiotherapy, Intensity-Modulated ; }, abstract = {PURPOSE: To investigate the feasibility of a fast protocol for radiochromic film dosimetry to verify intensity-modulated radiotherapy (IMRT) plans.

METHOD AND MATERIALS: EBT3 film dosimetry was conducted in this study using the triple-channel method implemented in the cloud computing application (Radiochromic.com). We described a fast protocol for radiochromic film dosimetry to obtain measurement results within 1h. Ten IMRT plans were delivered to evaluate the feasibility of the fast protocol. The dose distribution of the verification film was derived at 15, 30, 45min using the fast protocol and also at 24h after completing the irradiation. The four dose maps obtained per plan were compared using global and local gamma index (5%/3mm) with the calculated one by the treatment planning system. Gamma passing rates obtained for 15, 30 and 45min post-exposure were compared with those obtained after 24h.

RESULTS: Small differences respect to the 24h protocol were found in the gamma passing rates obtained for films digitized at 15min (global: 99.6%±0.9% vs. 99.7%±0.5%; local: 96.3%±3.4% vs. 96.3%±3.8%), at 30min (global: 99.5%±0.9% vs. 99.7%±0.5%; local: 96.5%±3.2% vs. 96.3±3.8%) and at 45min (global: 99.2%±1.5% vs. 99.7%±0.5%; local: 96.1%±3.8% vs. 96.3±3.8%).

CONCLUSIONS: The fast protocol permits dosimetric results within 1h when IMRT plans are verified, with similar results as those reported by the standard 24h protocol.}, } @article {pmid28706727, year = {2017}, author = {Tsiouris, KM and Gatsios, D and Rigas, G and Miljkovic, D and Koroušić Seljak, B and Bohanec, M and Arredondo, MT and Antonini, A and Konitsiotis, S and Koutsouris, DD and Fotiadis, DI}, title = {PD_Manager: an mHealth platform for Parkinson's disease patient management.}, journal = {Healthcare technology letters}, volume = {4}, number = {3}, pages = {102-108}, pmid = {28706727}, issn = {2053-3713}, abstract = {PD_Manager is a mobile health platform designed to cover most of the aspects regarding the management of Parkinson's disease (PD) in a holistic approach. Patients are unobtrusively monitored using commercial wrist and insole sensors paired with a smartphone, to automatically estimate the severity of most of the PD motor symptoms. Besides motor symptoms monitoring, the patient's mobile application also provides various non-motor self-evaluation tests for assessing cognition, mood and nutrition to motivate them in becoming more active in managing their disease. All data from the mobile application and the sensors is transferred to a cloud infrastructure to allow easy access for clinicians and further processing. Clinicians can access this information using a separate mobile application that is specifically designed for their respective needs to provide faster and more accurate assessment of PD symptoms that facilitate patient evaluation. Machine learning techniques are used to estimate symptoms and disease progression trends to further enhance the provided information. The platform is also complemented with a decision support system (DSS) that notifies clinicians for the detection of new symptoms or the worsening of existing ones. 
As a patient's symptoms progress, the DSS can also provide specific suggestions regarding appropriate medication changes.}, } @article {pmid28692697, year = {2017}, author = {Pervez, Z and Ahmad, M and Khattak, AM and Ramzan, N and Khan, WA}, title = {[Formula: see text]: Oblivious similarity based searching for encrypted data outsourced to an untrusted domain.}, journal = {PloS one}, volume = {12}, number = {7}, pages = {e0179720}, pmid = {28692697}, issn = {1932-6203}, mesh = {Algorithms ; Cloud Computing ; *Computer Security ; *Information Dissemination ; *Search Engine ; }, abstract = {Public cloud storage services are becoming prevalent and myriad data sharing, archiving and collaborative services have emerged which harness the pay-as-you-go business model of public cloud. To ensure privacy and confidentiality often encrypted data is outsourced to such services, which further complicates the process of accessing relevant data by using search queries. Search over encrypted data schemes solve this problem by exploiting cryptographic primitives and secure indexing to identify outsourced data that satisfy the search criteria. Almost all of these schemes rely on exact matching between the encrypted data and search criteria. A few schemes which extend the notion of exact matching to similarity based search, lack realism as those schemes rely on trusted third parties or due to increased storage and computational complexity. In this paper we propose Oblivious Similarity based Search ([Formula: see text]) for encrypted data. It enables authorized users to model their own encrypted search queries which are resilient to typographical errors. Unlike conventional methodologies, [Formula: see text] ranks the search results by using similarity measure offering a better search experience than exact matching. 
It utilizes encrypted bloom filter and probabilistic homomorphic encryption to enable authorized users to access relevant data without revealing results of search query evaluation process to the untrusted cloud service provider. Encrypted bloom filter based search enables [Formula: see text] to reduce search space to potentially relevant encrypted data avoiding unnecessary computation on public cloud. The efficacy of [Formula: see text] is evaluated on Google App Engine for various bloom filter lengths on different cloud configurations.}, } @article {pmid28658599, year = {2017}, author = {Cuenca-Alba, J and Del Cano, L and Gómez Blanco, J and de la Rosa Trevín, JM and Conesa Mingo, P and Marabini, R and S Sorzano, CO and Carazo, JM}, title = {ScipionCloud: An integrative and interactive gateway for large scale cryo electron microscopy image processing on commercial and academic clouds.}, journal = {Journal of structural biology}, volume = {200}, number = {1}, pages = {20-27}, doi = {10.1016/j.jsb.2017.06.004}, pmid = {28658599}, issn = {1095-8657}, mesh = {*Cryoelectron Microscopy ; Image Processing, Computer-Assisted ; *Information Storage and Retrieval ; Software ; }, abstract = {New instrumentation for cryo electron microscopy (cryoEM) has significantly increased data collection rate as well as data quality, creating bottlenecks at the image processing level. Current image processing model of moving the acquired images from the data source (electron microscope) to desktops or local clusters for processing is encountering many practical limitations. However, computing may also take place in distributed and decentralized environments. In this way, cloud is a new form of accessing computing and storage resources on demand. Here, we evaluate on how this new computational paradigm can be effectively used by extending our current integrative framework for image processing, creating ScipionCloud. 
This new development has resulted in a full installation of Scipion both in public and private clouds, accessible as public "images", with all the required preinstalled cryoEM software, just requiring a Web browser to access all Graphical User Interfaces. We have profiled the performance of different configurations on Amazon Web Services and the European Federated Cloud, always on architectures incorporating GPU's, and compared them with a local facility. We have also analyzed the economical convenience of different scenarios, so cryoEM scientists have a clearer picture of the setup that is best suited for their needs and budgets.}, } @article {pmid28655296, year = {2017}, author = {de Castro, MR and Tostes, CDS and Dávila, AMR and Senger, H and da Silva, FAB}, title = {SparkBLAST: scalable BLAST processing using in-memory operations.}, journal = {BMC bioinformatics}, volume = {18}, number = {1}, pages = {318}, pmid = {28655296}, issn = {1471-2105}, mesh = {Algorithms ; Cloud Computing ; Comparative Genomic Hybridization ; Databases, Factual ; Sequence Alignment ; *Software ; }, abstract = {BACKGROUND: The demand for processing ever increasing amounts of genomic data has raised new challenges for the implementation of highly scalable and efficient computational systems. In this paper we propose SparkBLAST, a parallelization of a sequence alignment application (BLAST) that employs cloud computing for the provisioning of computational resources and Apache Spark as the coordination framework. As a proof of concept, some radionuclide-resistant bacterial genomes were selected for similarity analysis.

RESULTS: Experiments in Google and Microsoft Azure clouds demonstrated that SparkBLAST outperforms an equivalent system implemented on Hadoop in terms of speedup and execution times.

CONCLUSIONS: The superior performance of SparkBLAST is mainly due to the in-memory operations available through the Spark framework, consequently reducing the number of local I/O operations required for distributed BLAST processing.}, } @article {pmid28655203, year = {2017}, author = {Taghiyar, MJ and Rosner, J and Grewal, D and Grande, BM and Aniba, R and Grewal, J and Boutros, PC and Morin, RD and Bashashati, A and Shah, SP}, title = {Kronos: a workflow assembler for genome analytics and informatics.}, journal = {GigaScience}, volume = {6}, number = {7}, pages = {1-10}, pmid = {28655203}, issn = {2047-217X}, mesh = {High-Throughput Nucleotide Sequencing/*methods ; *Software ; Whole Genome Sequencing/*methods ; }, abstract = {BACKGROUND: The field of next-generation sequencing informatics has matured to a point where algorithmic advances in sequence alignment and individual feature detection methods have stabilized. Practical and robust implementation of complex analytical workflows (where such tools are structured into "best practices" for automated analysis of next-generation sequencing datasets) still requires significant programming investment and expertise.

RESULTS: We present Kronos, a software platform for facilitating the development and execution of modular, auditable, and distributable bioinformatics workflows. Kronos obviates the need for explicit coding of workflows by compiling a text configuration file into executable Python applications. Making analysis modules would still require programming. The framework of each workflow includes a run manager to execute the encoded workflows locally (or on a cluster or cloud), parallelize tasks, and log all runtime events. The resulting workflows are highly modular and configurable by construction, facilitating flexible and extensible meta-applications that can be modified easily through configuration file editing. The workflows are fully encoded for ease of distribution and can be instantiated on external systems, a step toward reproducible research and comparative analyses. We introduce a framework for building Kronos components that function as shareable, modular nodes in Kronos workflows.

CONCLUSIONS: The Kronos platform provides a standard framework for developers to implement custom tools, reuse existing tools, and contribute to the community at large. Kronos is shipped with both Docker and Amazon Web Services Machine Images. It is free, open source, and available through the Python Package Index and at https://github.com/jtaghiyar/kronos.}, } @article {pmid28633659, year = {2017}, author = {Molnár-Gábor, F and Lueck, R and Yakneen, S and Korbel, JO}, title = {Computing patient data in the cloud: practical and legal considerations for genetics and genomics research in Europe and internationally.}, journal = {Genome medicine}, volume = {9}, number = {1}, pages = {58}, pmid = {28633659}, issn = {1756-994X}, mesh = {*Biomedical Research ; Cloud Computing/*legislation & jurisprudence ; Computer Security/*legislation & jurisprudence ; Europe ; *Genomics ; Humans ; }, abstract = {Biomedical research is becoming increasingly large-scale and international. Cloud computing enables the comprehensive integration of genomic and clinical data, and the global sharing and collaborative processing of these data within a flexibly scalable infrastructure. Clouds offer novel research opportunities in genomics, as they facilitate cohort studies to be carried out at unprecedented scale, and they enable computer processing with superior pace and throughput, allowing researchers to address questions that could not be addressed by studies using limited cohorts. A well-developed example of such research is the Pan-Cancer Analysis of Whole Genomes project, which involves the analysis of petabyte-scale genomic datasets from research centers in different locations or countries and different jurisdictions. 
Aside from the tremendous opportunities, there are also concerns regarding the utilization of clouds; these concerns pertain to perceived limitations in data security and protection, and the need for due consideration of the rights of patient donors and research participants. Furthermore, the increased outsourcing of information technology impedes the ability of researchers to act within the realm of existing local regulations owing to fundamental differences in the understanding of the right to data protection in various legal systems. In this Opinion article, we address the current opportunities and limitations of cloud computing and highlight the responsible use of federated and hybrid clouds that are set up between public and private partners as an adequate solution for genetics and genomics research in Europe, and under certain conditions between Europe and international partners. This approach could represent a sensible middle ground between fragmented individual solutions and a "one-size-fits-all" approach.}, } @article {pmid28632188, year = {2017}, author = {Rateni, G and Dario, P and Cavallo, F}, title = {Smartphone-Based Food Diagnostic Technologies: A Review.}, journal = {Sensors (Basel, Switzerland)}, volume = {17}, number = {6}, pages = {}, pmid = {28632188}, issn = {1424-8220}, mesh = {Food ; Humans ; Internet ; *Smartphone ; Software ; }, abstract = {A new generation of mobile sensing approaches offers significant advantages over traditional platforms in terms of test speed, control, low cost, ease-of-operation, and data management, and requires minimal equipment and user involvement. The marriage of novel sensing technologies with cellphones enables the development of powerful lab-on-smartphone platforms for many important applications including medical diagnosis, environmental monitoring, and food safety analysis. 
This paper reviews the recent advancements and developments in the field of smartphone-based food diagnostic technologies, with an emphasis on custom modules to enhance smartphone sensing capabilities. These devices typically comprise multiple components such as detectors, sample processors, disposable chips, batteries and software, which are integrated with a commercial smartphone. One of the most important aspects of developing these systems is the integration of these components onto a compact and lightweight platform that requires minimal power. To date, researchers have demonstrated several promising approaches employing various sensing techniques and device configurations. We aim to provide a systematic classification according to the detection strategy, providing a critical discussion of strengths and weaknesses. We have also extended the analysis to the food scanning devices that are increasingly populating the Internet of Things (IoT) market, demonstrating how this field is indeed promising, as the research outputs are quickly capitalized on new start-up companies.}, } @article {pmid28629131, year = {2017}, author = {Xiao, M and Zhou, J and Liu, X and Jiang, M}, title = {A Hybrid Scheme for Fine-Grained Search and Access Authorization in Fog Computing Environment.}, journal = {Sensors (Basel, Switzerland)}, volume = {17}, number = {6}, pages = {}, pmid = {28629131}, issn = {1424-8220}, abstract = {In the fog computing environment, the encrypted sensitive data may be transferred to multiple fog nodes on the edge of a network for low latency; thus, fog nodes need to implement a search over encrypted data as a cloud server. Since the fog nodes tend to provide service for IoT applications often running on resource-constrained end devices, it is necessary to design lightweight solutions. At present, there is little research on this issue. 
In this paper, we propose a fine-grained owner-forced data search and access authorization scheme spanning user-fog-cloud for resource constrained end users. Compared to existing schemes only supporting either index encryption with search ability or data encryption with fine-grained access control ability, the proposed hybrid scheme supports both abilities simultaneously, and index ciphertext and data ciphertext are constructed based on a single ciphertext-policy attribute based encryption (CP-ABE) primitive and share the same key pair, thus the data access efficiency is significantly improved and the cost of key management is greatly reduced. Moreover, in the proposed scheme, the resource constrained end devices are allowed to rapidly assemble ciphertexts online and securely outsource most of decryption task to fog nodes, and mediated encryption mechanism is also adopted to achieve instantaneous user revocation instead of re-encrypting ciphertexts with many copies in many fog nodes. The security and the performance analysis show that our scheme is suitable for a fog computing environment.}, } @article {pmid28626296, year = {2016}, author = {Shkapsky, A and Yang, M and Interlandi, M and Chiu, H and Condie, T and Zaniolo, C}, title = {Big Data Analytics with Datalog Queries on Spark.}, journal = {Proceedings. ACM-SIGMOD International Conference on Management of Data}, volume = {2016}, number = {}, pages = {1135-1149}, doi = {10.1145/2882903.2915229}, pmid = {28626296}, issn = {0730-8078}, support = {U01 HG008488/HG/NHGRI NIH HHS/United States ; U54 EB020404/EB/NIBIB NIH HHS/United States ; }, abstract = {There is great interest in exploiting the opportunity provided by cloud computing platforms for large-scale analytics. Among these platforms, Apache Spark is growing in popularity for machine learning and graph analytics. 
Developing efficient complex analytics in Spark requires deep understanding of both the algorithm at hand and the Spark API or subsystem APIs (e.g., Spark SQL, GraphX). Our BigDatalog system addresses the problem by providing concise declarative specification of complex queries amenable to efficient evaluation. Towards this goal, we propose compilation and optimization techniques that tackle the important problem of efficiently supporting recursion in Spark. We perform an experimental comparison with other state-of-the-art large-scale Datalog systems and verify the efficacy of our techniques and effectiveness of Spark in supporting Datalog-based analytics.}, } @article {pmid28610458, year = {2017}, author = {Das, AK and Koppa, PK and Goswami, S and Platania, R and Park, SJ}, title = {Large-scale parallel genome assembler over cloud computing environment.}, journal = {Journal of bioinformatics and computational biology}, volume = {15}, number = {3}, pages = {1740003}, doi = {10.1142/S0219720017400030}, pmid = {28610458}, issn = {1757-6334}, mesh = {*Cloud Computing ; Databases, Genetic ; Escherichia coli/genetics ; *Genome ; Genome, Human ; High-Throughput Nucleotide Sequencing ; Humans ; Male ; *Software ; Staphylococcus aureus/genetics ; }, abstract = {The size of high throughput DNA sequencing data has already reached the terabyte scale. To manage this huge volume of data, many downstream sequencing applications started using locality-based computing over different cloud infrastructures to take advantage of elastic (pay as you go) resources at a lower cost. However, the locality-based programming model (e.g. MapReduce) is relatively new. Consequently, developing scalable data-intensive bioinformatics applications using this model and understanding the hardware environment that these applications require for good performance, both require further research. 
In this paper, we present a de Bruijn graph oriented Parallel Giraph-based Genome Assembler (GiGA), as well as the hardware platform required for its optimal performance. GiGA uses the power of Hadoop (MapReduce) and Giraph (large-scale graph analysis) to achieve high scalability over hundreds of compute nodes by collocating the computation and data. GiGA achieves significantly higher scalability with competitive assembly quality compared to contemporary parallel assemblers (e.g. ABySS and Contrail) over traditional HPC cluster. Moreover, we show that the performance of GiGA is significantly improved by using an SSD-based private cloud infrastructure over traditional HPC cluster. We observe that the performance of GiGA on 256 cores of this SSD-based cloud infrastructure closely matches that of 512 cores of traditional HPC cluster.}, } @article {pmid28609295, year = {2017}, author = {Miller, M and Zhu, C and Bromberg, Y}, title = {clubber: removing the bioinformatics bottleneck in big data analyses.}, journal = {Journal of integrative bioinformatics}, volume = {14}, number = {2}, pages = {}, pmid = {28609295}, issn = {1613-4516}, support = {U01 GM115486/GM/NIGMS NIH HHS/United States ; }, mesh = {Automation ; Bathing Beaches ; Computational Biology/*methods ; *Computing Methodologies ; Datasets as Topic ; Gulf of Mexico ; Metagenome/genetics ; Microbiota/genetics ; Molecular Sequence Annotation ; Petroleum Pollution/adverse effects ; *Software ; }, abstract = {With the advent of modern day high-throughput technologies, the bottleneck in biological discovery has shifted from the cost of doing experiments to that of analyzing results. clubber is our automated cluster-load balancing system developed for optimizing these "big data" analyses. Its plug-and-play framework encourages re-use of existing solutions for bioinformatics problems. clubber's goals are to reduce computation times and to facilitate use of cluster computing. 
The first goal is achieved by automating the balance of parallel submissions across available high performance computing (HPC) resources. Notably, the latter can be added on demand, including cloud-based resources, and/or featuring heterogeneous environments. The second goal of making HPCs user-friendly is facilitated by an interactive web interface and a RESTful API, allowing for job monitoring and result retrieval. We used clubber to speed up our pipeline for annotating molecular functionality of metagenomes. Here, we analyzed the Deepwater Horizon oil-spill study data to quantitatively show that the beach sands have not yet entirely recovered. Further, our analysis of the CAMI-challenge data revealed that microbiome taxonomic shifts do not necessarily correlate with functional shifts. These examples (21 metagenomes processed in 172 min) clearly illustrate the importance of clubber in the everyday computational biology environment.}, } @article {pmid28605406, year = {2017}, author = {Bais, P and Namburi, S and Gatti, DM and Zhang, X and Chuang, JH}, title = {CloudNeo: a cloud pipeline for identifying patient-specific tumor neoantigens.}, journal = {Bioinformatics (Oxford, England)}, volume = {33}, number = {19}, pages = {3110-3112}, pmid = {28605406}, issn = {1367-4811}, support = {P30 CA034196/CA/NCI NIH HHS/United States ; R21 CA191848/CA/NCI NIH HHS/United States ; }, mesh = {Antigens, Neoplasm/chemistry/*genetics ; Genomics ; *High-Throughput Nucleotide Sequencing ; Histocompatibility Testing ; Humans ; Mutation ; Peptides/chemistry/genetics ; *Software ; Workflow ; }, abstract = {SUMMARY: We present CloudNeo, a cloud-based computational workflow for identifying patient-specific tumor neoantigens from next generation sequencing data. 
Tumor-specific mutant peptides can be detected by the immune system through their interactions with the human leukocyte antigen complex, and neoantigen presence has recently been shown to correlate with anti T-cell immunity and efficacy of checkpoint inhibitor therapy. However computing capabilities to identify neoantigens from genomic sequencing data are a limiting factor for understanding their role. This challenge has grown as cancer datasets become increasingly abundant, making them cumbersome to store and analyze on local servers. Our cloud-based pipeline provides scalable computation capabilities for neoantigen identification while eliminating the need to invest in local infrastructure for data transfer, storage or compute. The pipeline is a Common Workflow Language (CWL) implementation of human leukocyte antigen (HLA) typing using Polysolver or HLAminer combined with custom scripts for mutant peptide identification and NetMHCpan for neoantigen prediction. We have demonstrated the efficacy of these pipelines on Amazon cloud instances through the Seven Bridges Genomics implementation of the NCI Cancer Genomics Cloud, which provides graphical interfaces for running and editing, infrastructure for workflow sharing and version tracking, and access to TCGA data.

The CWL implementation is at: https://github.com/TheJacksonLaboratory/CloudNeo. For users who have obtained licenses for all internal software, integrated versions in CWL and on the Seven Bridges Cancer Genomics Cloud platform (https://cgc.sbgenomics.com/, recommended version) can be obtained by contacting the authors.

CONTACT: jeff.chuang@jax.org.

SUPPLEMENTARY INFORMATION: Supplementary data are available at Bioinformatics online.}, } @article {pmid28602100, year = {2017}, author = {Nowotka, MM and Gaulton, A and Mendez, D and Bento, AP and Hersey, A and Leach, A}, title = {Using ChEMBL web services for building applications and data processing workflows relevant to drug discovery.}, journal = {Expert opinion on drug discovery}, volume = {12}, number = {8}, pages = {757-767}, pmid = {28602100}, issn = {1746-045X}, support = {//Wellcome Trust/United Kingdom ; WT086151/Z/08/Z//Wellcome Trust/United Kingdom ; WT104104/Z/14/Z//Wellcome Trust/United Kingdom ; }, mesh = {Cloud Computing ; *Databases, Chemical ; Drug Discovery/*methods ; Humans ; Internet ; Pharmaceutical Preparations/*chemistry ; Software ; }, abstract = {ChEMBL is a manually curated database of bioactivity data on small drug-like molecules, used by drug discovery scientists. Among many access methods, a REST API provides programmatic access, allowing the remote retrieval of ChEMBL data and its integration into other applications. This approach allows scientists to move from a world where they go to the ChEMBL web site to search for relevant data, to one where ChEMBL data can be simply integrated into their everyday tools and work environment. Areas covered: This review highlights some of the audiences who may benefit from using the ChEMBL API, and the goals they can address, through the description of several use cases. The examples cover a team communication tool (Slack), a data analytics platform (KNIME), batch job management software (Luigi) and Rich Internet Applications. Expert opinion: The advent of web technologies, cloud computing and micro services oriented architectures have made REST APIs an essential ingredient of modern software development models. The widespread availability of tools consuming RESTful resources have made them useful for many groups of users. 
The ChEMBL API is a valuable resource of drug discovery bioactivity data for professional chemists, chemistry students, data scientists, scientific and web developers.}, } @article {pmid28592736, year = {2017}, author = {Nomoto, S and Utsumi, M and Sasayama, S and Dekigai, H}, title = {[A cloud-based home health care information sharing system to connect patients with home healthcare staff -A case report of a study in a mountainous region].}, journal = {Nihon Ronen Igakkai zasshi. Japanese journal of geriatrics}, volume = {54}, number = {2}, pages = {165-171}, doi = {10.3143/geriatrics.54.165}, pmid = {28592736}, issn = {0300-9173}, mesh = {Aged ; Altitude ; *Cloud Computing ; Delivery of Health Care ; Female ; *Home Care Services ; Humans ; *Information Dissemination ; Male ; }, abstract = {We have developed a cloud system, the e-Renraku Notebook (e-RN) for sharing of home care information based on the concept of "patient-centricity". In order to assess the likelihood that our system will enhance the communication and sharing of information between home healthcare staff members and home-care patients, we selected patients who were residing in mountainous regions for inclusion in our study. We herein report the findings.Eighteen staff members from 7 medical facilities and 9 patients participated in the present study.The e-RN was developed for two reasons: to allow patients to independently report their health status and to have staff members view and respond to the information received. The patients and staff members were given iPads with the pre-installed applications and the information being exchanged was reviewed over a 54-day period.Information was mainly input by the patients (61.6%), followed by the nurses who performed home visits (19.9%). 
The amount of information input by patients requiring high-level nursing care and their corresponding staff member was significantly greater than that input by patients who required low-level of nursing care.This patient-centric system in which patients can independently report and share information with a member of the healthcare staff provides a sense of security. It also allows staff members to understand the patient's health status before making a home visit, thereby giving them a sense of security and confidence. It was also noteworthy that elderly patients requiring high-level nursing care and their staff counterpart input information in the system significantly more frequently than patients who required low-level care.}, } @article {pmid28587246, year = {2017}, author = {Kim, Y and Oh, H and Kang, S}, title = {Proof of Concept of Home IoT Connected Vehicles.}, journal = {Sensors (Basel, Switzerland)}, volume = {17}, number = {6}, pages = {}, pmid = {28587246}, issn = {1424-8220}, abstract = {The way in which we interact with our cars is changing, driven by the increased use of mobile devices, cloud-based services, and advanced automotive technology. In particular, the requirements and market demand for the Internet of Things (IoT) device-connected vehicles will continuously increase. In addition, the advances in cloud computing and IoT have provided a promising opportunity for developing vehicular software and services in the automotive domain. In this paper, we introduce the concept of a home IoT connected vehicle with a voice-based virtual personal assistant comprised of a vehicle agent and a home agent. The proposed concept is evaluated by implementing a smartphone linked with home IoT devices that are connected to an infotainment system for the vehicle, a smartphone-based natural language interface input device, and cloud-based home IoT devices for the home. 
The home-to-vehicle connected service scenarios that aim to reduce the inconvenience due to simple and repetitive tasks by improving the urban mobility efficiency in IoT environments are substantiated by analyzing real vehicle testing and lifestyle research. Remarkable benefits are derived by making repetitive routine tasks one task that is executed by a command and by executing essential tasks automatically, without any request. However, it should be used with authorized permission, applied without any error at the right time, and applied under limited conditions to sense the habitants' intention correctly and to gain the required trust regarding the remote execution of tasks.}, } @article {pmid28580909, year = {2017}, author = {Castaño-Díez, D}, title = {The Dynamo package for tomography and subtomogram averaging: components for MATLAB, GPU computing and EC2 Amazon Web Services.}, journal = {Acta crystallographica. Section D, Structural biology}, volume = {73}, number = {Pt 6}, pages = {478-487}, pmid = {28580909}, issn = {2059-7983}, mesh = {Cloud Computing ; Imaging, Three-Dimensional/methods ; Programming Languages ; *Software ; Tomography/*methods ; }, abstract = {Dynamo is a package for the processing of tomographic data. As a tool for subtomogram averaging, it includes different alignment and classification strategies. Furthermore, its data-management module allows experiments to be organized in groups of tomograms, while offering specialized three-dimensional tomographic browsers that facilitate visualization, location of regions of interest, modelling and particle extraction in complex geometries. Here, a technical description of the package is presented, focusing on its diverse strategies for optimizing computing performance. Dynamo is built upon mbtools (middle layer toolbox), a general-purpose MATLAB library for object-oriented scientific programming specifically developed to underpin Dynamo but usable as an independent tool. 
Its structure intertwines a flexible MATLAB codebase with precompiled C++ functions that carry the burden of numerically intensive operations. The package can be delivered as a precompiled standalone ready for execution without a MATLAB license. Multicore parallelization on a single node is directly inherited from the high-level parallelization engine provided for MATLAB, automatically imparting a balanced workload among the threads in computationally intense tasks such as alignment and classification, but also in logistic-oriented tasks such as tomogram binning and particle extraction. Dynamo supports the use of graphical processing units (GPUs), yielding considerable speedup factors both for native Dynamo procedures (such as the numerically intensive subtomogram alignment) and procedures defined by the user through its MATLAB-based GPU library for three-dimensional operations. Cloud-based virtual computing environments supplied with a pre-installed version of Dynamo can be publicly accessed through the Amazon Elastic Compute Cloud (EC2), enabling users to rent GPU computing time on a pay-as-you-go basis, thus avoiding upfront investments in hardware and longterm software maintenance.}, } @article {pmid28580809, year = {2017}, author = {Kawel-Boehm, N and Bluemke, DA}, title = {Cardiovascular imaging environment: will the future be cloud-based?.}, journal = {Expert review of medical devices}, volume = {14}, number = {7}, pages = {521-528}, doi = {10.1080/17434440.2017.1338134}, pmid = {28580809}, issn = {1745-2422}, mesh = {Cardiovascular System/*diagnostic imaging ; *Cloud Computing ; *Computer Security ; *Data Warehousing ; Forecasting ; Humans ; *Magnetic Resonance Imaging ; Software ; *Tomography, X-Ray Computed ; }, abstract = {In cardiovascular CT and MR imaging large datasets have to be stored, post-processed, analyzed and distributed. 
Beside basic assessment of volume and function in cardiac magnetic resonance imaging e.g., more sophisticated quantitative analysis is requested requiring specific software. Several institutions cannot afford various types of software and provide expertise to perform sophisticated analysis. Areas covered: Various cloud services exist related to data storage and analysis specifically for cardiovascular CT and MR imaging. Instead of on-site data storage, cloud providers offer flexible storage services on a pay-per-use basis. To avoid purchase and maintenance of specialized software for cardiovascular image analysis, e.g. to assess myocardial iron overload, MR 4D flow and fractional flow reserve, evaluation can be performed with cloud based software by the consumer or complete analysis is performed by the cloud provider. However, challenges to widespread implementation of cloud services include regulatory issues regarding patient privacy and data security. Expert commentary: If patient privacy and data security is guaranteed cloud imaging is a valuable option to cope with storage of large image datasets and offer sophisticated cardiovascular image analysis for institutions of all sizes.}, } @article {pmid28572842, year = {2017}, author = {Moore, JH and Andrews, PC and Olson, RS and Carlson, SE and Larock, CR and Bulhoes, MJ and O'Connor, JP and Greytak, EM and Armentrout, SL}, title = {Grid-based stochastic search for hierarchical gene-gene interactions in population-based genetic studies of common human diseases.}, journal = {BioData mining}, volume = {10}, number = {}, pages = {19}, pmid = {28572842}, issn = {1756-0381}, support = {R42 GM097765/GM/NIGMS NIH HHS/United States ; }, abstract = {BACKGROUND: Large-scale genetic studies of common human diseases have focused almost exclusively on the independent main effects of single-nucleotide polymorphisms (SNPs) on disease susceptibility. 
These studies have had some success, but much of the genetic architecture of common disease remains unexplained. Attention is now turning to detecting SNPs that impact disease susceptibility in the context of other genetic factors and environmental exposures. These context-dependent genetic effects can manifest themselves as non-additive interactions, which are more challenging to model using parametric statistical approaches. The dimensionality that results from a multitude of genotype combinations, which results from considering many SNPs simultaneously, renders these approaches underpowered. We previously developed the multifactor dimensionality reduction (MDR) approach as a nonparametric and genetic model-free machine learning alternative. Approaches such as MDR can improve the power to detect gene-gene interactions but are limited in their ability to exhaustively consider SNP combinations in genome-wide association studies (GWAS), due to the combinatorial explosion of the search space. We introduce here a stochastic search algorithm called Crush for the application of MDR to modeling high-order gene-gene interactions in genome-wide data. The Crush-MDR approach uses expert knowledge to guide probabilistic searches within a framework that capitalizes on the use of biological knowledge to filter gene sets prior to analysis. Here we evaluated the ability of Crush-MDR to detect hierarchical sets of interacting SNPs using a biology-based simulation strategy that assumes non-additive interactions within genes and additivity in genetic effects between sets of genes within a biochemical pathway.

RESULTS: We show that Crush-MDR is able to identify genetic effects at the gene or pathway level significantly better than a baseline random search with the same number of model evaluations. We then applied the same methodology to a GWAS for Alzheimer's disease and showed base-level validation that Crush-MDR was able to identify a set of interacting genes with biological ties to Alzheimer's disease.

CONCLUSIONS: We discuss the role of stochastic search and cloud computing for detecting complex genetic effects in genome-wide data.}, } @article {pmid28554829, year = {2017}, author = {Meier, R and Ruttkies, C and Treutler, H and Neumann, S}, title = {Bioinformatics can boost metabolomics research.}, journal = {Journal of biotechnology}, volume = {261}, number = {}, pages = {137-141}, doi = {10.1016/j.jbiotec.2017.05.018}, pmid = {28554829}, issn = {1873-4863}, mesh = {*Biomedical Research ; *Computational Biology ; Mass Spectrometry ; *Metabolomics ; }, abstract = {Metabolomics is the modern term for the field of small molecule research in biology and biochemistry. Currently, metabolomics is undergoing a transition where the classic analytical chemistry is combined with modern cheminformatics and bioinformatics methods, paving the way for large-scale data analysis. We give some background on past developments, highlight current state-of-the-art approaches, and give a perspective on future requirements.}, } @article {pmid28552576, year = {2017}, author = {Frazier, Z and Xu, M and Alber, F}, title = {TomoMiner and TomoMinerCloud: A Software Platform for Large-Scale Subtomogram Structural Analysis.}, journal = {Structure (London, England : 1993)}, volume = {25}, number = {6}, pages = {951-961.e2}, pmid = {28552576}, issn = {1878-4186}, support = {P41 GM103712/GM/NIGMS NIH HHS/United States ; R01 GM096089/GM/NIGMS NIH HHS/United States ; }, mesh = {Chaperonin 10/chemistry ; Chaperonin 60/chemistry ; Cloud Computing/economics ; Clusterin ; Electron Microscope Tomography/*methods ; Image Processing, Computer-Assisted/methods ; *Software ; }, abstract = {Cryo-electron tomography (cryo-ET) captures the 3D electron density distribution of macromolecular complexes in close to native state. With the rapid advance of cryo-ET acquisition technologies, it is possible to generate large numbers (>100,000) of subtomograms, each containing a macromolecular complex. 
Often, these subtomograms represent a heterogeneous sample due to variations in the structure and composition of a complex in situ form or because particles are a mixture of different complexes. In this case subtomograms must be classified. However, classification of large numbers of subtomograms is a time-intensive task and often a limiting bottleneck. This paper introduces an open source software platform, TomoMiner, for large-scale subtomogram classification, template matching, subtomogram averaging, and alignment. Its scalable and robust parallel processing allows efficient classification of tens to hundreds of thousands of subtomograms. In addition, TomoMiner provides a pre-configured TomoMinerCloud computing service permitting users without sufficient computing resources instant access to TomoMiners high-performance features.}, } @article {pmid28552118, year = {2017}, author = {Long, J and Yuan, MJ}, title = {A novel clinical decision support algorithm for constructing complete medication histories.}, journal = {Computer methods and programs in biomedicine}, volume = {145}, number = {}, pages = {127-133}, doi = {10.1016/j.cmpb.2017.04.004}, pmid = {28552118}, issn = {1872-7565}, mesh = {Algorithms ; *Decision Support Systems, Clinical ; Humans ; Medical History Taking/*methods ; Prescription Drugs/*administration & dosage ; }, abstract = {A patient's complete medication history is a crucial element for physicians to develop a full understanding of the patient's medical conditions and treatment options. However, due to the fragmented nature of medical data, this process can be very time-consuming and often impossible for physicians to construct a complete medication history for complex patients. In this paper, we describe an accurate, computationally efficient and scalable algorithm to construct a medication history timeline. 
The algorithm is developed and validated based on 1 million random prescription records from a large national prescription data aggregator. Our evaluation shows that the algorithm can be scaled horizontally on-demand, making it suitable for future delivery in a cloud-computing environment. We also propose that this cloud-based medication history computation algorithm could be integrated into Electronic Medical Records, enabling informed clinical decision-making at the point of care.}, } @article {pmid28545151, year = {2017}, author = {Imran, M and Hlavacs, H and Haq, IU and Jan, B and Khan, FA and Ahmad, A}, title = {Provenance based data integrity checking and verification in cloud environments.}, journal = {PloS one}, volume = {12}, number = {5}, pages = {e0177576}, pmid = {28545151}, issn = {1932-6203}, mesh = {Algorithms ; *Cloud Computing ; *Computer Security ; Internet ; }, abstract = {Cloud computing is a recent tendency in IT that moves computing and data away from desktop and hand-held devices into large scale processing hubs and data centers respectively. It has been proposed as an effective solution for data outsourcing and on demand computing to control the rising cost of IT setups and management in enterprises. However, with Cloud platforms user's data is moved into remotely located storages such that users lose control over their data. This unique feature of the Cloud is facing many security and privacy challenges which need to be clearly understood and resolved. One of the important concerns that needs to be addressed is to provide the proof of data integrity, i.e., correctness of the user's data stored in the Cloud storage. The data in Clouds is physically not accessible to the users. Therefore, a mechanism is required where users can check if the integrity of their valuable data is maintained or compromised. For this purpose some methods are proposed like mirroring, checksumming and using third party auditors amongst others. 
However, these methods use extra storage space by maintaining multiple copies of data or the presence of a third party verifier is required. In this paper, we address the problem of proving data integrity in Cloud computing by proposing a scheme through which users are able to check the integrity of their data stored in Clouds. In addition, users can track the violation of data integrity if occurred. For this purpose, we utilize a relatively new concept in the Cloud computing called "Data Provenance". Our scheme is capable to reduce the need of any third party services, additional hardware support and the replication of data items on client side for integrity checking.}, } @article {pmid28544911, year = {2017}, author = {Retico, A and Arezzini, S and Bosco, P and Calderoni, S and Ciampa, A and Coscetti, S and Cuomo, S and De Santis, L and Fabiani, D and Fantacci, ME and Giuliano, A and Mazzoni, E and Mercatali, P and Miscali, G and Pardini, M and Prosperi, M and Romano, F and Tamburini, E and Tosetti, M and Muratori, F}, title = {ARIANNA: A research environment for neuroimaging studies in autism spectrum disorders.}, journal = {Computers in biology and medicine}, volume = {87}, number = {}, pages = {1-7}, doi = {10.1016/j.compbiomed.2017.05.017}, pmid = {28544911}, issn = {1879-0534}, mesh = {Autism Spectrum Disorder/*diagnostic imaging ; Brain/*diagnostic imaging ; Female ; Humans ; Internet ; Magnetic Resonance Imaging ; Male ; Neuroimaging/*methods ; }, abstract = {The complexity and heterogeneity of Autism Spectrum Disorders (ASD) require the implementation of dedicated analysis techniques to obtain the maximum from the interrelationship among many variables that describe affected individuals, spanning from clinical phenotypic characterization and genetic profile to structural and functional brain images. 
The ARIANNA project has developed a collaborative interdisciplinary research environment that is easily accessible to the community of researchers working on ASD (https://arianna.pi.infn.it). The main goals of the project are: to analyze neuroimaging data acquired in multiple sites with multivariate approaches based on machine learning; to detect structural and functional brain characteristics that allow the distinguishing of individuals with ASD from control subjects; to identify neuroimaging-based criteria to stratify the population with ASD to support the future development of personalized treatments. Secure data handling and storage are guaranteed within the project, as well as the access to fast grid/cloud-based computational resources. This paper outlines the web-based architecture, the computing infrastructure and the collaborative analysis workflows at the basis of the ARIANNA interdisciplinary working environment. It also demonstrates the full functionality of the research platform. The availability of this innovative working environment for analyzing clinical and neuroimaging information of individuals with ASD is expected to support researchers in disentangling complex data thus facilitating their interpretation.}, } @article {pmid28529761, year = {2017}, author = {Yin, J and Hu, J and Mu, Z}, title = {Developing and evaluating a mobile driver fatigue detection network based on electroencephalograph signals.}, journal = {Healthcare technology letters}, volume = {4}, number = {1}, pages = {34-38}, pmid = {28529761}, issn = {2053-3713}, abstract = {The rapid development of driver fatigue detection technology indicates important significance of traffic safety. The authors' main goals of this Letter are principally three: (i) A middleware architecture, defined as process unit (PU), which can communicate with personal electroencephalography (EEG) node (PEN) and cloud server (CS). 
The PU receives EEG signals from PEN, recognises the fatigue state of the driver, and transfer this information to CS. The CS sends notification messages to the surrounding vehicles. (ii) An android application for fatigue detection is built. The application can be used for the driver to detect the state of his/her fatigue based on EEG signals, and warn neighbourhood vehicles. (iii) The detection algorithm for driver fatigue is applied based on fuzzy entropy. The idea of 10-fold cross-validation and support vector machine are used for classified calculation. Experimental results show that the average accurate rate of detecting driver fatigue is about 95%, which implying that the algorithm is validity in detecting state of driver fatigue.}, } @article {pmid28522612, year = {2017}, author = {Mashl, RJ and Scott, AD and Huang, KL and Wyczalkowski, MA and Yoon, CJ and Niu, B and DeNardo, E and Yellapantula, VD and Handsaker, RE and Chen, K and Koboldt, DC and Ye, K and Fenyö, D and Raphael, BJ and Wendl, MC and Ding, L}, title = {GenomeVIP: a cloud platform for genomic variant discovery and interpretation.}, journal = {Genome research}, volume = {27}, number = {8}, pages = {1450-1459}, pmid = {28522612}, issn = {1549-5469}, support = {U24 CA210972/CA/NCI NIH HHS/United States ; R01 CA180006/CA/NCI NIH HHS/United States ; U24 CA211006/CA/NCI NIH HHS/United States ; R01 CA178383/CA/NCI NIH HHS/United States ; R01 CA172652/CA/NCI NIH HHS/United States ; U01 HG006517/HG/NHGRI NIH HHS/United States ; }, mesh = {*Cloud Computing ; Databases, Genetic ; *Genetic Variation ; *Genome, Human ; Genomics/*methods ; High-Throughput Nucleotide Sequencing/methods ; Humans ; Neoplasms/*genetics ; *Software ; }, abstract = {Identifying genomic variants is a fundamental first step toward the understanding of the role of inherited and acquired variation in disease. 
The accelerating growth in the corpus of sequencing data that underpins such analysis is making the data-download bottleneck more evident, placing substantial burdens on the research community to keep pace. As a result, the search for alternative approaches to the traditional "download and analyze" paradigm on local computing resources has led to a rapidly growing demand for cloud-computing solutions for genomics analysis. Here, we introduce the Genome Variant Investigation Platform (GenomeVIP), an open-source framework for performing genomics variant discovery and annotation using cloud- or local high-performance computing infrastructure. GenomeVIP orchestrates the analysis of whole-genome and exome sequence data using a set of robust and popular task-specific tools, including VarScan, GATK, Pindel, BreakDancer, Strelka, and Genome STRiP, through a web interface. GenomeVIP has been used for genomic analysis in large-data projects such as the TCGA PanCanAtlas and in other projects, such as the ICGC Pilots, CPTAC, ICGC-TCGA DREAM Challenges, and the 1000 Genomes SV Project. 
Here, we demonstrate GenomeVIP's ability to provide high-confidence annotated somatic, germline, and de novo variants of potential biological significance using publicly available data sets.}, } @article {pmid28518075, year = {2017}, author = {Joyce, BL and Haug-Baltzell, AK and Hulvey, JP and McCarthy, F and Devisetty, UK and Lyons, E}, title = {Leveraging CyVerse Resources for De Novo Comparative Transcriptomics of Underserved (Non-model) Organisms.}, journal = {Journal of visualized experiments : JoVE}, volume = {}, number = {123}, pages = {}, pmid = {28518075}, issn = {1940-087X}, mesh = {Animals ; Computational Biology/education/*methods ; Gene Expression Profiling/*methods ; Internet ; Sequence Analysis, RNA/methods ; *Software ; }, abstract = {This workflow allows novice researchers to leverage advanced computational resources such as cloud computing to carry out pairwise comparative transcriptomics. It also serves as a primer for biologists to develop data scientist computational skills, e.g. executing bash commands, visualization and management of large data sets. All command line code and further explanations of each command or step can be found on the wiki (https://wiki.cyverse.org/wiki/x/dgGtAQ). The Discovery Environment and Atmosphere platforms are connected together through the CyVerse Data Store. As such, once the initial raw sequencing data has been uploaded there is no more need to transfer large data files over an Internet connection, minimizing the amount of time needed to conduct analyses. This protocol is designed to analyze only two experimental treatments or conditions. Differential gene expression analysis is conducted through pairwise comparisons, and will not be suitable to test multiple factors. This workflow is also designed to be manual rather than automated. Each step must be executed and investigated by the user, yielding a better understanding of data and analytical outputs, and therefore better results for the user. 
Once complete, this protocol will yield de novo assembled transcriptome(s) for underserved (non-model) organisms without the need to map to previously assembled reference genomes (which are usually not available in underserved organism). These de novo transcriptomes are further used in pairwise differential gene expression analysis to investigate genes differing between two experimental conditions. Differentially expressed genes are then functionally annotated to understand the genetic response organisms have to experimental conditions. In total, the data derived from this protocol is used to test hypotheses about biological responses of underserved organisms.}, } @article {pmid28508809, year = {2017}, author = {Sadoughi, F and Erfannia, L}, title = {Health Information System in a Cloud Computing Context.}, journal = {Studies in health technology and informatics}, volume = {236}, number = {}, pages = {290-297}, pmid = {28508809}, issn = {1879-8365}, mesh = {*Cloud Computing ; Delivery of Health Care ; *Health Information Systems ; Humans ; Information Management ; *Medical Informatics ; }, abstract = {Healthcare as a worldwide industry is experiencing a period of growth based on health information technology. The capabilities of cloud systems make it as an option to develop eHealth goals. The main objectives of the present study was to evaluate the advantages and limitations of health information systems implementation in a cloud-computing context that was conducted as a systematic review in 2016. Science direct, Scopus, Web of science, IEEE, PubMed and Google scholar were searched according study criteria. Among 308 articles initially found, 21 articles were entered in the final analysis. All the studies had considered cloud computing as a positive tool to help advance health technology, but none had insisted too much on its limitations and threats. 
Electronic health record systems have been mostly studied in the fields of implementation, designing, and presentation of models and prototypes. According to this research, the main advantages of cloud-based health information systems could be categorized into the following groups: economic benefits and advantages of information management. The main limitations of the implementation of cloud-based health information systems could be categorized into the 4 groups of security, legal, technical, and human restrictions. Compared to earlier studies, the present research had the advantage of dealing with the issue of health information systems in a cloud platform. The high frequency of studies conducted on the implementation of cloud-based health information systems revealed health industry interest in the application of this technology. Security was a subject discussed in most studies due to health information sensitivity. In this investigation, some mechanisms and solutions were discussed concerning the mentioned systems, which would provide a suitable area for future scientific research on this issue. The limitations and solutions discussed in this systematic study would help healthcare managers and decision-makers take better and more efficient advantages of this technology and make better planning to adopt cloud-based health information systems.}, } @article {pmid28483746, year = {2017}, author = {Franco, RZ and Alawadhi, B and Fallaize, R and Lovegrove, JA and Hwang, F}, title = {A Web-Based Graphical Food Frequency Assessment System: Design, Development and Usability Metrics.}, journal = {JMIR human factors}, volume = {4}, number = {2}, pages = {e13}, pmid = {28483746}, issn = {2292-9495}, abstract = {BACKGROUND: Food frequency questionnaires (FFQs) are well established in the nutrition field, but there remain important questions around how to develop online tools in a way that can facilitate wider uptake. 
Also, FFQ user acceptance and evaluation have not been investigated extensively.

OBJECTIVE: This paper presents a Web-based graphical food frequency assessment system that addresses challenges of reproducibility, scalability, mobile friendliness, security, and usability and also presents the utilization metrics and user feedback from a deployment study.

METHODS: The application design employs a single-page application Web architecture with back-end services (database, authentication, and authorization) provided by Google Firebase's free plan. Its design and responsiveness take advantage of the Bootstrap framework. The FFQ was deployed in Kuwait as part of the EatWellQ8 study during 2016. The EatWellQ8 FFQ contains 146 food items (including drinks). Participants were recruited in Kuwait without financial incentive. Completion time was based on browser timestamps and usability was measured using the System Usability Scale (SUS), scoring between 0 and 100. Products with a SUS higher than 70 are considered to be good.

RESULTS: A total of 235 participants created accounts in the system, and 163 completed the FFQ. Of those 163 participants, 142 reported their gender (93 female, 49 male) and 144 reported their date of birth (mean age of 35 years, range from 18-65 years). The mean completion time for all FFQs (n=163), excluding periods of interruption, was 14.2 minutes (95% CI 13.3-15.1 minutes). Female participants (n=93) completed in 14.1 minutes (95% CI 12.9-15.3 minutes) and male participants (n=49) completed in 14.3 minutes (95% CI 12.6-15.9 minutes). Participants using laptops or desktops (n=69) completed the FFQ in an average of 13.9 minutes (95% CI 12.6-15.1 minutes) and participants using smartphones or tablets (n=91) completed in an average of 14.5 minutes (95% CI 13.2-15.8 minutes). The median SUS score (n=141) was 75.0 (interquartile range [IQR] 12.5), and 84% of the participants who completed the SUS classified the system either "good" (n=50) or "excellent" (n=69). Considering only participants using smartphones or tablets (n=80), the median score was 72.5 (IQR 12.5), slightly below the SUS median for desktops and laptops (n=58), which was 75.0 (IQR 12.5). No significant differences were found between genders or age groups (below and above the median) for the SUS or completion time.

CONCLUSIONS: Taking into account all the requirements, the deployment used professional cloud computing at no cost, and the resulting system had good user acceptance. The results for smartphones/tablets were comparable with desktops/laptops. This work has potential to promote wider uptake of online tools that can assess dietary intake at scale.}, } @article {pmid28482810, year = {2017}, author = {Yang, S and Santillana, M and Brownstein, JS and Gray, J and Richardson, S and Kou, SC}, title = {Using electronic health records and Internet search information for accurate influenza forecasting.}, journal = {BMC infectious diseases}, volume = {17}, number = {1}, pages = {332}, pmid = {28482810}, issn = {1471-2334}, support = {R01 LM010812/LM/NLM NIH HHS/United States ; }, mesh = {*Centers for Disease Control and Prevention, U.S. ; *Electronic Health Records ; Forecasting ; Humans ; Influenza, Human/*epidemiology ; Internet ; Population Surveillance/methods ; Seasons ; United States ; }, abstract = {BACKGROUND: Accurate influenza activity forecasting helps public health officials prepare and allocate resources for unusual influenza activity. Traditional flu surveillance systems, such as the Centers for Disease Control and Prevention's (CDC) influenza-like illnesses reports, lag behind real-time by one to 2 weeks, whereas information contained in cloud-based electronic health records (EHR) and in Internet users' search activity is typically available in near real-time. We present a method that combines the information from these two data sources with historical flu activity to produce national flu forecasts for the United States up to 4 weeks ahead of the publication of CDC's flu reports.

METHODS: We extend a method originally designed to track flu using Google searches, named ARGO, to combine information from EHR and Internet searches with historical flu activities. Our regularized multivariate regression model dynamically selects the most appropriate variables for flu prediction every week. The model is assessed for the flu seasons within the time period 2013-2016 using multiple metrics including root mean squared error (RMSE).

RESULTS: Our method reduces the RMSE of the publicly available alternative (Healthmap flutrends) method by 33, 20, 17 and 21%, for the four time horizons: real-time, one, two, and three weeks ahead, respectively. Such accuracy improvements are statistically significant at the 5% level. Our real-time estimates correctly identified the peak timing and magnitude of the studied flu seasons.

CONCLUSIONS: Our method significantly reduces the prediction error when compared to historical publicly available Internet-based prediction systems, demonstrating that: (1) the method to combine data sources is as important as data quality; (2) effectively extracting information from a cloud-based EHR and Internet search activity leads to accurate forecast of flu.}, } @article {pmid28479694, year = {2017}, author = {Gupta, N and Jadhav, K and Shah, V}, title = {Emperipolesis, entosis and cell cannibalism: Demystifying the cloud.}, journal = {Journal of oral and maxillofacial pathology : JOMFP}, volume = {21}, number = {1}, pages = {92-98}, pmid = {28479694}, issn = {0973-029X}, abstract = {There are intense published data in literature related to cell engulfment phenomena such as emperipolesis, entosis and cell cannibalism. All these are closely related phenomena with a very fine line of differences. Its correct identification has a significant diagnostic and prognostic value. After extensive literature search, a gap of knowledge was found in concept designing and clarity about understanding of aforementioned terminologies. The authors have attempted to review data of these closely knit terminologies and further organize its characteristic appearances, pathogenetic aspects and prognostic implications. The data published in English Language, from 1925 to 2015, were collected using keywords such as emperipolesis, entosis and cell cannibalism through scientific database systems such as MEDLINE, Science Direct, Cochrane Library and Google Scholar. Articles were selected which have focused to explain the phenomenon, presentation and pathogenesis of one or more of this phenomenon. A total of 48 articles were retrieved, thirty of which were selected. 
The various cell engulfment phenomena are very similar looking but operate through entirely different pathways.}, } @article {pmid28478867, year = {2017}, author = {Calvo-Ortega, JF and Pozo, M and Moragues, S and Casals, J}, title = {Targeting accuracy of single-isocenter intensity-modulated radiosurgery for multiple lesions.}, journal = {Medical dosimetry : official journal of the American Association of Medical Dosimetrists}, volume = {42}, number = {2}, pages = {104-110}, doi = {10.1016/j.meddos.2017.01.006}, pmid = {28478867}, issn = {1873-4022}, mesh = {Film Dosimetry ; Humans ; Neoplasms, Multiple Primary/*diagnostic imaging/*radiotherapy ; Phantoms, Imaging ; Radiosurgery/*methods ; Radiotherapy Dosage ; Radiotherapy Planning, Computer-Assisted/*methods ; Radiotherapy, Image-Guided/*methods ; Radiotherapy, Intensity-Modulated/*methods ; Reproducibility of Results ; Scattering, Radiation ; Sensitivity and Specificity ; Tomography, X-Ray Computed/instrumentation/*methods ; Treatment Outcome ; }, abstract = {To investigate the targeting accuracy of intensity-modulated SRS (IMRS) plans designed to simultaneously treat multiple brain metastases with a single isocenter. A home-made acrylic phantom able to support a film (EBT3) in its coronal plane was used. The phantom was CT scanned and three coplanar small targets (a central and two peripheral) were outlined in the Eclipse system. Peripheral targets were 6 cm apart from the central one. A reference IMRS plan was designed to simultaneously treat the three targets, but only a single isocenter located at the center of the central target was used. After positioning the phantom on the linac using the room lasers, a CBCT scan was acquired and the reference plan were mapped on it, by placing the planned isocenter at the intersection of the landmarks used in the film showing the linac isocenter. The mapped plan was then recalculated and delivered. 
The film dose distribution was derived using a cloud computing application (www.radiochromic.com) that uses a triple-channel dosimetry algorithm. Comparison of dose distributions using the gamma index (5%/1 mm) were performed over a 5 × 5 cm[2] region centered over each target. 2D shifts required to get the best gamma passing rates on the peripheral target regions were compared with the reported ones for the central target. The experiment was repeated ten times in different sessions. Average 2D shifts required to achieve optimal gamma passing rates (99%, 97%, 99%) were 0.7 mm (SD: 0.3 mm), 0.8 mm (SD: 0.4 mm) and 0.8 mm (SD: 0.3 mm), for the central and the two peripheral targets, respectively. No statistical differences (p > 0.05) were found for targeting accuracy between the central and the two peripheral targets. The study revealed a targeting accuracy within 1 mm for off-isocenter targets within 6 cm of the linac isocenter, when a single-isocenter IMRS plan is designed.}, } @article {pmid28478372, year = {2017}, author = {Scardapane, S and Di Lorenzo, P}, title = {A framework for parallel and distributed training of neural networks.}, journal = {Neural networks : the official journal of the International Neural Network Society}, volume = {91}, number = {}, pages = {42-54}, doi = {10.1016/j.neunet.2017.04.004}, pmid = {28478372}, issn = {1879-2782}, mesh = {Humans ; *Neural Networks, Computer ; }, abstract = {The aim of this paper is to develop a general framework for training neural networks (NNs) in a distributed environment, where training data is partitioned over a set of agents that communicate with each other through a sparse, possibly time-varying, connectivity pattern. In such distributed scenario, the training problem can be formulated as the (regularized) optimization of a non-convex social cost function, given by the sum of local (non-convex) costs, where each agent contributes with a single error term defined with respect to its local dataset. 
To devise a flexible and efficient solution, we customize a recently proposed framework for non-convex optimization over networks, which hinges on a (primal) convexification-decomposition technique to handle non-convexity, and a dynamic consensus procedure to diffuse information among the agents. Several typical choices for the training criterion (e.g., squared loss, cross entropy, etc.) and regularization (e.g., ℓ2 norm, sparsity inducing penalties, etc.) are included in the framework and explored along the paper. Convergence to a stationary solution of the social non-convex problem is guaranteed under mild assumptions. Additionally, we show a principled way allowing each agent to exploit a possible multi-core architecture (e.g., a local cloud) in order to parallelize its local optimization step, resulting in strategies that are both distributed (across the agents) and parallel (inside each agent) in nature. A comprehensive set of experimental results validate the proposed approach.}, } @article {pmid28475668, year = {2017}, author = {Expósito, RR and Veiga, J and González-Domínguez, J and Touriño, J}, title = {MarDRe: efficient MapReduce-based removal of duplicate DNA reads in the cloud.}, journal = {Bioinformatics (Oxford, England)}, volume = {33}, number = {17}, pages = {2762-2764}, doi = {10.1093/bioinformatics/btx307}, pmid = {28475668}, issn = {1367-4811}, mesh = {Algorithms ; Sequence Analysis, DNA/*methods ; *Software ; }, abstract = {SUMMARY: This article presents MarDRe, a de novo cloud-ready duplicate and near-duplicate removal tool that can process single- and paired-end reads from FASTQ/FASTA datasets. MarDRe takes advantage of the widely adopted MapReduce programming model to fully exploit Big Data technologies on cloud-based infrastructures. Written in Java to maximize cross-platform compatibility, MarDRe is built upon the open-source Apache Hadoop project, the most popular distributed computing framework for scalable Big Data processing. 
On a 16-node cluster deployed on the Amazon EC2 cloud platform, MarDRe is up to 8.52 times faster than a representative state-of-the-art tool.

Source code in Java and Hadoop as well as a user's guide are freely available under the GNU GPLv3 license at http://mardre.des.udc.es .

CONTACT: rreye@udc.es.}, } @article {pmid28475110, year = {2017}, author = {Li, S and Cui, J and Zhong, H and Liu, L}, title = {Public Auditing with Privacy Protection in a Multi-User Model of Cloud-Assisted Body Sensor Networks.}, journal = {Sensors (Basel, Switzerland)}, volume = {17}, number = {5}, pages = {}, pmid = {28475110}, issn = {1424-8220}, abstract = {Wireless Body Sensor Networks (WBSNs) are gaining importance in the era of the Internet of Things (IoT). The modern medical system is a particular area where the WBSN techniques are being increasingly adopted for various fundamental operations. Despite such increasing deployments of WBSNs, issues such as the infancy in the size, capabilities and limited data processing capacities of the sensor devices restrain their adoption in resource-demanding applications. Though providing computing and storage supplements from cloud servers can potentially enrich the capabilities of the WBSNs devices, data security is one of the prevailing issues that affects the reliability of cloud-assisted services. Sensitive applications such as modern medical systems demand assurance of the privacy of the users' medical records stored in distant cloud servers. Since it is economically impossible to set up private cloud servers for every client, auditing data security managed in the remote servers has necessarily become an integral requirement of WBSNs' applications relying on public cloud servers. To this end, this paper proposes a novel certificateless public auditing scheme with integrated privacy protection. The multi-user model in our scheme supports groups of users to store and share data, thus exhibiting the potential for WBSNs' deployments within community environments. Furthermore, our scheme enriches user experiences by offering public verifiability, forward security mechanisms and revocation of illegal group members. 
Experimental evaluations demonstrate the security effectiveness of our proposed scheme under the Random Oracle Model (ROM) by outperforming existing cloud-assisted WBSN models.}, } @article {pmid28468745, year = {2017}, author = {Mostaghimi, A and Olszewski, AE and Bell, SK and Roberts, DH and Crotty, BH}, title = {Erosion of Digital Professionalism During Medical Students' Core Clinical Clerkships.}, journal = {JMIR medical education}, volume = {3}, number = {1}, pages = {e9}, pmid = {28468745}, issn = {2369-3762}, abstract = {BACKGROUND: The increased use of social media, cloud computing, and mobile devices has led to the emergence of guidelines and novel teaching efforts to guide students toward the appropriate use of technology. Despite this, violations of professional conduct are common.

OBJECTIVE: We sought to explore professional behaviors specific to appropriate use of technology by looking at changes in third-year medical students' attitudes and behaviors at the beginning and conclusion of their clinical clerkships.

METHODS: After formal teaching about digital professionalism, we administered a survey to medical students that described 35 technology-related behaviors and queried students about professionalism of the behavior (on a 5-point Likert scale), observation of others engaging in the behavior (yes or no), as well as personal participation in the behavior (yes or no). Students were resurveyed at the end of the academic year.

RESULTS: Over the year, perceptions of what is considered acceptable behavior regarding privacy, data security, communications, and social media boundaries changed, despite formal teaching sessions to reinforce professional behavior. Furthermore, medical students who observed unprofessional behaviors were more likely to participate in such behaviors.

CONCLUSIONS: Although technology is a useful tool to enhance teaching and learning, our results reflect an erosion of professionalism related to information security that occurred despite medical school and hospital-based teaching sessions to promote digital professionalism. True alteration of trainee behavior will require a cultural shift that includes continual education, better role models, and frequent reminders for faculty, house staff, students, and staff.}, } @article {pmid28467505, year = {2017}, author = {Madni, SHH and Abd Latiff, MS and Abdullahi, M and Abdulhamid, SM and Usman, MJ}, title = {Performance comparison of heuristic algorithms for task scheduling in IaaS cloud computing environment.}, journal = {PloS one}, volume = {12}, number = {5}, pages = {e0176321}, pmid = {28467505}, issn = {1932-6203}, mesh = {*Algorithms ; *Cloud Computing ; *Heuristics ; *Task Performance and Analysis ; }, abstract = {Cloud computing infrastructure is suitable for meeting computational needs of large task sizes. Optimal scheduling of tasks in cloud computing environment has been proved to be an NP-complete problem, hence the need for the application of heuristic methods. Several heuristic algorithms have been developed and used in addressing this problem, but choosing the appropriate algorithm for solving task assignment problem of a particular nature is difficult since the methods are developed under different assumptions. Therefore, six rule based heuristic algorithms are implemented and used to schedule autonomous tasks in homogeneous and heterogeneous environments with the aim of comparing their performance in terms of cost, degree of imbalance, makespan and throughput. 
First Come First Serve (FCFS), Minimum Completion Time (MCT), Minimum Execution Time (MET), Max-min, Min-min and Sufferage are the heuristic algorithms considered for the performance comparison and analysis of task scheduling in cloud computing.}, } @article {pmid28449639, year = {2017}, author = {Agrawal, S and Arze, C and Adkins, RS and Crabtree, J and Riley, D and Vangala, M and Galens, K and Fraser, CM and Tettelin, H and White, O and Angiuoli, SV and Mahurkar, A and Fricke, WF}, title = {CloVR-Comparative: automated, cloud-enabled comparative microbial genome sequence analysis pipeline.}, journal = {BMC genomics}, volume = {18}, number = {1}, pages = {332}, pmid = {28449639}, issn = {1471-2164}, support = {HHSN272200900009C/AI/NIAID NIH HHS/United States ; }, mesh = {Automation ; *Cloud Computing ; Genome, Microbial/genetics ; Genomics/*methods ; Sequence Alignment ; Sequence Analysis ; *Software ; }, abstract = {BACKGROUND: The benefit of increasing genomic sequence data to the scientific community depends on easy-to-use, scalable bioinformatics support. CloVR-Comparative combines commonly used bioinformatics tools into an intuitive, automated, and cloud-enabled analysis pipeline for comparative microbial genomics.

RESULTS: CloVR-Comparative runs on annotated complete or draft genome sequences that are uploaded by the user or selected via a taxonomic tree-based user interface and downloaded from NCBI. CloVR-Comparative runs reference-free multiple whole-genome alignments to determine unique, shared and core coding sequences (CDSs) and single nucleotide polymorphisms (SNPs). Output includes short summary reports and detailed text-based results files, graphical visualizations (phylogenetic trees, circular figures), and a database file linked to the Sybil comparative genome browser. Data up- and download, pipeline configuration and monitoring, and access to Sybil are managed through CloVR-Comparative web interface. CloVR-Comparative and Sybil are distributed as part of the CloVR virtual appliance, which runs on local computers or the Amazon EC2 cloud. Representative datasets (e.g. 40 draft and complete Escherichia coli genomes) are processed in <36 h on a local desktop or at a cost of <$20 on EC2.

CONCLUSIONS: CloVR-Comparative allows anybody with Internet access to run comparative genomics projects, while eliminating the need for on-site computational resources and expertise.}, } @article {pmid28448434, year = {2017}, author = {Jia, G and Han, G and Wang, H and Yang, X}, title = {Static Memory Deduplication for Performance Optimization in Cloud Computing.}, journal = {Sensors (Basel, Switzerland)}, volume = {17}, number = {5}, pages = {}, pmid = {28448434}, issn = {1424-8220}, abstract = {In a cloud computing environment, the number of virtual machines (VMs) on a single physical server and the number of applications running on each VM are continuously growing. This has led to an enormous increase in the demand of memory capacity and subsequent increase in the energy consumption in the cloud. Lack of enough memory has become a major bottleneck for scalability and performance of virtualization interfaces in cloud computing. To address this problem, memory deduplication techniques which reduce memory demand through page sharing are being adopted. However, such techniques suffer from overheads in terms of number of online comparisons required for the memory deduplication. In this paper, we propose a static memory deduplication (SMD) technique which can reduce memory capacity requirement and provide performance optimization in cloud computing. The main innovation of SMD is that the process of page detection is performed offline, thus potentially reducing the performance cost, especially in terms of response time. In SMD, page comparisons are restricted to the code segment, which has the highest shared content. Our experimental results show that SMD efficiently reduces memory capacity requirement and improves performance. 
We demonstrate that, compared to other approaches, the cost in terms of the response time is negligible.}, } @article {pmid28434408, year = {2017}, author = {Sareen, S and Sood, SK and Gupta, SK}, title = {SECURE INTERNET OF THINGS-BASED CLOUD FRAMEWORK TO CONTROL ZIKA VIRUS OUTBREAK.}, journal = {International journal of technology assessment in health care}, volume = {33}, number = {1}, pages = {11-18}, doi = {10.1017/S0266462317000113}, pmid = {28434408}, issn = {1471-6348}, mesh = {*Bayes Theorem ; *Disease Outbreaks ; Humans ; *Internet ; Zika Virus ; Zika Virus Infection/epidemiology/*prevention & control ; }, abstract = {OBJECTIVES: Zika virus (ZikaV) is currently one of the most important emerging viruses in the world which has caused outbreaks and epidemics and has also been associated with severe clinical manifestations and congenital malformations. Traditional approaches to combat the ZikaV outbreak are not effective for detection and control. The aim of this study is to propose a cloud-based system to prevent and control the spread of Zika virus disease using integration of mobile phones and Internet of Things (IoT).

METHODS: A Naive Bayesian Network (NBN) is used to diagnose the possibly infected users, and Google Maps Web service is used to provide the geographic positioning system (GPS)-based risk assessment to prevent the outbreak. It is used to represent each ZikaV infected user, mosquito-dense sites, and breeding sites on the Google map that helps the government healthcare authorities to control such risk-prone areas effectively and efficiently.

RESULTS: The performance and accuracy of the proposed system are evaluated using dataset for 2 million users. Our system provides high accuracy for initial diagnosis of different users according to their symptoms and appropriate GPS-based risk assessment.

CONCLUSIONS: The cloud-based proposed system contributed to the accurate NBN-based classification of infected users and accurate identification of risk-prone areas using Google Maps.}, } @article {pmid28434256, year = {2017}, author = {Scheuermann, RH and Sinkovits, RS and Schenkelberg, T and Koff, WC}, title = {A bioinformatics roadmap for the human vaccines project.}, journal = {Expert review of vaccines}, volume = {16}, number = {6}, pages = {535-544}, doi = {10.1080/14760584.2017.1322752}, pmid = {28434256}, issn = {1744-8395}, mesh = {Computational Biology/*methods ; Drug Discovery/*methods ; Humans ; Vaccines/*immunology ; }, abstract = {Biomedical research has become a data intensive science in which high throughput experimentation is producing comprehensive data about biological systems at an ever-increasing pace. The Human Vaccines Project is a new public-private partnership, with the goal of accelerating development of improved vaccines and immunotherapies for global infectious diseases and cancers by decoding the human immune system. 
To achieve its mission, the Project is developing a Bioinformatics Hub as an open-source, multidisciplinary effort with the overarching goal of providing an enabling infrastructure to support the data processing, analysis and knowledge extraction procedures required to translate high throughput, high complexity human immunology research data into biomedical knowledge, to determine the core principles driving specific and durable protective immune responses.}, } @article {pmid28423800, year = {2017}, author = {Ramachandran, N and Mohamedally, D and Taylor, P}, title = {Project PEACH at UCLH: Student Projects in Healthcare Computing.}, journal = {Studies in health technology and informatics}, volume = {235}, number = {}, pages = {288-292}, pmid = {28423800}, issn = {1879-8365}, mesh = {Cloud Computing ; *Computing Methodologies ; Humans ; London ; Medical Informatics/*education ; Students ; }, abstract = {A collaboration between clinicians at UCLH and the Dept of Computer Science at UCL is giving students of computer science the opportunity to undertake real healthcare computing projects as part of their education. 
This is enabling the creation of a significant research computing platform within the Trust, based on open source components and hosted in the cloud, while providing a large group of students with experience of the specific challenges of health IT.}, } @article {pmid28419324, year = {2018}, author = {Karim, MR and Michel, A and Zappa, A and Baranov, P and Sahay, R and Rebholz-Schuhmann, D}, title = {Improving data workflow systems with cloud services and use of open data for bioinformatics research.}, journal = {Briefings in bioinformatics}, volume = {19}, number = {5}, pages = {1035-1050}, pmid = {28419324}, issn = {1477-4054}, mesh = {Big Data ; *Cloud Computing ; Computational Biology/*methods ; Data Interpretation, Statistical ; Database Management Systems ; Drug Discovery/statistics & numerical data ; Genomics/statistics & numerical data ; Humans ; Information Dissemination ; Knowledge Bases ; Semantic Web/statistics & numerical data ; User-Computer Interface ; *Workflow ; }, abstract = {Data workflow systems (DWFSs) enable bioinformatics researchers to combine components for data access and data analytics, and to share the final data analytics approach with their collaborators. Increasingly, such systems have to cope with large-scale data, such as full genomes (about 200 GB each), public fact repositories (about 100 TB of data) and 3D imaging data at even larger scales. As moving the data becomes cumbersome, the DWFS needs to embed its processes into a cloud infrastructure, where the data are already hosted. As the standardized public data play an increasingly important role, the DWFS needs to comply with Semantic Web technologies. This advancement to DWFS would reduce overhead costs and accelerate the progress in bioinformatics research based on large-scale data and public resources, as researchers would require less specialized IT knowledge for the implementation. 
Furthermore, the high data growth rates in bioinformatics research drive the demand for parallel and distributed computing, which then imposes a need for scalability and high-throughput capabilities onto the DWFS. As a result, requirements for data sharing and access to public knowledge bases suggest that compliance of the DWFS with Semantic Web standards is necessary. In this article, we will analyze the existing DWFS with regard to their capabilities toward public open data use as well as large-scale computational and human interface requirements. We untangle the parameters for selecting a preferable solution for bioinformatics research with particular consideration to using cloud services and Semantic Web technologies. Our analysis leads to research guidelines and recommendations toward the development of future DWFS for the bioinformatics research community.}, } @article {pmid28414531, year = {2017}, author = {Sağiroğlu, MŞ and Külekcİ, MO}, title = {A System Architecture for Efficient Transmission of Massive DNA Sequencing Data.}, journal = {Journal of computational biology : a journal of computational molecular cell biology}, volume = {24}, number = {11}, pages = {1081-1088}, doi = {10.1089/cmb.2017.0016}, pmid = {28414531}, issn = {1557-8666}, mesh = {Computational Biology/*methods ; Genomics/*methods ; High-Throughput Nucleotide Sequencing/*methods ; Humans ; Sequence Analysis, DNA/*methods ; *Software ; *Systems Biology ; }, abstract = {The DNA sequencing data analysis pipelines require significant computational resources. In that sense, cloud computing infrastructures appear as a natural choice for this processing. However, the first practical difficulty in reaching the cloud computing services is the transmission of the massive DNA sequencing data from where they are produced to where they will be processed. The daily practice here begins with compressing the data in FASTQ file format, and then sending these data via fast data transmission protocols. 
In this study, we address the weaknesses in that daily practice and present a new system architecture that incorporates the computational resources available on the client side while dynamically adapting itself to the available bandwidth. Our proposal considers the real-life scenarios, where the bandwidth of the connection between the parties may fluctuate, and also the computing power on the client side may be of any size ranging from moderate personal computers to powerful workstations. The proposed architecture aims at utilizing both the communication bandwidth and the computing resources for satisfying the ultimate goal of reaching the results as early as possible. We present a prototype implementation of the proposed architecture, and analyze several real-life cases, which provide useful insights for the sequencing centers, especially on deciding when to use a cloud service and in what conditions.}, } @article {pmid28413096, year = {2017}, author = {Yuan, S and Chan, HCS and Hu, Z}, title = {Implementing WebGL and HTML5 in Macromolecular Visualization and Modern Computer-Aided Drug Design.}, journal = {Trends in biotechnology}, volume = {35}, number = {6}, pages = {559-571}, doi = {10.1016/j.tibtech.2017.03.009}, pmid = {28413096}, issn = {1879-3096}, mesh = {Animals ; *Drug Design ; Humans ; *User-Computer Interface ; *Web Browser ; }, abstract = {Web browsers have long been recognized as potential platforms for remote macromolecule visualization. However, the difficulty in transferring large-scale data to clients and the lack of native support for hardware-accelerated applications in the local browser undermine the feasibility of such utilities. With the introduction of WebGL and HTML5 technologies in recent years, it is now possible to exploit the power of a graphics-processing unit (GPU) from a browser without any third-party plugin. Many new tools have been developed for biological molecule visualization and modern drug discovery. 
In contrast to traditional offline tools, real-time computing, interactive data analysis, and cross-platform analyses feature WebGL- and HTML5-based tools, facilitating biological research in a more efficient and user-friendly way.}, } @article {pmid28394305, year = {2017}, author = {Mar, J and Chang, TY and Wang, YJ}, title = {A Quadrilateral Geometry Classification Method and Device for Femtocell Positioning Networks.}, journal = {Sensors (Basel, Switzerland)}, volume = {17}, number = {4}, pages = {}, pmid = {28394305}, issn = {1424-8220}, abstract = {This article proposes a normalization multi-layer perception (NMLP) geometry classifier to autonomously determine the optimal four femtocell evolved Node Bs (FeNBs), which can use time difference of arrival (TDOA) to measure the location of the macrocell user equipment (MUE) with the lowest GDOP value. The iterative geometry training (IGT) algorithm is designed to obtain the training data for the NMLP geometry classifier. The architecture of the proposed NMLP geometry classifier is realized in the server of the cloud computing platform, to identify the optimal geometry disposition of four FeNBs for positioning the MUE located between two buildings. Six by six neurons are chosen for two hidden layers, in order to shorten the convergent time. The feasibility of the proposed method is demonstrated by means of numerical simulations. In addition, the simulation results also show that the proposed method is particularly suitable for the application of the MUE positioning with a huge number of FeNBs. 
Finally, three quadrilateral optimum geometry disposition decision criteria are analyzed for the validation of the simulation results.}, } @article {pmid28394287, year = {2017}, author = {Zhou, L and Chen, N and Chen, Z}, title = {Efficient Streaming Mass Spatio-Temporal Vehicle Data Access in Urban Sensor Networks Based on Apache Storm.}, journal = {Sensors (Basel, Switzerland)}, volume = {17}, number = {4}, pages = {}, pmid = {28394287}, issn = {1424-8220}, abstract = {The efficient data access of streaming vehicle data is the foundation of analyzing, using and mining vehicle data in smart cities, which is an approach to understand traffic environments. However, the number of vehicles in urban cities has grown rapidly, reaching hundreds of thousands in number. Accessing the mass streaming data of vehicles is hard and takes a long time due to limited computation capability and backward modes. We propose an efficient streaming spatio-temporal data access based on Apache Storm (ESDAS) to achieve real-time streaming data access and data cleaning. As a popular streaming data processing tool, Apache Storm can be applied to streaming mass data access and real time data cleaning. By designing the Spout/bolt workflow of topology in ESDAS and by developing the speeding bolt and other bolts, Apache Storm can achieve the prospective aim. In our experiments, Taiyuan BeiDou bus location data is selected as the mass spatio-temporal data source. In the experiments, the data access results with different bolts are shown in map form, and the filtered buses' aggregation forms are different. In terms of performance evaluation, the consumption time in ESDAS for ten thousand records per second for a speeding bolt is approximately 300 milliseconds, and that for MongoDB is approximately 1300 milliseconds. 
The efficiency of ESDAS is approximately three times higher than that of MongoDB.}, } @article {pmid28386197, year = {2017}, author = {Sun, Y and Zhang, N}, title = {A resource-sharing model based on a repeated game in fog computing.}, journal = {Saudi journal of biological sciences}, volume = {24}, number = {3}, pages = {687-694}, doi = {10.1016/j.sjbs.2017.01.043}, pmid = {28386197}, issn = {1319-562X}, abstract = {With the rapid development of cloud computing techniques, the number of users is undergoing exponential growth. It is difficult for traditional data centers to perform many tasks in real time because of the limited bandwidth of resources. The concept of fog computing is proposed to support traditional cloud computing and to provide cloud services. In fog computing, the resource pool is composed of sporadic distributed resources that are more flexible and movable than a traditional data center. In this paper, we propose a fog computing structure and present a crowd-funding algorithm to integrate spare resources in the network. Furthermore, to encourage more resource owners to share their resources with the resource pool and to supervise the resource supporters as they actively perform their tasks, we propose an incentive mechanism in our algorithm. Simulation results show that our proposed incentive mechanism can effectively reduce the SLA violation rate and accelerate the completion of tasks.}, } @article {pmid28382224, year = {2017}, author = {Christensen, PA and Lee, NE and Thrall, MJ and Powell, SZ and Chevez-Barrios, P and Long, SW}, title = {RecutClub.com: An Open Source, Whole Slide Image-based Pathology Education System.}, journal = {Journal of pathology informatics}, volume = {8}, number = {}, pages = {10}, pmid = {28382224}, issn = {2229-5089}, abstract = {BACKGROUND: Our institution's pathology unknown conferences provide educational cases for our residents. 
However, the cases have not been previously available digitally, have not been collated for postconference review, and were not accessible to a wider audience. Our objective was to create an inexpensive whole slide image (WSI) education suite to address these limitations and improve the education of pathology trainees.

MATERIALS AND METHODS: We surveyed residents regarding their preference between four unique WSI systems. We then scanned weekly unknown conference cases and study set cases and uploaded them to our custom built WSI viewer located at RecutClub.com. We measured site utilization and conference participation.

RESULTS: Residents preferred our OpenLayers WSI implementation to Ventana Virtuoso, Google Maps API, and OpenSlide. Over 16 months, we uploaded 1366 cases from 77 conferences and ten study sets, occupying 793.5 GB of cloud storage. Based on resident evaluations, the interface was easy to use and demonstrated minimal latency. Residents are able to review cases from home and from their mobile devices. Worldwide, 955 unique IP addresses from 52 countries have viewed cases in our site.

CONCLUSIONS: We implemented a low-cost, publicly available repository of WSI slides for resident education. Our trainees are very satisfied with the freedom to preview either the glass slides or WSI and review the WSI postconference. Both local users and worldwide users actively and repeatedly view cases in our study set.}, } @article {pmid28347445, year = {2017}, author = {Iribarren, SJ and Brown, W and Giguere, R and Stone, P and Schnall, R and Staggers, N and Carballo-Diéguez, A}, title = {Scoping review and evaluation of SMS/text messaging platforms for mHealth projects or clinical interventions.}, journal = {International journal of medical informatics}, volume = {101}, number = {}, pages = {28-40}, pmid = {28347445}, issn = {1872-8243}, support = {P30 MH062246/MH/NIMH NIH HHS/United States ; T15 LM007079/LM/NLM NIH HHS/United States ; R01 LM012355/LM/NLM NIH HHS/United States ; P30 MH043520/MH/NIMH NIH HHS/United States ; T32 NR014205/NR/NINR NIH HHS/United States ; R03 MH103957/MH/NIMH NIH HHS/United States ; }, mesh = {Cell Phone/*statistics & numerical data ; *Early Medical Intervention ; Evaluation Studies as Topic ; Humans ; *Telemedicine ; Text Messaging/*statistics & numerical data ; }, abstract = {OBJECTIVES: Mobile technology supporting text messaging interventions (TMIs) continues to evolve, presenting challenges for researchers and healthcare professionals who need to choose software solutions to best meet their program needs. The objective of this review was to systematically identify and compare text messaging platforms and to summarize their advantages and disadvantages as described in peer-reviewed literature.

METHODS: A scoping review was conducted using four steps: 1) identify currently available platforms through online searches and in mHealth repositories; 2) expand evaluation criteria of an mHealth mobile messaging toolkit and integrate prior user experiences as researchers; 3) evaluate each platform's functions and features based on the expanded criteria and a vendor survey; and 4) assess the documentation of platform use in the peer-review literature. Platforms meeting inclusion criteria were assessed independently by three reviewers and discussed until consensus was reached. The PRISMA guidelines were followed to report findings.

RESULTS: Of the 1041 potentially relevant search results, 27 platforms met inclusion criteria. Most were excluded because they were not platforms (e.g., guides, toolkits, reports, or SMS gateways). Of the 27 platforms, only 12 were identified in existing mHealth repositories, 10 from Google searches, while five were found in both. The expanded evaluation criteria included 22 items. Results indicate no uniform presentation of platform features and functions, often making these difficult to discern. Fourteen of the platforms were reported as open source, 10 focused on health care and 16 were tailored to meet needs of low resource settings (not mutually exclusive). Fifteen platforms had do-it-yourself setup (programming not required) while the remainder required coding/programming skills or setups could be built to specification by the vendor. Frequently described features included data security and access to the platform via cloud-based systems. Pay structures and reported targeted end-users varied. Peer-reviewed publications listed only 6 of the 27 platforms across 21 publications. The majority of these articles reported the name of the platform used but did not describe advantages or disadvantages.

CONCLUSIONS: Searching for and comparing mHealth platforms for TMIs remains a challenge. The results of this review can serve as a resource for researchers and healthcare professionals wanting to integrate TMIs into health interventions. Steps to identify, compare and assess advantages and disadvantages are outlined for consideration. Expanded evaluation criteria can be used by future researchers. Continued and more comprehensive platform tools should be integrated into mHealth repositories. Detailed descriptions of platform advantages and disadvantages are needed when mHealth researchers publish findings to expand the body of research on TMI tools for healthcare. Standardized descriptions and features are recommended for vendor sites.}, } @article {pmid28338620, year = {2017}, author = {Wang, L and Liu, G and Sun, L}, title = {A Secure and Privacy-Preserving Navigation Scheme Using Spatial Crowdsourcing in Fog-Based VANETs.}, journal = {Sensors (Basel, Switzerland)}, volume = {17}, number = {4}, pages = {}, pmid = {28338620}, issn = {1424-8220}, abstract = {Fog-based VANETs (Vehicular ad hoc networks) is a new paradigm of vehicular ad hoc networks with the advantages of both vehicular cloud and fog computing. Real-time navigation schemes based on fog-based VANETs can promote the scheme performance efficiently. In this paper, we propose a secure and privacy-preserving navigation scheme by using vehicular spatial crowdsourcing based on fog-based VANETs. Fog nodes are used to generate and release the crowdsourcing tasks, and cooperatively find the optimal route according to the real-time traffic information collected by vehicles in their coverage areas. Meanwhile, the vehicle performing the crowdsourcing task can get a reasonable reward. The querying vehicle can retrieve the navigation results from each fog node successively when entering its coverage area, and follow the optimal route to the next fog node until it reaches the desired destination. 
Our scheme fulfills the security and privacy requirements of authentication, confidentiality and conditional privacy preservation. Some cryptographic primitives, including the Elgamal encryption algorithm, AES, randomized anonymous credentials and group signatures, are adopted to achieve this goal. Finally, we analyze the security and the efficiency of the proposed scheme.}, } @article {pmid28335569, year = {2017}, author = {Jing, X and Hu, H and Yang, H and Au, MH and Li, S and Xiong, N and Imran, M and Vasilakos, AV}, title = {A Quantitative Risk Assessment Model Involving Frequency and Threat Degree under Line-of-Business Services for Infrastructure of Emerging Sensor Networks.}, journal = {Sensors (Basel, Switzerland)}, volume = {17}, number = {3}, pages = {}, pmid = {28335569}, issn = {1424-8220}, abstract = {The prospect of Line-of-Business Services (LoBSs) for infrastructure of Emerging Sensor Networks (ESNs) is exciting. Access control remains a top challenge in this scenario as the service provider's server contains a lot of valuable resources. LoBSs' users are very diverse as they may come from a wide range of locations with vastly different characteristics. Cost of joining could be low and in many cases, intruders are eligible users conducting malicious actions. As a result, user access should be adjusted dynamically. Assessing LoBSs' risk dynamically based on both frequency and threat degree of malicious operations is therefore necessary. In this paper, we proposed a Quantitative Risk Assessment Model (QRAM) involving frequency and threat degree based on value at risk. To quantify the threat degree as an elementary intrusion effort, we amend the influence coefficient of risk indexes in the network security situation assessment model. To quantify threat frequency as intrusion trace effort, we make use of multiple behavior information fusion. 
Under the influence of intrusion trace, we adapt the historical simulation method of value at risk to dynamically access LoBSs' risk. Simulation based on existing data is used to select appropriate parameters for QRAM. Our simulation results show that the duration influence on elementary intrusion effort is reasonable when the normalized parameter is 1000. Likewise, the time window of intrusion trace and the weight between objective risk and subjective risk can be set to 10 s and 0.5, respectively. While our focus is to develop QRAM for assessing the risk of LoBSs for infrastructure of ESNs dynamically involving frequency and threat degree, we believe it is also appropriate for other scenarios in cloud computing.}, } @article {pmid28328925, year = {2017}, author = {Yu, Y and Liang, M and Wang, Z}, title = {A source-controlled data center network model.}, journal = {PloS one}, volume = {12}, number = {3}, pages = {e0173442}, doi = {10.1371/journal.pone.0173442}, pmid = {28328925}, issn = {1932-6203}, mesh = {Algorithms ; Computer Communication Networks/*instrumentation ; Information Storage and Retrieval/methods ; Models, Theoretical ; Signal Processing, Computer-Assisted/instrumentation ; Software ; }, abstract = {The construction of data center network by applying SDN technology has become a hot research topic. The SDN architecture has innovatively separated the control plane from the data plane which makes the network more software-oriented and agile. Moreover, it provides virtual multi-tenancy, effective scheduling resources and centralized control strategies to meet the demand for cloud computing data center. However, the explosion of network information is facing severe challenges for SDN controller. The flow storage and lookup mechanisms based on TCAM device have led to the restriction of scalability, high cost and energy consumption. In view of this, a source-controlled data center network (SCDCN) model is proposed herein. 
The SCDCN model applies a new type of source routing address named the vector address (VA) as the packet-switching label. The VA completely defines the communication path and the data forwarding process can be finished solely relying on VA. There are four advantages in the SCDCN architecture. 1) The model adopts hierarchical multi-controllers and abstracts large-scale data center network into some small network domains that has solved the restriction for the processing ability of single controller and reduced the computational complexity. 2) Vector switches (VS) developed in the core network no longer apply TCAM for table storage and lookup that has significantly cut down the cost and complexity for switches. Meanwhile, the problem of scalability can be solved effectively. 3) The SCDCN model simplifies the establishment process for new flows and there is no need to download flow tables to VS. The amount of control signaling consumed when establishing new flows can be significantly decreased. 4) We design the VS on the NetFPGA platform. The statistical results show that the hardware resource consumption in a VS is about 27% of that in an OFS.}, } @article {pmid28327936, year = {2017}, author = {Mittal, V and Hung, LH and Keswani, J and Kristiyanto, D and Lee, SB and Yeung, KY}, title = {GUIdock-VNC: using a graphical desktop sharing system to provide a browser-based interface for containerized software.}, journal = {GigaScience}, volume = {6}, number = {4}, pages = {1-6}, pmid = {28327936}, issn = {2047-217X}, support = {U54 HL127624/HL/NHLBI NIH HHS/United States ; }, mesh = {Computational Biology/*methods ; Gene Regulatory Networks ; *Software ; Systems Biology/methods ; *User-Computer Interface ; *Web Browser ; }, abstract = {BACKGROUND: Software container technology such as Docker can be used to package and distribute bioinformatics workflows consisting of multiple software implementations and dependencies. 
However, Docker is a command line-based tool, and many bioinformatics pipelines consist of components that require a graphical user interface.

RESULTS: We present a container tool called GUIdock-VNC that uses a graphical desktop sharing system to provide a browser-based interface for containerized software. GUIdock-VNC uses the Virtual Network Computing protocol to render the graphics within most commonly used browsers. We also present a minimal image builder that can add our proposed graphical desktop sharing system to any Docker packages, with the end result that any Docker packages can be run using a graphical desktop within a browser. In addition, GUIdock-VNC uses the Oauth2 authentication protocols when deployed on the cloud.

CONCLUSIONS: As a proof-of-concept, we demonstrated the utility of GUIdock-noVNC in gene network inference. We benchmarked our container implementation on various operating systems and showed that our solution creates minimal overhead.}, } @article {pmid28327935, year = {2017}, author = {Kiar, G and Gorgolewski, KJ and Kleissas, D and Roncal, WG and Litt, B and Wandell, B and Poldrack, RA and Wiener, M and Vogelstein, RJ and Burns, R and Vogelstein, JT}, title = {Science in the cloud (SIC): A use case in MRI connectomics.}, journal = {GigaScience}, volume = {6}, number = {5}, pages = {1-10}, pmid = {28327935}, issn = {2047-217X}, mesh = {*Cloud Computing ; Connectome ; Humans ; Image Processing, Computer-Assisted ; Internet ; Magnetic Resonance Imaging ; *Science ; Software ; }, abstract = {Modern technologies are enabling scientists to collect extraordinary amounts of complex and sophisticated data across a huge range of scales like never before. With this onslaught of data, we can allow the focal point to shift from data collection to data analysis. Unfortunately, lack of standardized sharing mechanisms and practices often make reproducing or extending scientific results very difficult. With the creation of data organization structures and tools that drastically improve code portability, we now have the opportunity to design such a framework for communicating extensible scientific discoveries. Our proposed solution leverages these existing technologies and standards, and provides an accessible and extensible model for reproducible research, called 'science in the cloud' (SIC). Exploiting scientific containers, cloud computing, and cloud data services, we show the capability to compute in the cloud and run a web service that enables intimate interaction with the tools and data presented. 
We hope this model will inspire the community to produce reproducible and, importantly, extensible results that will enable us to collectively accelerate the rate at which scientific breakthroughs are discovered, replicated, and extended.}, } @article {pmid28316653, year = {2017}, author = {Capuccini, M and Ahmed, L and Schaal, W and Laure, E and Spjuth, O}, title = {Large-scale virtual screening on public cloud resources with Apache Spark.}, journal = {Journal of cheminformatics}, volume = {9}, number = {}, pages = {15}, pmid = {28316653}, issn = {1758-2946}, abstract = {BACKGROUND: Structure-based virtual screening is an in-silico method to screen a target receptor against a virtual molecular library. Applying docking-based screening to large molecular libraries can be computationally expensive, however it constitutes a trivially parallelizable task. Most of the available parallel implementations are based on message passing interface, relying on low failure rate hardware and fast network connection. Google's MapReduce revolutionized large-scale analysis, enabling the processing of massive datasets on commodity hardware and cloud resources, providing transparent scalability and fault tolerance at the software level. Open source implementations of MapReduce include Apache Hadoop and the more recent Apache Spark.

RESULTS: We developed a method to run existing docking-based screening software on distributed cloud resources, utilizing the MapReduce approach. We benchmarked our method, which is implemented in Apache Spark, docking a publicly available target receptor against ~2.2 M compounds. The performance experiments show a good parallel efficiency (87%) when running in a public cloud environment.

CONCLUSION: Our method enables parallel Structure-based virtual screening on public cloud resources or commodity computer clusters. The degree of scalability that we achieve allows for trying out our method on relatively small libraries first and then to scale to larger libraries. Our implementation is named Spark-VS and it is freely available as open source from GitHub (https://github.com/mcapuccini/spark-vs).Graphical abstract.}, } @article {pmid28302555, year = {2017}, author = {Mulder, NJ and Adebiyi, E and Adebiyi, M and Adeyemi, S and Ahmed, A and Ahmed, R and Akanle, B and Alibi, M and Armstrong, DL and Aron, S and Ashano, E and Baichoo, S and Benkahla, A and Brown, DK and Chimusa, ER and Fadlelmola, FM and Falola, D and Fatumo, S and Ghedira, K and Ghouila, A and Hazelhurst, S and Isewon, I and Jung, S and Kassim, SK and Kayondo, JK and Mbiyavanga, M and Meintjes, A and Mohammed, S and Mosaku, A and Moussa, A and Muhammd, M and Mungloo-Dilmohamud, Z and Nashiru, O and Odia, T and Okafor, A and Oladipo, O and Osamor, V and Oyelade, J and Sadki, K and Salifu, SP and Soyemi, J and Panji, S and Radouani, F and Souiai, O and Tastan Bishop, Ö and , }, title = {Development of Bioinformatics Infrastructure for Genomics Research.}, journal = {Global heart}, volume = {12}, number = {2}, pages = {91-98}, pmid = {28302555}, issn = {2211-8179}, support = {U41 HG006941/HG/NHGRI NIH HHS/United States ; }, mesh = {Africa ; Biomedical Research/*methods ; Computational Biology/*trends ; Genomics/*methods ; Humans ; }, abstract = {BACKGROUND: Although pockets of bioinformatics excellence have developed in Africa, generally, large-scale genomic data analysis has been limited by the availability of expertise and infrastructure. H3ABioNet, a pan-African bioinformatics network, was established to build capacity specifically to enable H3Africa (Human Heredity and Health in Africa) researchers to analyze their data in Africa. 
Since the inception of the H3Africa initiative, H3ABioNet's role has evolved in response to changing needs from the consortium and the African bioinformatics community.

OBJECTIVES: H3ABioNet set out to develop core bioinformatics infrastructure and capacity for genomics research in various aspects of data collection, transfer, storage, and analysis.

METHODS AND RESULTS: Various resources have been developed to address genomic data management and analysis needs of H3Africa researchers and other scientific communities on the continent. NetMap was developed and used to build an accurate picture of network performance within Africa and between Africa and the rest of the world, and Globus Online has been rolled out to facilitate data transfer. A participant recruitment database was developed to monitor participant enrollment, and data is being harmonized through the use of ontologies and controlled vocabularies. The standardized metadata will be integrated to provide a search facility for H3Africa data and biospecimens. Because H3Africa projects are generating large-scale genomic data, facilities for analysis and interpretation are critical. H3ABioNet is implementing several data analysis platforms that provide a large range of bioinformatics tools or workflows, such as Galaxy, the Job Management System, and eBiokits. A set of reproducible, portable, and cloud-scalable pipelines to support the multiple H3Africa data types are also being developed and dockerized to enable execution on multiple computing infrastructures. In addition, new tools have been developed for analysis of the uniquely divergent African data and for downstream interpretation of prioritized variants. To provide support for these and other bioinformatics queries, an online bioinformatics helpdesk backed by broad consortium expertise has been established. Further support is provided by means of various modes of bioinformatics training.

CONCLUSIONS: For the past 4 years, the development of infrastructure support and human capacity through H3ABioNet, have significantly contributed to the establishment of African scientific networks, data analysis facilities, and training programs. Here, we describe the infrastructure and how it has affected genomics and bioinformatics research in Africa.}, } @article {pmid28295580, year = {2017}, author = {Guigas, B}, title = {SpecPad: device-independent NMR data visualization and processing based on the novel DART programming language and Html5 Web technology.}, journal = {Magnetic resonance in chemistry : MRC}, volume = {55}, number = {9}, pages = {821-827}, doi = {10.1002/mrc.4592}, pmid = {28295580}, issn = {1097-458X}, abstract = {SpecPad is a new device-independent software program for the visualization and processing of one-dimensional and two-dimensional nuclear magnetic resonance (NMR) time domain (FID) and frequency domain (spectrum) data. It is the result of a project to investigate whether the novel programming language DART, in combination with Html5 Web technology, forms a suitable base to write an NMR data evaluation software which runs on modern computing devices such as Android, iOS, and Windows tablets as well as on Windows, Linux, and Mac OS X desktop PCs and notebooks. Another topic of interest is whether this technique also effectively supports the required sophisticated graphical and computational algorithms. SpecPad is device-independent because DART's compiled executable code is JavaScript and can, therefore, be run by the browsers of PCs and tablets. Because of Html5 browser cache technology, SpecPad may be operated off-line. Network access is only required during data import or export, e.g. via a Cloud service, or for software updates. 
A professional and easy to use graphical user interface consistent across all hardware platforms supports touch screen features on mobile devices for zooming and panning and for NMR-related interactive operations such as phasing, integration, peak picking, or atom assignment. Copyright © 2017 John Wiley & Sons, Ltd.}, } @article {pmid28282962, year = {2017}, author = {Li, J and Hu, H and Ke, Q and Xiong, N}, title = {A Novel Topology Link-Controlling Approach for Active Defense of a Node in a Network.}, journal = {Sensors (Basel, Switzerland)}, volume = {17}, number = {3}, pages = {}, pmid = {28282962}, issn = {1424-8220}, abstract = {With the rapid development of virtual machine technology and cloud computing, distributed denial of service (DDoS) attacks, or some peak traffic, poses a great threat to the security of the network. In this paper, a novel topology link control technique and mitigation attacks in real-time environments is proposed. Firstly, a non-invasive method of deploying virtual sensors in the nodes is built, which uses the resource manager of each monitored node as a sensor. Secondly, a general topology-controlling approach of resisting the tolerant invasion is proposed. In the proposed approach, a prediction model is constructed by using copula functions for predicting the peak of a resource through another resource. The result of prediction determines whether or not to initiate the active defense. Finally, a minority game with incomplete strategy is employed to suppress attack flows and improve the permeability of the normal flows. 
The simulation results show that the proposed approach is very effective in protecting nodes.}, } @article {pmid28278229, year = {2017}, author = {Liu, J and Wu, Z and Wu, J and Dong, J and Zhao, Y and Wen, D}, title = {A Weibull distribution accrual failure detector for cloud computing.}, journal = {PloS one}, volume = {12}, number = {3}, pages = {e0173666}, doi = {10.1371/journal.pone.0173666}, pmid = {28278229}, issn = {1932-6203}, mesh = {*Algorithms ; Cloud Computing/*standards ; Humans ; Information Storage and Retrieval/*standards ; *Software ; *Statistical Distributions ; }, abstract = {Failure detectors are used to build high availability distributed systems as the fundamental component. To meet the requirement of a complicated large-scale distributed system, accrual failure detectors that can adapt to multiple applications have been studied extensively. However, several implementations of accrual failure detectors do not adapt well to the cloud service environment. To solve this problem, a new accrual failure detector based on Weibull Distribution, called the Weibull Distribution Failure Detector, has been proposed specifically for cloud computing. It can adapt to the dynamic and unexpected network conditions in cloud computing. The performance of the Weibull Distribution Failure Detector is evaluated and compared based on public classical experiment data and cloud computing experiment data. 
The results show that the Weibull Distribution Failure Detector has better performance in terms of speed and accuracy in unstable scenarios, especially in cloud computing.}, } @article {pmid28272305, year = {2017}, author = {Ramírez De La Pinta, J and Maestre Torreblanca, JM and Jurado, I and Reyes De Cozar, S}, title = {Off the Shelf Cloud Robotics for the Smart Home: Empowering a Wireless Robot through Cloud Computing.}, journal = {Sensors (Basel, Switzerland)}, volume = {17}, number = {3}, pages = {}, pmid = {28272305}, issn = {1424-8220}, abstract = {In this paper, we explore the possibilities offered by the integration of home automation systems and service robots. In particular, we examine how advanced computationally expensive services can be provided by using a cloud computing approach to overcome the limitations of the hardware available at the user's home. To this end, we integrate two wireless low-cost, off-the-shelf systems in this work, namely, the service robot Rovio and the home automation system Z-wave. Cloud computing is used to enhance the capabilities of these systems so that advanced sensing and interaction services based on image processing and voice recognition can be offered.}, } @article {pmid28269829, year = {2016}, author = {Bremer, E and Kurc, T and Gao, Y and Saltz, J and Almeida, JS}, title = {Safe "cloudification" of large images through picker APIs.}, journal = {AMIA ... Annual Symposium proceedings. 
AMIA Symposium}, volume = {2016}, number = {}, pages = {342-351}, pmid = {28269829}, issn = {1942-597X}, support = {R01 LM009239/LM/NLM NIH HHS/United States ; R01 LM011119/LM/NLM NIH HHS/United States ; U24 CA180924/CA/NCI NIH HHS/United States ; }, mesh = {*Cloud Computing ; *Computer Systems ; *Image Processing, Computer-Assisted ; Internet ; *Software ; }, abstract = {The "Box model" allows users with no particular training in informatics, or access to specialized infrastructure, to operate generic cloud computing resources through a temporary URI dereferencing mechanism known as "drop-file-picker API" ("picker API" for short). This application programming interface (API) was popularized in the web app development community by DropBox, and is now a consumer-facing feature of all major cloud computing platforms such as Box.com, Google Drive and Amazon S3. This report describes a prototype web service application that uses picker APIs to expose a new, "cloudified", API tailored for image analysis, without compromising the private governance of the data exposed. In order to better understand this cross-platform cloud computing landscape, we first measured the time for both transfer and traversing of large image files generated by whole slide imaging (WSI) in Digital Pathology. The verification that there is extensive interconnectivity between cloud resources led to the development of a prototype software application that exposes an image-traversing REST API to image files stored in any of the consumer-facing "boxes". In summary, an image file can be uploaded/synchronized into any cloud resource with a file picker API and the prototype service described here will expose an HTTP REST API that remains within the safety of the user's own governance. The open source prototype is publicly available at sbu-bmi.github.io/imagebox. 
Availability The accompanying prototype application is made publicly available, fully functional, with open source, at http://sbu-bmi.github.io/imagebox. An illustrative webcasted use of this Web App is included with the project codebase at https://github.com/SBU-BMI/imagebox.}, } @article {pmid28269816, year = {2017}, author = {Zhang, Z and Wen, T and Huang, W and Wang, M and Li, C}, title = {Automatic epileptic seizure detection in EEGs using MF-DFA, SVM based on cloud computing.}, journal = {Journal of X-ray science and technology}, volume = {25}, number = {2}, pages = {261-272}, doi = {10.3233/XST-17258}, pmid = {28269816}, issn = {1095-9114}, mesh = {*Cloud Computing ; Electroencephalography/*methods ; Epilepsy/*diagnosis/physiopathology ; Humans ; *Signal Processing, Computer-Assisted ; *Support Vector Machine ; }, abstract = {BACKGROUND: Epilepsy is a chronic disease with transient brain dysfunction that results from the sudden abnormal discharge of neurons in the brain. Since electroencephalogram (EEG) is a harmless and noninvasive detection method, it plays an important role in the detection of neurological diseases. However, the process of analyzing EEG to detect neurological diseases is often difficult because the brain electrical signals are random, non-stationary and nonlinear.

OBJECTIVE: In order to overcome such difficulty, this study aims to develop a new computer-aided scheme for automatic epileptic seizure detection in EEGs based on multi-fractal detrended fluctuation analysis (MF-DFA) and support vector machine (SVM).

METHODS: New scheme first extracts features from EEG by MF-DFA during the first stage. Then, the scheme applies a genetic algorithm (GA) to calculate parameters used in SVM and classify the training data according to the selected features using SVM. Finally, the trained SVM classifier is exploited to detect neurological diseases. The algorithm utilizes MLlib from library of SPARK and runs on cloud platform.

RESULTS: Applying to a public dataset for experiment, the study results show that the new feature extraction method and scheme can detect signals with less features and the accuracy of the classification reached up to 99%.

CONCLUSIONS: MF-DFA is a promising approach to extract features for analyzing EEG, because of its simple algorithm procedure and less parameters. The features obtained by MF-DFA can represent samples as well as traditional wavelet transform and Lyapunov exponents. GA can always find useful parameters for SVM with enough execution time. The results illustrate that the classification model can achieve comparable accuracy, which means that it is effective in epileptic seizure detection.}, } @article {pmid28269619, year = {2016}, author = {Patel, S and McGinnis, RS and Silva, I and DiCristofaro, S and Mahadevan, N and Jortberg, E and Franco, J and Martin, A and Lust, J and Raj, M and McGrane, B and DePetrillo, P and Aranyosi, AJ and Ceruolo, M and Pindado, J and Ghaffari, R}, title = {A wearable computing platform for developing cloud-based machine learning models for health monitoring applications.}, journal = {Annual International Conference of the IEEE Engineering in Medicine and Biology Society. IEEE Engineering in Medicine and Biology Society. Annual International Conference}, volume = {2016}, number = {}, pages = {5997-6001}, doi = {10.1109/EMBC.2016.7592095}, pmid = {28269619}, issn = {2694-0604}, mesh = {Activities of Daily Living ; Adult ; *Cloud Computing ; Female ; Humans ; *Machine Learning ; Male ; *Models, Theoretical ; Monitoring, Ambulatory/*instrumentation/*methods ; }, abstract = {Wearable sensors have the potential to enable clinical-grade ambulatory health monitoring outside the clinic. Technological advances have enabled development of devices that can measure vital signs with great precision and significant progress has been made towards extracting clinically meaningful information from these devices in research studies. However, translating measurement accuracies achieved in the controlled settings such as the lab and clinic to unconstrained environments such as the home remains a challenge. 
In this paper, we present a novel wearable computing platform for unobtrusive collection of labeled datasets and a new paradigm for continuous development, deployment and evaluation of machine learning models to ensure robust model performance as we transition from the lab to home. Using this system, we train activity classification models across two studies and track changes in model performance as we go from constrained to unconstrained settings.}, } @article {pmid28269567, year = {2016}, author = {Preejith, SP and Ravindran, AS and Hajare, R and Joseph, J and Sivaprakasam, M}, title = {A wrist worn SpO2 monitor with custom finger probe for motion artifact removal.}, journal = {Annual International Conference of the IEEE Engineering in Medicine and Biology Society. IEEE Engineering in Medicine and Biology Society. Annual International Conference}, volume = {2016}, number = {}, pages = {5777-5780}, doi = {10.1109/EMBC.2016.7592040}, pmid = {28269567}, issn = {2694-0604}, mesh = {Algorithms ; *Artifacts ; *Fingers ; Heart Rate ; Humans ; Monitoring, Physiologic ; *Movement ; Oximetry/*instrumentation ; Oxygen/*blood ; Photoplethysmography/*instrumentation ; *Wrist ; }, abstract = {Continuous monitoring of blood oxygen saturation (SpO2) level and heart rate is critical in surgery, ICUs and patients suffering from Chronic Obstructive Pulmonary Diseases. Pulse oximeters which compute SpO2 using transmittance photoplethysmography (PPG), is widely accepted for continuous monitoring. Presence of motion artifacts in PPG signals is a major obstacle in the extraction of reliable cardiovascular parameters, in real time and continuous monitoring applications. In this paper, a wrist worn device with a custom finger probe with an integrated accelerometer to remove motion artifacts is presented. An algorithm which can run on low power systems with processing constraints is implemented on the device. 
The device does continuous acquisition of PPG and accelerometer waveforms and computes SpO2 using the proposed light weight algorithm. The measurement results are continuously synced with an Android tablet, which acts as a gateway and is pushed on to the cloud for further analysis. The accuracy in SpO2 measured by the device was validated using Fluke ProSim 8 SpO2 simulator and the efficiency in accurately computing SpO2 in the presence of motion was validated over 40 healthy volunteers in a controlled setting.}, } @article {pmid28269002, year = {2016}, author = {Roychowdhury, S}, title = {Classification of large-scale fundus image data sets: a cloud-computing framework.}, journal = {Annual International Conference of the IEEE Engineering in Medicine and Biology Society. IEEE Engineering in Medicine and Biology Society. Annual International Conference}, volume = {2016}, number = {}, pages = {3256-3259}, doi = {10.1109/EMBC.2016.7591423}, pmid = {28269002}, issn = {2694-0604}, mesh = {Algorithms ; *Cloud Computing ; *Databases as Topic ; Diabetic Retinopathy/diagnosis ; Female ; *Fundus Oculi ; Humans ; *Image Interpretation, Computer-Assisted ; ROC Curve ; Reproducibility of Results ; }, abstract = {Large medical image data sets with high dimensionality require substantial amount of computation time for data creation and data processing. This paper presents a novel generalized method that finds optimal image-based feature sets that reduce computational time complexity while maximizing overall classification accuracy for detection of diabetic retinopathy (DR). First, region-based and pixel-based features are extracted from fundus images for classification of DR lesions and vessel-like structures. Next, feature ranking strategies are used to distinguish the optimal classification feature sets. 
DR lesion and vessel classification accuracies are computed using the boosted decision tree and decision forest classifiers in the Microsoft Azure Machine Learning Studio platform, respectively. For images from the DIARETDB1 data set, 40 of its highest-ranked features are used to classify four DR lesion types with an average classification accuracy of 90.1% in 792 seconds. Also, for classification of red lesion regions and hemorrhages from microaneurysms, accuracies of 85% and 72% are observed, respectively. For images from STARE data set, 40 high-ranked features can classify minor blood vessels with an accuracy of 83.5% in 326 seconds. Such cloud-based fundus image analysis systems can significantly enhance the borderline classification performances in automated screening systems.}, } @article {pmid28261533, year = {2017}, author = {Bitsaki, M and Koutras, G and Heep, H and Koutras, C}, title = {Cost-Effective Mobile-Based Healthcare System for Managing Total Joint Arthroplasty Follow-Up.}, journal = {Healthcare informatics research}, volume = {23}, number = {1}, pages = {67-73}, pmid = {28261533}, issn = {2093-3681}, abstract = {OBJECTIVES: Long-term follow-up care after total joint arthroplasty is essential to evaluate hip and knee arthroplasty outcomes, to provide information to physicians and improve arthroplasty performance, and to improve patients' health condition. In this paper, we aim to improve the communication between arthroplasty patients and physicians and to reduce the cost of follow-up controls based on mobile application technologies and cloud computing.

METHODS: We propose a mobile-based healthcare system that provides cost-effective follow-up controls for primary arthroplasty patients through questions about symptoms in the replaced joint, questionnaires (WOMAC and SF-36v2) and the radiological examination of knee or hip joint. We also perform a cost analysis for a set of 423 patients that were treated in the University Clinic for Orthopedics in Essen-Werden.

RESULTS: The estimation of healthcare costs shows significant cost savings (a reduction of 63.67% for readmission rate 5%) in both the University Clinic for Orthopedics in Essen-Werden and the state of North Rhine-Westphalia when the mobile-based healthcare system is applied.

CONCLUSIONS: We propose a mHealth system to reduce the cost of follow-up assessments of arthroplasty patients through evaluation of diagnosis, self-monitoring, and regular review of their health status.}, } @article {pmid28261491, year = {2017}, author = {De, D and Mukherjee, A and Sau, A and Bhakta, I}, title = {Design of smart neonatal health monitoring system using SMCC.}, journal = {Healthcare technology letters}, volume = {4}, number = {1}, pages = {13-19}, pmid = {28261491}, issn = {2053-3713}, abstract = {Automated health monitoring and alert system development is a demanding research area today. Most of the currently available monitoring and controlling medical devices are wired which limits freeness of working environment. Wireless sensor network (WSN) is a better alternative in such an environment. Neonatal intensive care unit is used to take care of sick and premature neonates. Hypothermia is an independent risk factor for neonatal mortality and morbidity. To prevent it an automated monitoring system is required. In this Letter, an automated neonatal health monitoring system is designed using sensor mobile cloud computing (SMCC). SMCC is based on WSN and MCC. In the authors' system temperature sensor, acceleration sensor and heart rate measurement sensor are used to monitor body temperature, acceleration due to body movement and heart rate of neonates. The sensor data are stored inside the cloud. The health person continuously monitors and accesses these data through the mobile device using an Android Application for neonatal monitoring. When an abnormal situation arises, an alert is generated in the mobile device of the health person. 
By alerting health professional using such an automated system, early care is provided to the affected babies and the probability of recovery is increased.}, } @article {pmid28257067, year = {2017}, author = {Dinh, T and Kim, Y and Lee, H}, title = {A Location-Based Interactive Model of Internet of Things and Cloud (IoT-Cloud) for Mobile Cloud Computing Applications.}, journal = {Sensors (Basel, Switzerland)}, volume = {17}, number = {3}, pages = {}, pmid = {28257067}, issn = {1424-8220}, abstract = {This paper presents a location-based interactive model of Internet of Things (IoT) and cloud integration (IoT-cloud) for mobile cloud computing applications, in comparison with the periodic sensing model. In the latter, sensing collections are performed without awareness of sensing demands. Sensors are required to report their sensing data periodically regardless of whether or not there are demands for their sensing services. This leads to unnecessary energy loss due to redundant transmission. In the proposed model, IoT-cloud provides sensing services on demand based on interest and location of mobile users. By taking advantages of the cloud as a coordinator, sensing scheduling of sensors is controlled by the cloud, which knows when and where mobile users request for sensing services. Therefore, when there is no demand, sensors are put into an inactive mode to save energy. 
Through extensive analysis and experimental results, we show that the location-based model achieves a significant improvement in terms of network lifetime compared to the periodic model.}, } @article {pmid28245610, year = {2017}, author = {Mao, J and Chen, Y and Shi, F and Jia, Y and Liang, Z}, title = {Toward Exposing Timing-Based Probing Attacks in Web Applications.}, journal = {Sensors (Basel, Switzerland)}, volume = {17}, number = {3}, pages = {}, pmid = {28245610}, issn = {1424-8220}, abstract = {Web applications have become the foundation of many types of systems, ranging from cloud services to Internet of Things (IoT) systems. Due to the large amount of sensitive data processed by web applications, user privacy emerges as a major concern in web security. Existing protection mechanisms in modern browsers, e.g., the same origin policy, prevent the users' browsing information on one website from being directly accessed by another website. However, web applications executed in the same browser share the same runtime environment. Such shared states provide side channels for malicious websites to indirectly figure out the information of other origins. Timing is a classic side channel and the root cause of many recent attacks, which rely on the variations in the time taken by the systems to process different inputs. In this paper, we propose an approach to expose the timing-based probing attacks in web applications. It monitors the browser behaviors and identifies anomalous timing behaviors to detect browser probing attacks. We have prototyped our system in the Google Chrome browser and evaluated the effectiveness of our approach by using known probing techniques. We have applied our approach on a large number of top Alexa sites and reported the suspicious behavior patterns with corresponding analysis results. 
Our theoretical analysis illustrates that the effectiveness of the timing-based probing attacks is dramatically limited by our approach.}, } @article {pmid28243601, year = {2017}, author = {Abduallah, Y and Turki, T and Byron, K and Du, Z and Cervantes-Cervantes, M and Wang, JT}, title = {MapReduce Algorithms for Inferring Gene Regulatory Networks from Time-Series Microarray Data Using an Information-Theoretic Approach.}, journal = {BioMed research international}, volume = {2017}, number = {}, pages = {6261802}, pmid = {28243601}, issn = {2314-6141}, mesh = {*Algorithms ; *Gene Regulatory Networks ; *Information Theory ; Oligonucleotide Array Sequence Analysis/*methods ; Saccharomyces cerevisiae/*genetics ; Time Factors ; }, abstract = {Gene regulation is a series of processes that control gene expression and its extent. The connections among genes and their regulatory molecules, usually transcription factors, and a descriptive model of such connections are known as gene regulatory networks (GRNs). Elucidating GRNs is crucial to understand the inner workings of the cell and the complexity of gene interactions. To date, numerous algorithms have been developed to infer gene regulatory networks. However, as the number of identified genes increases and the complexity of their interactions is uncovered, networks and their regulatory mechanisms become cumbersome to test. Furthermore, prodding through experimental results requires an enormous amount of computation, resulting in slow data processing. Therefore, new approaches are needed to expeditiously analyze copious amounts of experimental data resulting from cellular GRNs. To meet this need, cloud computing is promising as reported in the literature. Here, we propose new MapReduce algorithms for inferring gene regulatory networks on a Hadoop cluster in a cloud environment. These algorithms employ an information-theoretic approach to infer GRNs using time-series microarray data. 
Experimental results show that our MapReduce program is much faster than an existing tool while achieving slightly better prediction accuracy than the existing tool.}, } @article {pmid28241972, year = {2017}, author = {Paulano-Godino, F and Jiménez-Delgado, JJ}, title = {Identification of fracture zones and its application in automatic bone fracture reduction.}, journal = {Computer methods and programs in biomedicine}, volume = {141}, number = {}, pages = {93-104}, doi = {10.1016/j.cmpb.2016.12.014}, pmid = {28241972}, issn = {1872-7565}, mesh = {Algorithms ; Fractures, Bone/diagnostic imaging/*therapy ; Humans ; *Therapy, Computer-Assisted ; Tomography, X-Ray Computed ; }, abstract = {BACKGROUND AND OBJECTIVE: The preoperative planning of bone fractures using information from CT scans increases the probability of obtaining satisfactory results, since specialists are provided with additional information before surgery. The reduction of complex bone fractures requires solving a 3D puzzle in order to place each fragment into its correct position. Computer-assisted solutions may aid in this process by identifying the number of fragments and their location, by calculating the fracture zones or even by computing the correct position of each fragment. The main goal of this paper is the development of an automatic method to calculate contact zones between fragments and thus to ease the computation of bone fracture reduction.

METHODS: In this paper, an automatic method to calculate the contact zone between two bone fragments is presented. In a previous step, bone fragments are segmented and labelled from CT images and a point cloud is generated for each bone fragment. The calculated contact zones enable the automatic reduction of complex fractures. To that end, an automatic method to match bone fragments in complex fractures is also presented.

RESULTS: The proposed method has been successfully applied in the calculation of the contact zone of 4 different bones from the ankle area. The calculated fracture zones enabled the reduction of all the tested cases using the presented matching algorithm. The performed tests show that the reduction of these fractures using the proposed methods leaded to a small overlapping between fragments.

CONCLUSIONS: The presented method makes the application of puzzle-solving strategies easier, since it does not obtain the entire fracture zone but the contact area between each pair of fragments. Therefore, it is not necessary to find correspondences between fracture zones and fragments may be aligned two by two. The developed algorithms have been successfully applied in different fracture cases in the ankle area. The small overlapping error obtained in the performed tests demonstrates the absence of visual overlapping in the figures.}, } @article {pmid28234271, year = {2017}, author = {Wong, KK and Fong, S and Wang, D}, title = {Impact of advanced parallel or cloud computing technologies for image guided diagnosis and therapy.}, journal = {Journal of X-ray science and technology}, volume = {25}, number = {2}, pages = {187-192}, doi = {10.3233/XST-17252}, pmid = {28234271}, issn = {1095-9114}, mesh = {*Cloud Computing ; Humans ; *Image Interpretation, Computer-Assisted ; *Software ; *Therapy, Computer-Assisted ; }, } @article {pmid28232083, year = {2017}, author = {Yu, J and Blom, J and Sczyrba, A and Goesmann, A}, title = {Rapid protein alignment in the cloud: HAMOND combines fast DIAMOND alignments with Hadoop parallelism.}, journal = {Journal of biotechnology}, volume = {257}, number = {}, pages = {58-60}, doi = {10.1016/j.jbiotec.2017.02.020}, pmid = {28232083}, issn = {1873-4863}, mesh = {*Cloud Computing ; Comparative Genomic Hybridization ; Computational Biology ; Genomics/methods ; High-Throughput Nucleotide Sequencing/instrumentation/*methods ; Internet ; Metagenome ; Molecular Sequence Data ; Sequence Alignment/instrumentation/*methods ; Sequence Analysis, DNA/instrumentation/*methods ; Software ; Whole Genome Sequencing ; }, abstract = {The introduction of next generation sequencing has caused a steady increase in the amounts of data that have to be processed in modern life science. 
Sequence alignment plays a key role in the analysis of sequencing data e.g. within whole genome sequencing or metagenome projects. BLAST is a commonly used alignment tool that was the standard approach for more than two decades, but in the last years faster alternatives have been proposed including RapSearch, GHOSTX, and DIAMOND. Here we introduce HAMOND, an application that uses Apache Hadoop to parallelize DIAMOND computation in order to scale-out the calculation of alignments. HAMOND is fault tolerant and scalable by utilizing large cloud computing infrastructures like Amazon Web Services. HAMOND has been tested in comparative genomics analyses and showed promising results both in efficiency and accuracy.}, } @article {pmid28213882, year = {2017}, author = {Mohit, P and Amin, R and Karati, A and Biswas, GP and Khan, MK}, title = {A Standard Mutual Authentication Protocol for Cloud Computing Based Health Care System.}, journal = {Journal of medical systems}, volume = {41}, number = {4}, pages = {50}, pmid = {28213882}, issn = {1573-689X}, mesh = {Algorithms ; *Cloud Computing ; Computer Security/*instrumentation ; *Confidentiality ; Health Information Exchange/*standards ; Humans ; Internet ; Telemedicine/*instrumentation ; }, abstract = {Telecare Medical Information System (TMIS) supports a standard platform to the patient for getting necessary medical treatment from the doctor(s) via Internet communication. Security protection is important for medical records (data) of the patients because of very sensitive information. Besides, patient anonymity is another most important property, which must be protected. Most recently, Chiou et al. suggested an authentication protocol for TMIS by utilizing the concept of cloud environment. They claimed that their protocol is patient anonymous and well security protected. We reviewed their protocol and found that it is completely insecure against patient anonymity. 
Further, the same protocol is not protected against mobile device stolen attack. In order to improve security level and complexity, we design a light weight authentication protocol for the same environment. Our security analysis ensures resilience of all possible security attacks. The performance of our protocol is relatively standard in comparison with the related previous research.}, } @article {pmid28208684, year = {2017}, author = {Liu, K and Wei, S and Chen, Z and Jia, B and Chen, G and Ling, H and Sheaff, C and Blasch, E}, title = {A Real-Time High Performance Computation Architecture for Multiple Moving Target Tracking Based on Wide-Area Motion Imagery via Cloud and Graphic Processing Units.}, journal = {Sensors (Basel, Switzerland)}, volume = {17}, number = {2}, pages = {}, pmid = {28208684}, issn = {1424-8220}, abstract = {This paper presents the first attempt at combining Cloud with Graphic Processing Units (GPUs) in a complementary manner within the framework of a real-time high performance computation architecture for the application of detecting and tracking multiple moving targets based on Wide Area Motion Imagery (WAMI). More specifically, the GPU and Cloud Moving Target Tracking (GC-MTT) system applied a front-end web based server to perform the interaction with Hadoop and highly parallelized computation functions based on the Compute Unified Device Architecture (CUDA©). The introduced multiple moving target detection and tracking method can be extended to other applications such as pedestrian tracking, group tracking, and Patterns of Life (PoL) analysis. The cloud and GPUs based computing provides an efficient real-time target recognition and tracking approach as compared to methods when the work flow is applied using only central processing units (CPUs). 
The simultaneous tracking and recognition results demonstrate that a GC-MTT based approach provides drastically improved tracking with low frame rates over realistic conditions.}, } @article {pmid28205674, year = {2017}, author = {Zhao, L and Chen, Q and Li, W and Jiang, P and Wong, L and Li, J}, title = {MapReduce for accurate error correction of next-generation sequencing data.}, journal = {Bioinformatics (Oxford, England)}, volume = {33}, number = {23}, pages = {3844-3851}, doi = {10.1093/bioinformatics/btx089}, pmid = {28205674}, issn = {1367-4811}, mesh = {*Algorithms ; Base Sequence ; *High-Throughput Nucleotide Sequencing ; Reproducibility of Results ; Sequence Analysis, DNA/*methods ; }, abstract = {MOTIVATION: Next-generation sequencing platforms have produced huge amounts of sequence data. This is revolutionizing every aspect of genetic and genomic research. However, these sequence datasets contain quite a number of machine-induced errors-e.g. errors due to substitution can be as high as 2.5%. Existing error-correction methods are still far from perfect. In fact, more errors are sometimes introduced than correct corrections, especially by the prevalent k-mer based methods. The existing methods have also made limited exploitation of on-demand cloud computing.

RESULTS: We introduce an error-correction method named MEC, which uses a two-layered MapReduce technique to achieve high correction performance. In the first layer, all the input sequences are mapped to groups to identify candidate erroneous bases in parallel. In the second layer, the erroneous bases at the same position are linked together from all the groups for making statistically reliable corrections. Experiments on real and simulated datasets show that our method outperforms existing methods remarkably. Its per-position error rate is consistently the lowest, and the correction gain is always the highest.

The source code is available at bioinformatics.gxu.edu.cn/ngs/mec.

CONTACTS: wongls@comp.nus.edu.sg or jinyan.li@uts.edu.au.

SUPPLEMENTARY INFORMATION: Supplementary data are available at Bioinformatics online.}, } @article {pmid28190948, year = {2016}, author = {Drawert, B and Trogdon, M and Toor, S and Petzold, L and Hellander, A}, title = {MOLNs: A CLOUD PLATFORM FOR INTERACTIVE, REPRODUCIBLE, AND SCALABLE SPATIAL STOCHASTIC COMPUTATIONAL EXPERIMENTS IN SYSTEMS BIOLOGY USING PyURDME.}, journal = {SIAM journal on scientific computing : a publication of the Society for Industrial and Applied Mathematics}, volume = {38}, number = {3}, pages = {C179-C202}, pmid = {28190948}, issn = {1064-8275}, support = {R01 EB014877/EB/NIBIB NIH HHS/United States ; R01 GM113241/GM/NIGMS NIH HHS/United States ; }, abstract = {Computational experiments using spatial stochastic simulations have led to important new biological insights, but they require specialized tools and a complex software stack, as well as large and scalable compute and data analysis resources due to the large computational cost associated with Monte Carlo computational workflows. The complexity of setting up and managing a large-scale distributed computation environment to support productive and reproducible modeling can be prohibitive for practitioners in systems biology. This results in a barrier to the adoption of spatial stochastic simulation tools, effectively limiting the type of biological questions addressed by quantitative modeling. In this paper, we present PyURDME, a new, user-friendly spatial modeling and simulation package, and MOLNs, a cloud computing appliance for distributed simulation of stochastic reaction-diffusion models. 
MOLNs is based on IPython and provides an interactive programming platform for development of sharable and reproducible distributed parallel computational experiments.}, } @article {pmid28187881, year = {2017}, author = {Forkan, ARM and Khalil, I}, title = {A clinical decision-making mechanism for context-aware and patient-specific remote monitoring systems using the correlations of multiple vital signs.}, journal = {Computer methods and programs in biomedicine}, volume = {139}, number = {}, pages = {1-16}, doi = {10.1016/j.cmpb.2016.10.018}, pmid = {28187881}, issn = {1872-7565}, mesh = {Algorithms ; *Awareness ; *Clinical Decision-Making ; Humans ; *Monitoring, Physiologic ; }, abstract = {BACKGROUND AND OBJECTIVES: In home-based context-aware monitoring patient's real-time data of multiple vital signs (e.g. heart rate, blood pressure) are continuously generated from wearable sensors. The changes in such vital parameters are highly correlated. They are also patient-centric and can be either recurrent or can fluctuate. The objective of this study is to develop an intelligent method for personalized monitoring and clinical decision support through early estimation of patient-specific vital sign values, and prediction of anomalies using the interrelation among multiple vital signs.

METHODS: In this paper, multi-label classification algorithms are applied in classifier design to forecast these values and related abnormalities. We proposed a completely new approach of patient-specific vital sign prediction system using their correlations. The developed technique can guide healthcare professionals to make accurate clinical decisions. Moreover, our model can support many patients with various clinical conditions concurrently by utilizing the power of cloud computing technology. The developed method also reduces the rate of false predictions in remote monitoring centres.

RESULTS: In the experimental settings, the statistical features and correlations of six vital signs are formulated as multi-label classification problem. Eight multi-label classification algorithms along with three fundamental machine learning algorithms are used and tested on a public dataset of 85 patients. Different multi-label classification evaluation measures such as Hamming score, F1-micro average, and accuracy are used for interpreting the prediction performance of patient-specific situation classifications. We achieved 90-95% Hamming score values across 24 classifier combinations for 85 different patients used in our experiment. The results are compared with single-label classifiers and without considering the correlations among the vitals. The comparisons show that multi-label method is the best technique for this problem domain.

CONCLUSIONS: The evaluation results reveal that multi-label classification techniques using the correlations among multiple vitals are effective ways for early estimation of future values of those vitals. In context-aware remote monitoring this process can greatly help the doctors in quick diagnostic decision making.}, } @article {pmid28178214, year = {2017}, author = {Celesti, A and Fazio, M and Villari, M}, title = {Enabling Secure XMPP Communications in Federated IoT Clouds Through XEP 0027 and SAML/SASL SSO.}, journal = {Sensors (Basel, Switzerland)}, volume = {17}, number = {2}, pages = {}, pmid = {28178214}, issn = {1424-8220}, abstract = {Nowadays, in the panorama of Internet of Things (IoT), finding a right compromise between interactivity and security is not trivial at all. Currently, most of pervasive communication technologies are designed to work locally. As a consequence, the development of large-scale Internet services and applications is not so easy for IoT Cloud providers. The main issue is that both IoT architectures and services have started as simple but they are becoming more and more complex. Consequently, the web service technology is often inappropriate. Recently, many operators in both academia and industry fields are considering the possibility to adopt the eXtensible Messaging and Presence Protocol (XMPP) for the implementation of IoT Cloud communication systems. In fact, XMPP offers many advantages in term of real-time capabilities, efficient data distribution, service discovery and inter-domain communication compared to other technologies. Nevertheless, the protocol lacks of native security, data confidentiality and trustworthy federation features. In this paper, considering an XMPP-based IoT Cloud architectural model, we discuss how can be possible to enforce message signing/encryption and Single-Sign On (SSO) authentication respectively for secure inter-module and inter-domain communications in a federated environment. 
Experiments prove that security mechanisms introduce an acceptable overhead, considering the obvious advantages achieved in terms of data trustiness and privacy.}, } @article {pmid28164085, year = {2017}, author = {Granados Moreno, P and Joly, Y and Knoppers, BM}, title = {Public-Private Partnerships in Cloud-Computing Services in the Context of Genomic Research.}, journal = {Frontiers in medicine}, volume = {4}, number = {}, pages = {3}, pmid = {28164085}, issn = {2296-858X}, abstract = {Public-private partnerships (PPPs) have been increasingly used to spur and facilitate innovation in a number of fields. In healthcare, the purpose of using a PPP is commonly to develop and/or provide vaccines and drugs against communicable diseases, mainly in developing or underdeveloped countries. With the advancement of technology and of the area of genomics, these partnerships also focus on large-scale genomic research projects that aim to advance the understanding of diseases that have a genetic component and to develop personalized treatments. This new focus has created new forms of PPPs that involve information technology companies, which provide computing infrastructure and services to store, analyze, and share the massive amounts of data genomic-related projects produce. In this article, we explore models of PPPs proposed to handle, protect, and share the genomic data collected and to further develop genomic-based medical products. We also identify the reasons that make these models suitable and the challenges they have yet to overcome. To achieve this, we describe the details and complexities of MSSNG, International Cancer Genome Consortium, and 100,000 Genomes Project, the three PPPs that focus on large-scale genomic research to better understand the genetic components of autism, cancer, rare diseases, and infectious diseases with the intention to find appropriate treatments. 
Organized as PPP and employing cloud-computing services, the three projects have advanced quickly and are likely to be important sources of research and development for future personalized medicine. However, there still are unresolved matters relating to conflicts of interest, commercialization, and data control. Learning from the challenges encountered by past PPPs allowed us to establish that developing guidelines to adequately manage personal health information stored in clouds and ensuring the protection of data integrity and privacy would be critical steps in the development of future PPPs.}, } @article {pmid28113422, year = {2016}, author = {Hu, Shengshan and Wang, Qian and Wang, Jingjun and Qin, Zhan and Ren, Kui}, title = {Securing SIFT: Privacy-Preserving Outsourcing Computation of Feature Extractions Over Encrypted Image Data.}, journal = {IEEE transactions on image processing : a publication of the IEEE Signal Processing Society}, volume = {25}, number = {7}, pages = {3411-3425}, doi = {10.1109/TIP.2016.2568460}, pmid = {28113422}, issn = {1941-0042}, abstract = {Advances in cloud computing have greatly motivated data owners to outsource their huge amount of personal multimedia data and/or computationally expensive tasks onto the cloud by leveraging its abundant resources for cost saving and flexibility. Despite the tremendous benefits, the outsourced multimedia data and its originated applications may reveal the data owner's private information, such as the personal identity, locations, or even financial profiles. This observation has recently aroused new research interest on privacy-preserving computations over outsourced multimedia data. In this paper, we propose an effective and practical privacy-preserving computation outsourcing protocol for the prevailing scale-invariant feature transform (SIFT) over massive encrypted image data. 
We first show that the previous solutions to this problem have either efficiency/security or practicality issues, and none can well preserve the important characteristics of the original SIFT in terms of distinctiveness and robustness. We then present a new scheme design that achieves efficiency and security requirements simultaneously with the preservation of its key characteristics, by randomly splitting the original image data, designing two novel efficient protocols for secure multiplication and comparison, and carefully distributing the feature extraction computations onto two independent cloud servers. We both carefully analyze and extensively evaluate the security and effectiveness of our design. The results show that our solution is practically secure, outperforms the state-of-the-art, and performs comparably with the original SIFT in terms of various characteristics, including rotation invariance, image scale invariance, robust matching across affine distortion, and addition of noise and change in 3D viewpoint and illumination.}, } @article {pmid28112020, year = {2017}, author = {Lian, JW}, title = {Establishing a Cloud Computing Success Model for Hospitals in Taiwan.}, journal = {Inquiry : a journal of medical care organization, provision and financing}, volume = {54}, number = {}, pages = {46958016685836}, pmid = {28112020}, issn = {1945-7243}, mesh = {Cloud Computing/*standards ; Computer Security ; *Hospitals ; *Models, Organizational ; Surveys and Questionnaires ; Taiwan ; }, abstract = {The purpose of this study is to understand the critical quality-related factors that affect cloud computing success of hospitals in Taiwan. In this study, private cloud computing is the major research target. The chief information officers participated in a questionnaire survey. The results indicate that the integration of trust into the information systems success model will have acceptable explanatory power to understand cloud computing success in the hospital. 
Moreover, information quality and system quality directly affect cloud computing satisfaction, whereas service quality indirectly affects the satisfaction through trust. In other words, trust serves as the mediator between service quality and satisfaction. This cloud computing success model will help hospitals evaluate or achieve success after adopting private cloud computing health care services.}, } @article {pmid28107819, year = {2017}, author = {Elshazly, H and Souilmi, Y and Tonellato, PJ and Wall, DP and Abouelhoda, M}, title = {MC-GenomeKey: a multicloud system for the detection and annotation of genomic variants.}, journal = {BMC bioinformatics}, volume = {18}, number = {1}, pages = {49}, pmid = {28107819}, issn = {1471-2105}, support = {R01 LM011566/LM/NLM NIH HHS/United States ; R01 MH090611/MH/NIMH NIH HHS/United States ; }, mesh = {*Cloud Computing ; Databases, Genetic ; Genome, Human ; Genomics/*methods ; *High-Throughput Nucleotide Sequencing ; Humans ; Internet ; Software ; Workflow ; }, abstract = {BACKGROUND: Next Generation Genome sequencing techniques became affordable for massive sequencing efforts devoted to clinical characterization of human diseases. However, the cost of providing cloud-based data analysis of the mounting datasets remains a concerning bottleneck for providing cost-effective clinical services. To address this computational problem, it is important to optimize the variant analysis workflow and the used analysis tools to reduce the overall computational processing time, and concomitantly reduce the processing cost. Furthermore, it is important to capitalize on the use of the recent development in the cloud computing market, which have witnessed more providers competing in terms of products and prices.

RESULTS: In this paper, we present a new package called MC-GenomeKey (Multi-Cloud GenomeKey) that efficiently executes the variant analysis workflow for detecting and annotating mutations using cloud resources from different commercial cloud providers. Our package supports Amazon, Google, and Azure clouds, as well as, any other cloud platform based on OpenStack. Our package allows different scenarios of execution with different levels of sophistication, up to the one where a workflow can be executed using a cluster whose nodes come from different clouds. MC-GenomeKey also supports scenarios to exploit the spot instance model of Amazon in combination with the use of other cloud platforms to provide significant cost reduction. To the best of our knowledge, this is the first solution that optimizes the execution of the workflow using computational resources from different cloud providers.

CONCLUSIONS: MC-GenomeKey provides an efficient multicloud based solution to detect and annotate mutations. The package can run in different commercial cloud platforms, which enables the user to seize the best offers. The package also provides a reliable means to make use of the low-cost spot instance model of Amazon, as it provides an efficient solution to the sudden termination of spot machines as a result of a sudden price increase. The package has a web-interface and it is available for free for academic use.}, } @article {pmid28103567, year = {2017}, author = {Liu, Guo-Ping}, title = {Predictive Control of Networked Multiagent Systems via Cloud Computing.}, journal = {IEEE transactions on cybernetics}, volume = {47}, number = {8}, pages = {1852-1859}, doi = {10.1109/TCYB.2017.2647820}, pmid = {28103567}, issn = {2168-2275}, abstract = {This paper studies the design and analysis of networked multiagent predictive control systems via cloud computing. A cloud predictive control scheme for networked multiagent systems (NMASs) is proposed to achieve consensus and stability simultaneously and to compensate for network delays actively. The design of the cloud predictive controller for NMASs is detailed. The analysis of the cloud predictive control scheme gives the necessary and sufficient conditions of stability and consensus of closed-loop networked multiagent control systems. The proposed scheme is verified to characterize the dynamical behavior and control performance of NMASs through simulations. 
The outcome provides a foundation for the development of cooperative and coordinative control of NMASs and its applications.}, } @article {pmid28085932, year = {2017}, author = {Cotes-Ruiz, IT and Prado, RP and García-Galán, S and Muñoz-Expósito, JE and Ruiz-Reyes, N}, title = {Dynamic Voltage Frequency Scaling Simulator for Real Workflows Energy-Aware Management in Green Cloud Computing.}, journal = {PloS one}, volume = {12}, number = {1}, pages = {e0169803}, pmid = {28085932}, issn = {1932-6203}, mesh = {Algorithms ; *Cloud Computing ; Energy Transfer ; Humans ; Information Storage and Retrieval/*methods ; *Internet ; *Models, Theoretical ; Signal Processing, Computer-Assisted ; Software ; Workflow ; Workload ; }, abstract = {Nowadays, the growing computational capabilities of Cloud systems rely on the reduction of the consumed power of their data centers to make them sustainable and economically profitable. The efficient management of computing resources is at the heart of any energy-aware data center and of special relevance is the adaptation of its performance to workload. Intensive computing applications in diverse areas of science generate complex workload called workflows, whose successful management in terms of energy saving is still at its beginning. WorkflowSim is currently one of the most advanced simulators for research on workflows processing, offering advanced features such as task clustering and failure policies. In this work, an expected power-aware extension of WorkflowSim is presented. This new tool integrates a power model based on a computing-plus-communication design to allow the optimization of new management strategies in energy saving considering computing, reconfiguration and networks costs as well as quality of service, and it incorporates the preeminent strategy for on host energy saving: Dynamic Voltage Frequency Scaling (DVFS). 
The simulator is designed to be consistent in different real scenarios and to include a wide repertory of DVFS governors. Results showing the validity of the simulator in terms of resources utilization, frequency and voltage scaling, power, energy and time saving are presented. Also, results achieved by the intra-host DVFS strategy with different governors are compared to those of the data center using a recent and successful DVFS-based inter-host scheduling strategy as overlapped mechanism to the DVFS intra-host technique.}, } @article {pmid28075343, year = {2017}, author = {Li, Z and Su, D and Zhu, H and Li, W and Zhang, F and Li, R}, title = {A Fast Synthetic Aperture Radar Raw Data Simulation Using Cloud Computing.}, journal = {Sensors (Basel, Switzerland)}, volume = {17}, number = {1}, pages = {}, pmid = {28075343}, issn = {1424-8220}, abstract = {Synthetic Aperture Radar (SAR) raw data simulation is a fundamental problem in radar system design and imaging algorithm research. The growth of surveying swath and resolution results in a significant increase in data volume and simulation period, which can be considered to be a comprehensive data intensive and computing intensive issue. Although several high performance computing (HPC) methods have demonstrated their potential for accelerating simulation, the input/output (I/O) bottleneck of huge raw data has not been eased. In this paper, we propose a cloud computing based SAR raw data simulation algorithm, which employs the MapReduce model to accelerate the raw data computing and the Hadoop distributed file system (HDFS) for fast I/O access. The MapReduce model is designed for the irregular parallel accumulation of raw data simulation, which greatly reduces the parallel efficiency of graphics processing unit (GPU) based simulation methods. In addition, three kinds of optimization strategies are put forward from the aspects of programming model, HDFS configuration and scheduling. 
The experimental results show that the cloud computing based algorithm achieves 4× speedup over the baseline serial approach in an 8-node cloud environment, and each optimization strategy can improve about 20%. This work proves that the proposed cloud algorithm is capable of solving the computing intensive and data intensive issues in SAR raw data simulation, and is easily extended to large scale computing to achieve higher acceleration.}, } @article {pmid28065898, year = {2017}, author = {Zhou, W and Li, R and Yuan, S and Liu, C and Yao, S and Luo, J and Niu, B}, title = {MetaSpark: a spark-based distributed processing tool to recruit metagenomic reads to reference genomes.}, journal = {Bioinformatics (Oxford, England)}, volume = {33}, number = {7}, pages = {1090-1092}, doi = {10.1093/bioinformatics/btw750}, pmid = {28065898}, issn = {1367-4811}, mesh = {Algorithms ; Genome ; High-Throughput Nucleotide Sequencing/*methods/standards ; Humans ; Metagenomics/*methods/standards ; Reference Standards ; *Software ; }, abstract = {SUMMARY: With the advent of next-generation sequencing, traditional bioinformatics tools are challenged by massive raw metagenomic datasets. One of the bottlenecks of metagenomic studies is lack of large-scale and cloud computing suitable data analysis tools. In this paper, we proposed a Spark based tool, called MetaSpark, to recruit metagenomic reads to reference genomes. MetaSpark benefits from the distributed data set (RDD) of Spark, which makes it able to cache data set in memory across cluster nodes and scale well with the datasets. Compared with previous metagenomics recruitment tools, MetaSpark recruited significantly more reads than many programs such as SOAP2, BWA and LAST and increased recruited reads by ∼4% compared with FR-HIT when there were 1 million reads and 0.75 GB references. Different test cases demonstrate MetaSpark's scalability and overall high performance.

AVAILABILITY: https://github.com/zhouweiyg/metaspark.

CONTACT: bniu@sccas.cn, jingluo@ynu.edu.cn.

SUPPLEMENTARY INFORMATION: Supplementary data are available at Bioinformatics online.}, } @article {pmid28051850, year = {2017}, author = {Gupta, V and Irimia, J and Pau, I and Rodríguez-Patón, A}, title = {BioBlocks: Programming Protocols in Biology Made Easier.}, journal = {ACS synthetic biology}, volume = {6}, number = {7}, pages = {1230-1232}, doi = {10.1021/acssynbio.6b00304}, pmid = {28051850}, issn = {2161-5063}, mesh = {Automation ; Computational Biology ; Programming Languages ; Synthetic Biology/*methods ; }, abstract = {The methods to execute biological experiments are evolving. Affordable fluid handling robots and on-demand biology enterprises are making automating entire experiments a reality. Automation offers the benefit of high-throughput experimentation, rapid prototyping, and improved reproducibility of results. However, learning to automate and codify experiments is a difficult task as it requires programming expertise. Here, we present a web-based visual development environment called BioBlocks for describing experimental protocols in biology. It is based on Google's Blockly and Scratch, and requires little or no experience in computer programming to automate the execution of experiments. The experiments can be specified, saved, modified, and shared between multiple users in an easy manner. BioBlocks is open-source and can be customized to execute protocols on local robotic platforms or remotely, that is, in the cloud. 
It aims to serve as a de facto open standard for programming protocols in Biology.}, } @article {pmid28034410, year = {2017}, author = {Dainton, C and Chu, CH}, title = {A review of electronic medical record keeping on mobile medical service trips in austere settings.}, journal = {International journal of medical informatics}, volume = {98}, number = {}, pages = {33-40}, doi = {10.1016/j.ijmedinf.2016.11.008}, pmid = {28034410}, issn = {1872-8243}, mesh = {Delivery of Health Care ; Electronic Health Records/*organization & administration/statistics & numerical data ; Health Services/*statistics & numerical data ; Humans ; Practice Guidelines as Topic/*standards ; }, abstract = {INTRODUCTION: Electronic medical records (EMRs) may address the need for decision and language support for Western clinicians on mobile medical service trips (MSTs) in low resource settings abroad, while providing improved access to records and data management. However, there has yet to be a review of this emerging technology used by MSTs in low-resource settings. The aim of this study is to describe EMR systems designed specifically for use by mobile MSTs in remote settings, and accordingly, determine new opportunities for this technology to improve quality of healthcare provided by MSTs.

METHODS: A MEDLINE, EMBASE, and Scopus/IEEE search and supplementary Google search were performed for EMR systems specific to mobile MSTs. Information was extracted regarding EMR name, organization, scope of use, platform, open source coding, commercial availability, data integration, and capacity for linguistic and decision support. Missing information was requested by email.

RESULTS: After screening of 122 abstracts, two articles remained that discussed deployment of EMR systems in MST settings (iChart, SmartList To Go), and thirteen additional EMR systems were found through the Google search. Of these, three systems (Project Buendia, TEBOW, and University of Central Florida's internally developed EMR) are based on modified versions of Open MRS software, while three are smartphone apps (QuickChart EMR, iChart, NotesFirst). Most of the systems use a local network to manage data, while the remaining systems use opportunistic cloud synchronization. Three (TimmyCare, Basil, and Backpack EMR) contain multilingual user interfaces, and only one (QuickChart EMR) contained MST-specific clinical decision support.

DISCUSSION: There have been limited attempts to tailor EMRs to mobile MSTs. Only Open MRS has a broad user base, and other EMR systems should consider interoperability and data sharing with larger systems as a priority. Several systems include tablet compatibility, or are specifically designed for smartphone, which may be helpful given the environment and low resource context. Results from this review may be useful to non-government organizations (NGOs) considering modernization of their medical records practices as EMR use facilitates research, decreases paper administration costs, and improves perceptions of professionalism; however, most MST-specific EMRs remain in their early stages, and further development and research is required before reaching the stage of widespread adoption.}, } @article {pmid28030553, year = {2016}, author = {Zhang, N and Yang, X and Zhang, M and Sun, Y}, title = {Crowd-Funding: A New Resource Cooperation Mode for Mobile Cloud Computing.}, journal = {PloS one}, volume = {11}, number = {12}, pages = {e0167657}, doi = {10.1371/journal.pone.0167657}, pmid = {28030553}, issn = {1932-6203}, mesh = {Algorithms ; *Cell Phone ; *Cloud Computing ; *Cooperative Behavior ; Financial Management/*methods ; }, abstract = {Mobile cloud computing, which integrates the cloud computing techniques into the mobile environment, is regarded as one of the enabler technologies for 5G mobile wireless networks. There are many sporadic spare resources distributed within various devices in the networks, which can be used to support mobile cloud applications. However, these devices, with only a few spare resources, cannot support some resource-intensive mobile applications alone. If some of them cooperate with each other and share their resources, then they can support many applications. 
In this paper, we propose a resource cooperative provision mode referred to as "Crowd-funding", which is designed to aggregate the distributed devices together as the resource provider of mobile applications. Moreover, to facilitate high-efficiency resource management via dynamic resource allocation, different resource providers should be selected to form a stable resource coalition for different requirements. Thus, considering different requirements, we propose two different resource aggregation models for coalition formation. Finally, we may allocate the revenues based on their attributions according to the concept of the "Shapley value" to enable a more impartial revenue share among the cooperators. It is shown that a dynamic and flexible resource-management method can be developed based on the proposed Crowd-funding model, relying on the spare resources in the network.}, } @article {pmid28025586, year = {2016}, author = {Dong, J and Xiao, X and Menarguez, MA and Zhang, G and Qin, Y and Thau, D and Biradar, C and Moore, B}, title = {Mapping paddy rice planting area in northeastern Asia with Landsat 8 images, phenology-based algorithm and Google Earth Engine.}, journal = {Remote sensing of environment}, volume = {185}, number = {}, pages = {142-154}, pmid = {28025586}, issn = {0034-4257}, support = {R01 AI101028/AI/NIAID NIH HHS/United States ; }, abstract = {Area and spatial distribution information of paddy rice are important for understanding of food security, water use, greenhouse gas emission, and disease transmission. Due to climatic warming and increasing food demand, paddy rice has been expanding rapidly in high latitude areas in the last decade, particularly in northeastern (NE) Asia. Current knowledge about paddy rice fields in these cold regions is limited. 
The phenology- and pixel-based paddy rice mapping (PPPM) algorithm, which identifies the flooding signals in the rice transplanting phase, has been effectively applied in tropical areas, but has not been tested at large scale of cold regions yet. Despite the effects from more snow/ice, paddy rice mapping in high latitude areas is assumed to be more encouraging due to less clouds, lower cropping intensity, and more observations from Landsat sidelaps. Moreover, the enhanced temporal and geographic coverage from Landsat 8 provides an opportunity to acquire phenology information and map paddy rice. This study evaluated the potential of Landsat 8 images on annual paddy rice mapping in NE Asia which was dominated by single cropping system, including Japan, North Korea, South Korea, and NE China. The cloud computing approach was used to process all the available Landsat 8 imagery in 2014 (143 path/rows, ~3290 scenes) with the Google Earth Engine (GEE) platform. The results indicated that the Landsat 8, GEE, and improved PPPM algorithm can effectively support the yearly mapping of paddy rice in NE Asia. The resultant paddy rice map has a high accuracy with the producer (user) accuracy of 73% (92%), based on the validation using very high resolution images and intensive field photos. Geographic characteristics of paddy rice distribution were analyzed from aspects of country, elevation, latitude, and climate. 
The resultant 30-m paddy rice map is expected to provide unprecedented details about the area, spatial distribution, and landscape pattern of paddy rice fields in NE Asia, which will contribute to food security assessment, water resource management, estimation of greenhouse gas emissions, and disease control.}, } @article {pmid28025200, year = {2017}, author = {Yang, A and Troup, M and Lin, P and Ho, JW}, title = {Falco: a quick and flexible single-cell RNA-seq processing framework on the cloud.}, journal = {Bioinformatics (Oxford, England)}, volume = {33}, number = {5}, pages = {767-769}, doi = {10.1093/bioinformatics/btw732}, pmid = {28025200}, issn = {1367-4811}, mesh = {Algorithms ; Animals ; Computational Biology/methods ; Dendritic Cells/metabolism ; Gene Expression ; Gene Expression Profiling/*methods ; *Gene Regulatory Networks ; Humans ; Mice ; RNA ; Sequence Analysis, RNA/*methods ; Single-Cell Analysis/*methods ; *Software ; }, abstract = {SUMMARY: Single-cell RNA-seq (scRNA-seq) is increasingly used in a range of biomedical studies. Nonetheless, current RNA-seq analysis tools are not specifically designed to efficiently process scRNA-seq data due to their limited scalability. Here we introduce Falco, a cloud-based framework to enable paralellization of existing RNA-seq processing pipelines using big data technologies of Apache Hadoop and Apache Spark for performing massively parallel analysis of large scale transcriptomic data. Using two public scRNA-seq datasets and two popular RNA-seq alignment/feature quantification pipelines, we show that the same processing pipeline runs 2.6-145.4 times faster using Falco than running on a highly optimized standalone computer. Falco also allows users to utilize low-cost spot instances of Amazon Web Services, providing a ∼65% reduction in cost of analysis.

Falco is available via a GNU General Public License at https://github.com/VCCRI/Falco/.

CONTACT: j.ho@victorchang.edu.au.

SUPPLEMENTARY INFORMATION: Supplementary data are available at Bioinformatics online.}, } @article {pmid28009205, year = {2016}, author = {Meir, Z and Sikorsky, T and Ben-Shlomi, R and Akerman, N and Dallal, Y and Ozeri, R}, title = {Dynamics of a Ground-State Cooled Ion Colliding with Ultracold Atoms.}, journal = {Physical review letters}, volume = {117}, number = {24}, pages = {243401}, doi = {10.1103/PhysRevLett.117.243401}, pmid = {28009205}, issn = {1079-7114}, abstract = {Ultracold atom-ion mixtures are gaining increasing interest due to their potential applications in ultracold and state-controlled chemistry, quantum computing, and many-body physics. Here, we studied the dynamics of a single ground-state cooled ion during few, to many, Langevin (spiraling) collisions with ultracold atoms. We measured the ion's energy distribution and observed a clear deviation from the Maxwell-Boltzmann distribution, characterized by an exponential tail, to a power-law distribution best described by a Tsallis function. Unlike previous experiments, the energy scale of atom-ion interactions is not determined by either the atomic cloud temperature or the ion's trap residual excess-micromotion energy. Instead, it is determined by the force the atom exerts on the ion during a collision which is then amplified by the trap dynamics. This effect is intrinsic to ion Paul traps and sets the lower bound of atom-ion steady-state interaction energy in these systems. 
Despite the fact that our system is eventually driven out of the ultracold regime, we are capable of studying quantum effects by limiting the interaction to the first collision when the ion is initialized in the ground state of the trap.}, } @article {pmid27995048, year = {2016}, author = {Mizuno, S and Iwamoto, S and Seki, M and Yamaki, N}, title = {Proposal for optimal placement platform of bikes using queueing networks.}, journal = {SpringerPlus}, volume = {5}, number = {1}, pages = {2071}, pmid = {27995048}, issn = {2193-1801}, abstract = {In recent social experiments, rental motorbikes and rental bicycles have been arranged at nodes, and environments where users can ride these bikes have been improved. When people borrow bikes, they return them to nearby nodes. Some experiments have been conducted using the models of Hamachari of Yokohama, the Niigata Rental Cycle, and Bicing. However, from these experiments, the effectiveness of distributing bikes was unclear, and many models were discontinued midway. Thus, we need to consider whether these models are effectively designed to represent the distribution system. Therefore, we construct a model to arrange the nodes for distributing bikes using a queueing network. To adopt realistic values for our model, we use the Google Maps application program interface. Thus, we can easily obtain values of distance and transit time between nodes in various places in the world. Moreover, we apply the distribution of a population to a gravity model and we compute the effective transition probability for this queueing network. If the arrangement of the nodes and number of bikes at each node is known, we can precisely design the system. We illustrate our system using convenience stores as nodes and optimize the node configuration. 
As a result, we can optimize simultaneously the number of nodes, node places, and number of bikes for each node, and we can construct a base for a rental cycle business to use our system.}, } @article {pmid27986896, year = {2017}, author = {Grimm, DG and Roqueiro, D and Salomé, PA and Kleeberger, S and Greshake, B and Zhu, W and Liu, C and Lippert, C and Stegle, O and Schölkopf, B and Weigel, D and Borgwardt, KM}, title = {easyGWAS: A Cloud-Based Platform for Comparing the Results of Genome-Wide Association Studies.}, journal = {The Plant cell}, volume = {29}, number = {1}, pages = {5-19}, pmid = {27986896}, issn = {1532-298X}, mesh = {Arabidopsis/genetics/growth & development ; Computational Biology/*methods ; Flowers/genetics/growth & development ; Genome, Plant/*genetics ; Genome-Wide Association Study/*methods ; Genotype ; Humans ; Phenotype ; *Polymorphism, Single Nucleotide ; Reproducibility of Results ; Software ; User-Computer Interface ; }, abstract = {The ever-growing availability of high-quality genotypes for a multitude of species has enabled researchers to explore the underlying genetic architecture of complex phenotypes at an unprecedented level of detail using genome-wide association studies (GWAS). The systematic comparison of results obtained from GWAS of different traits opens up new possibilities, including the analysis of pleiotropic effects. Other advantages that result from the integration of multiple GWAS are the ability to replicate GWAS signals and to increase statistical power to detect such signals through meta-analyses. In order to facilitate the simple comparison of GWAS results, we present easyGWAS, a powerful, species-independent online resource for computing, storing, sharing, annotating, and comparing GWAS. 
The easyGWAS tool supports multiple species, the uploading of private genotype data and summary statistics of existing GWAS, as well as advanced methods for comparing GWAS results across different experiments and data sets in an interactive and user-friendly interface. easyGWAS is also a public data repository for GWAS data and summary statistics and already includes published data and results from several major GWAS. We demonstrate the potential of easyGWAS with a case study of the model organism Arabidopsis thaliana, using flowering and growth-related traits.}, } @article {pmid27982081, year = {2016}, author = {Tatlow, PJ and Piccolo, SR}, title = {A cloud-based workflow to quantify transcript-expression levels in public cancer compendia.}, journal = {Scientific reports}, volume = {6}, number = {}, pages = {39259}, pmid = {27982081}, issn = {2045-2322}, mesh = {Cell Line, Tumor ; Cloud Computing/*economics ; Computational Biology/economics/methods ; Databases, Factual ; Humans ; Internet ; Neoplasms/genetics/metabolism/*pathology ; RNA, Neoplasm/chemistry/metabolism ; Sequence Analysis, RNA ; *User-Computer Interface ; }, abstract = {Public compendia of sequencing data are now measured in petabytes. Accordingly, it is infeasible for researchers to transfer these data to local computers. Recently, the National Cancer Institute began exploring opportunities to work with molecular data in cloud-computing environments. With this approach, it becomes possible for scientists to take their tools to the data and thereby avoid large data transfers. It also becomes feasible to scale computing resources to the needs of a given analysis. We quantified transcript-expression levels for 12,307 RNA-Sequencing samples from the Cancer Cell Line Encyclopedia and The Cancer Genome Atlas. We used two cloud-based configurations and examined the performance and cost profiles of each configuration. 
Using preemptible virtual machines, we processed the samples for as little as $0.09 (USD) per sample. As the samples were processed, we collected performance metrics, which helped us track the duration of each processing step and quantified computational resources used at different stages of sample processing. Although the computational demands of reference alignment and expression quantification have decreased considerably, there remains a critical need for researchers to optimize preprocessing steps. We have stored the software, scripts, and processed data in a publicly accessible repository (https://osf.io/gqrz9).}, } @article {pmid27966528, year = {2016}, author = {Marshall, K and Jacobsen, CS and Schäfermeier, C and Gehring, T and Weedbrook, C and Andersen, UL}, title = {Continuous-variable quantum computing on encrypted data.}, journal = {Nature communications}, volume = {7}, number = {}, pages = {13795}, pmid = {27966528}, issn = {2041-1723}, abstract = {The ability to perform computations on encrypted data is a powerful tool for protecting a client's privacy, especially in today's era of cloud and distributed computing. In terms of privacy, the best solutions that classical techniques can achieve are unfortunately not unconditionally secure in the sense that they are dependent on a hacker's computational power. Here we theoretically investigate, and experimentally demonstrate with Gaussian displacement and squeezing operations, a quantum solution that achieves the security of a user's privacy using the practical technology of continuous variables. We demonstrate losses of up to 10 km both ways between the client and the server and show that security can still be achieved. 
Our approach offers a number of practical benefits (from a quantum perspective) that could one day allow the potential widespread adoption of this quantum technology in future cloud-based computing networks.}, } @article {pmid27942268, year = {2016}, author = {Lampa, S and Alvarsson, J and Spjuth, O}, title = {Towards agile large-scale predictive modelling in drug discovery with flow-based programming design principles.}, journal = {Journal of cheminformatics}, volume = {8}, number = {}, pages = {67}, pmid = {27942268}, issn = {1758-2946}, abstract = {Predictive modelling in drug discovery is challenging to automate as it often contains multiple analysis steps and might involve cross-validation and parameter tuning that create complex dependencies between tasks. With large-scale data or when using computationally demanding modelling methods, e-infrastructures such as high-performance or cloud computing are required, adding to the existing challenges of fault-tolerant automation. Workflow management systems can aid in many of these challenges, but the currently available systems are lacking in the functionality needed to enable agile and flexible predictive modelling. We here present an approach inspired by elements of the flow-based programming paradigm, implemented as an extension of the Luigi system which we name SciLuigi. 
We also discuss the experiences from using the approach when modelling a large set of biochemical interactions using a shared computer cluster.Graphical abstract.}, } @article {pmid27930676, year = {2016}, author = {Drawert, B and Hellander, A and Bales, B and Banerjee, D and Bellesia, G and Daigle, BJ and Douglas, G and Gu, M and Gupta, A and Hellander, S and Horuk, C and Nath, D and Takkar, A and Wu, S and Lötstedt, P and Krintz, C and Petzold, LR}, title = {Stochastic Simulation Service: Bridging the Gap between the Computational Expert and the Biologist.}, journal = {PLoS computational biology}, volume = {12}, number = {12}, pages = {e1005220}, pmid = {27930676}, issn = {1553-7358}, support = {R01 EB014877/EB/NIBIB NIH HHS/United States ; R01 GM113241/GM/NIGMS NIH HHS/United States ; }, mesh = {Computational Biology/*methods ; *Computer Simulation ; *Software ; *Stochastic Processes ; }, abstract = {We present StochSS: Stochastic Simulation as a Service, an integrated development environment for modeling and simulation of both deterministic and discrete stochastic biochemical systems in up to three dimensions. An easy to use graphical user interface enables researchers to quickly develop and simulate a biological model on a desktop or laptop, which can then be expanded to incorporate increasing levels of complexity. StochSS features state-of-the-art simulation engines. As the demand for computational power increases, StochSS can seamlessly scale computing resources in the cloud. In addition, StochSS can be deployed as a multi-user software environment where collaborators share computational resources and exchange models via a public model repository. 
We demonstrate the capabilities and ease of use of StochSS with an example of model development and simulation at increasing levels of complexity.}, } @article {pmid27921034, year = {2016}, author = {Liebeskind, DS}, title = {Crowdsourcing Precision Cerebrovascular Health: Imaging and Cloud Seeding A Million Brains Initiative™.}, journal = {Frontiers in medicine}, volume = {3}, number = {}, pages = {62}, pmid = {27921034}, issn = {2296-858X}, abstract = {Crowdsourcing, an unorthodox approach in medicine, creates an unusual paradigm to study precision cerebrovascular health, eliminating the relative isolation and non-standardized nature of current imaging data infrastructure, while shifting emphasis to the astounding capacity of big data in the cloud. This perspective envisions the use of imaging data of the brain and vessels to orient and seed A Million Brains Initiative™ that may leapfrog incremental advances in stroke and rapidly provide useful data to the sizable population around the globe prone to the devastating effects of stroke and vascular substrates of dementia. Despite such variability in the type of data available and other limitations, the data hierarchy logically starts with imaging and can be enriched with almost endless types and amounts of other clinical and biological data. Crowdsourcing allows an individual to contribute to aggregated data on a population, while preserving their right to specific information about their own brain health. The cloud now offers endless storage, computing prowess, and neuroimaging applications for postprocessing that is searchable and scalable. Collective expertise is a windfall of the crowd in the cloud and particularly valuable in an area such as cerebrovascular health. The rise of precision medicine, rapidly evolving technological capabilities of cloud computing and the global imperative to limit the public health impact of cerebrovascular disease converge in the imaging of A Million Brains Initiative™. 
Crowdsourcing secure data on brain health may provide ultimate generalizability, enable focused analyses, facilitate clinical practice, and accelerate research efforts.}, } @article {pmid27917360, year = {2016}, author = {Zhou, X and Lin, F and Yang, L and Nie, J and Tan, Q and Zeng, W and Zhang, N}, title = {Load balancing prediction method of cloud storage based on analytic hierarchy process and hybrid hierarchical genetic algorithm.}, journal = {SpringerPlus}, volume = {5}, number = {1}, pages = {1989}, pmid = {27917360}, issn = {2193-1801}, abstract = {With the continuous expansion of the cloud computing platform scale and rapid growth of users and applications, how to efficiently use system resources to improve the overall performance of cloud computing has become a crucial issue. To address this issue, this paper proposes a method that uses an analytic hierarchy process group decision (AHPGD) to evaluate the load state of server nodes. Training was carried out by using a hybrid hierarchical genetic algorithm (HHGA) for optimizing a radial basis function neural network (RBFNN). The AHPGD makes the aggregative indicator of virtual machines in cloud, and become input parameters of predicted RBFNN. Also, this paper proposes a new dynamic load balancing scheduling algorithm combined with a weighted round-robin algorithm, which uses the predictive periodical load value of nodes based on AHPPGD and RBFNN optimized by HHGA, then calculates the corresponding weight values of nodes and makes constant updates. 
Meanwhile, it keeps the advantages and avoids the shortcomings of static weighted round-robin algorithm.}, } @article {pmid27897984, year = {2016}, author = {Xie, Q and Wang, L}, title = {Privacy-Preserving Location-Based Service Scheme for Mobile Sensing Data.}, journal = {Sensors (Basel, Switzerland)}, volume = {16}, number = {12}, pages = {}, pmid = {27897984}, issn = {1424-8220}, abstract = {With the wide use of mobile sensing application, more and more location-embedded data are collected and stored in mobile clouds, such as iCloud, Samsung cloud, etc. Using these data, the cloud service provider (CSP) can provide location-based service (LBS) for users. However, the mobile cloud is untrustworthy. The privacy concerns force the sensitive locations to be stored on the mobile cloud in an encrypted form. However, this brings a great challenge to utilize these data to provide efficient LBS. To solve this problem, we propose a privacy-preserving LBS scheme for mobile sensing data, based on the RSA (for Rivest, Shamir and Adleman) algorithm and ciphertext policy attribute-based encryption (CP-ABE) scheme. The mobile cloud can perform location distance computing and comparison efficiently for authorized users, without location privacy leakage. 
In the end, theoretical security analysis and experimental evaluation demonstrate that our scheme is secure against the chosen plaintext attack (CPA) and efficient enough for practical applications in terms of user side computation overhead.}, } @article {pmid27869704, year = {2016}, author = {Shang, F and Jiang, Y and Xiong, A and Su, W and He, L}, title = {A Node Localization Algorithm Based on Multi-Granularity Regional Division and the Lagrange Multiplier Method in Wireless Sensor Networks.}, journal = {Sensors (Basel, Switzerland)}, volume = {16}, number = {11}, pages = {}, pmid = {27869704}, issn = {1424-8220}, abstract = {With the integrated development of the Internet, wireless sensor technology, cloud computing, and mobile Internet, there has been a lot of attention given to research about and applications of the Internet of Things. A Wireless Sensor Network (WSN) is one of the important information technologies in the Internet of Things; it integrates multi-technology to detect and gather information in a network environment by mutual cooperation, using a variety of methods to process and analyze data, implement awareness, and perform tests. This paper mainly researches the localization algorithm of sensor nodes in a wireless sensor network. Firstly, a multi-granularity region partition is proposed to divide the location region. In the range-based method, the RSSI (Received Signal Strength indicator, RSSI) is used to estimate distance. The optimal RSSI value is computed by the Gaussian fitting method. Furthermore, a Voronoi diagram is characterized by the use of dividing region. Rach anchor node is regarded as the center of each region; the whole position region is divided into several regions and the sub-region of neighboring nodes is combined into triangles while the unknown node is locked in the ultimate area. Secondly, the multi-granularity regional division and Lagrange multiplier method are used to calculate the final coordinates. 
Because nodes are influenced by many factors in the practical application, two kinds of positioning methods are designed. When the unknown node is inside positioning unit, we use the method of vector similarity. Moreover, we use the centroid algorithm to calculate the ultimate coordinates of unknown node. When the unknown node is outside positioning unit, we establish a Lagrange equation containing the constraint condition to calculate the first coordinates. Furthermore, we use the Taylor expansion formula to correct the coordinates of the unknown node. In addition, this localization method has been validated by establishing the real environment.}, } @article {pmid27835667, year = {2016}, author = {Kinoshita, M and Higashihara, E and Kawano, H and Higashiyama, R and Koga, D and Fukui, T and Gondo, N and Oka, T and Kawahara, K and Rigo, K and Hague, T and Katsuragi, K and Sudo, K and Takeshi, M and Horie, S and Nutahara, K}, title = {Technical Evaluation: Identification of Pathogenic Mutations in PKD1 and PKD2 in Patients with Autosomal Dominant Polycystic Kidney Disease by Next-Generation Sequencing and Use of a Comprehensive New Classification System.}, journal = {PloS one}, volume = {11}, number = {11}, pages = {e0166288}, pmid = {27835667}, issn = {1932-6203}, mesh = {Adult ; Codon, Nonsense ; DNA Mutational Analysis/methods ; Frameshift Mutation ; Gene Rearrangement ; Genetic Testing/methods ; High-Throughput Nucleotide Sequencing/*methods ; Humans ; Multiplex Polymerase Chain Reaction/methods ; *Mutation ; Mutation, Missense ; Polycystic Kidney, Autosomal Dominant/diagnosis/*genetics ; RNA Splice Sites/genetics ; Reproducibility of Results ; Sensitivity and Specificity ; TRPP Cation Channels/*genetics ; }, abstract = {Genetic testing of PKD1 and PKD2 is expected to play an increasingly important role in determining allelic influences in autosomal dominant polycystic kidney disease (ADPKD) in the near future. 
However, to date, genetic testing is not commonly employed because it is expensive, complicated because of genetic heterogeneity, and does not easily identify pathogenic variants. In this study, we developed a genetic testing system based on next-generation sequencing (NGS), long-range polymerase chain reaction, and a new software package. The new software package integrated seven databases and provided access to five cloud-based computing systems. The database integrated 241 polymorphic nonpathogenic variants detected in 140 healthy Japanese volunteers aged >35 years, who were confirmed by ultrasonography as having no cysts in either kidney. Using this system, we identified 60 novel and 30 known pathogenic mutations in 101 Japanese patients with ADPKD, with an overall detection rate of 89.1% (90/101) [95% confidence interval (CI), 83.0%-95.2%]. The sensitivity of the system increased to 93.1% (94/101) (95% CI, 88.1%-98.0%) when combined with multiplex ligation-dependent probe amplification analysis, making it sufficient for use in a clinical setting. In 82 (87.2%) of the patients, pathogenic mutations were detected in PKD1 (95% CI, 79.0%-92.5%), whereas in 12 (12.8%) patients pathogenic mutations were detected in PKD2 (95% CI, 7.5%-21.0%); this is consistent with previously reported findings. In addition, we were able to reconfirm our pathogenic mutation identification results using Sanger sequencing. 
In conclusion, we developed a high-sensitivity NGS-based system and successfully employed it to identify pathogenic mutations in PKD1 and PKD2 in Japanese patients with ADPKD.}, } @article {pmid27834660, year = {2017}, author = {Ghasemi, R and Al Aziz, MM and Mohammed, N and Dehkordi, MH and Jiang, X}, title = {Private and Efficient Query Processing on Outsourced Genomic Databases.}, journal = {IEEE journal of biomedical and health informatics}, volume = {21}, number = {5}, pages = {1466-1472}, pmid = {27834660}, issn = {2168-2208}, support = {R21 LM012060/LM/NLM NIH HHS/United States ; U01 EB023685/EB/NIBIB NIH HHS/United States ; R00 LM011392/LM/NLM NIH HHS/United States ; R01 HG007078/HG/NHGRI NIH HHS/United States ; R13 HG009072/HG/NHGRI NIH HHS/United States ; R01 GM118609/GM/NIGMS NIH HHS/United States ; }, mesh = {Cloud Computing ; *Computer Security ; Databases, Genetic ; Genomics/*standards ; Humans ; Medical Informatics/*standards ; Outsourced Services/*standards ; *Privacy ; }, abstract = {Applications of genomic studies are spreading rapidly in many domains of science and technology such as healthcare, biomedical research, direct-to-consumer services, and legal and forensic. However, there are a number of obstacles that make it hard to access and process a big genomic database for these applications. First, sequencing genomic sequence is a time consuming and expensive process. Second, it requires large-scale computation and storage systems to process genomic sequences. Third, genomic databases are often owned by different organizations, and thus, not available for public usage. Cloud computing paradigm can be leveraged to facilitate the creation and sharing of big genomic databases for these applications. Genomic data owners can outsource their databases in a centralized cloud server to ease the access of their databases. 
However, data owners are reluctant to adopt this model, as it requires outsourcing the data to an untrusted cloud service provider that may cause data breaches. In this paper, we propose a privacy-preserving model for outsourcing genomic data to a cloud. The proposed model enables query processing while providing privacy protection of genomic databases. Privacy of the individuals is guaranteed by permuting and adding fake genomic records in the database. These techniques allow cloud to evaluate count and top-k queries securely and efficiently. Experimental results demonstrate that a count and a top-k query over 40 Single Nucleotide Polymorphisms (SNPs) in a database of 20 000 records takes around 100 and 150 s, respectively.}, } @article {pmid27814027, year = {2017}, author = {Xie, L and Draizen, EJ and Bourne, PE}, title = {Harnessing Big Data for Systems Pharmacology.}, journal = {Annual review of pharmacology and toxicology}, volume = {57}, number = {}, pages = {245-262}, pmid = {27814027}, issn = {1545-4304}, support = {R01 LM011986/LM/NLM NIH HHS/United States ; }, mesh = {Animals ; *Data Interpretation, Statistical ; Databases, Factual/*statistics & numerical data ; High-Throughput Screening Assays/methods/trends ; Humans ; Pharmacology, Clinical/*methods/trends ; Systems Biology/*methods/trends ; }, abstract = {Systems pharmacology aims to holistically understand mechanisms of drug actions to support drug discovery and clinical practice. Systems pharmacology modeling (SPM) is data driven. It integrates an exponentially growing amount of data at multiple scales (genetic, molecular, cellular, organismal, and environmental). The goal of SPM is to develop mechanistic or predictive multiscale models that are interpretable and actionable. 
The current explosions in genomics and other omics data, as well as the tremendous advances in big data technologies, have already enabled biologists to generate novel hypotheses and gain new knowledge through computational models of genome-wide, heterogeneous, and dynamic data sets. More work is needed to interpret and predict a drug response phenotype, which is dependent on many known and unknown factors. To gain a comprehensive understanding of drug actions, SPM requires close collaborations between domain experts from diverse fields and integration of heterogeneous models from biophysics, mathematics, statistics, machine learning, and semantic webs. This creates challenges in model management, model integration, model translation, and knowledge integration. In this review, we discuss several emergent issues in SPM and potential solutions using big data technology and analytics. The concurrent development of high-throughput techniques, cloud computing, data science, and the semantic web will likely allow SPM to be findable, accessible, interoperable, reusable, reliable, interpretable, and actionable.}, } @article {pmid27810005, year = {2016}, author = {{Neuro Cloud Consortium}}, title = {To the Cloud! A Grassroots Proposal to Accelerate Brain Science Discovery.}, journal = {Neuron}, volume = {92}, number = {3}, pages = {622-627}, pmid = {27810005}, issn = {1097-4199}, support = {P20 GM103472/GM/NIGMS NIH HHS/United States ; R01 MH094524/MH/NIMH NIH HHS/United States ; R01 EB006841/EB/NIBIB NIH HHS/United States ; R01 NS075531/NS/NINDS NIH HHS/United States ; R01 EB020407/EB/NIBIB NIH HHS/United States ; }, mesh = {Cloud Computing/*trends ; Humans ; Information Systems/*trends ; Neurosciences/methods/*organization & administration ; }, abstract = {The revolution in neuroscientific data acquisition is creating an analysis challenge. We propose leveraging cloud-computing technologies to enable large-scale neurodata storing, exploring, analyzing, and modeling. 
This utility will empower scientists globally to generate and test theories of brain function and dysfunction.}, } @article {pmid27801987, year = {2017}, author = {Boles, NC and Stone, T and Bergeron, C and Kiehl, TR}, title = {Big Data access and infrastructure for modern biology: case studies in data repository utility.}, journal = {Annals of the New York Academy of Sciences}, volume = {1387}, number = {1}, pages = {112-123}, doi = {10.1111/nyas.13281}, pmid = {27801987}, issn = {1749-6632}, mesh = {Access to Information ; Animals ; Biomedical Research/*methods/trends ; *Cloud Computing/trends ; *Computer Communication Networks/instrumentation/trends ; Data Mining/methods/trends ; Decision Making, Computer-Assisted ; Genomics/methods/trends ; Humans ; Image Processing, Computer-Assisted ; Internet ; Software ; Systems Biology/instrumentation/*methods/trends ; }, abstract = {Big Data is no longer solely the purview of big organizations with big resources. Today's routine tools and experimental methods can generate large slices of data. For example, high-throughput sequencing can quickly interrogate biological systems for the expression levels of thousands of different RNAs, examine epigenetic marks throughout the genome, and detect differences in the genomes of individuals. Multichannel electrophysiology platforms produce gigabytes of data in just a few minutes of recording. Imaging systems generate videos capturing biological behaviors over the course of days. Thus, any researcher now has access to a veritable wealth of data. However, the ability of any given researcher to utilize that data is limited by her/his own resources and skills for downloading, storing, and analyzing the data. 
In this paper, we examine the necessary resources required to engage Big Data, survey the state of modern data analysis pipelines, present a few data repository case studies, and touch on current institutions and programs supporting the work that relies on Big Data.}, } @article {pmid27801869, year = {2016}, author = {Zhou, L and Chen, N and Yuan, S and Chen, Z}, title = {An Efficient Method of Sharing Mass Spatio-Temporal Trajectory Data Based on Cloudera Impala for Traffic Distribution Mapping in an Urban City.}, journal = {Sensors (Basel, Switzerland)}, volume = {16}, number = {11}, pages = {}, pmid = {27801869}, issn = {1424-8220}, abstract = {The efficient sharing of spatio-temporal trajectory data is important to understand traffic congestion in mass data. However, the data volumes of bus networks in urban cities are growing rapidly, reaching daily volumes of one hundred million datapoints. Accessing and retrieving mass spatio-temporal trajectory data in any field is hard and inefficient due to limited computational capabilities and incomplete data organization mechanisms. Therefore, we propose an optimized and efficient spatio-temporal trajectory data retrieval method based on the Cloudera Impala query engine, called ESTRI, to enhance the efficiency of mass data sharing. As an excellent query tool for mass data, Impala can be applied for mass spatio-temporal trajectory data sharing. In ESTRI we extend the spatio-temporal trajectory data retrieval function of Impala and design a suitable data partitioning method. In our experiments, the Taiyuan BeiDou (BD) bus network is selected, containing 2300 buses with BD positioning sensors, producing 20 million records every day, resulting in two difficulties as described in the Introduction section. In addition, ESTRI and MongoDB are applied in experiments. 
The experiments show that ESTRI achieves the most efficient data retrieval compared to retrieval using MongoDB for data volumes of fifty million, one hundred million, one hundred and fifty million, and two hundred million. The performance of ESTRI is approximately seven times higher than that of MongoDB. The experiments show that ESTRI is an effective method for retrieving mass spatio-temporal trajectory data. Finally, bus distribution mapping in Taiyuan city is achieved, describing the buses density in different regions at different times throughout the day, which can be applied in future studies of transport, such as traffic scheduling, traffic planning and traffic behavior management in intelligent public transportation systems.}, } @article {pmid27797761, year = {2017}, author = {Penha, ED and Iriabho, E and Dussaq, A and de Oliveira, DM and Almeida, JS}, title = {Isomorphic semantic mapping of variant call format (VCF2RDF).}, journal = {Bioinformatics (Oxford, England)}, volume = {33}, number = {4}, pages = {547-548}, pmid = {27797761}, issn = {1367-4811}, support = {U24 CA180924/CA/NCI NIH HHS/United States ; R01 LM011119/LM/NLM NIH HHS/United States ; R01 LM009239/LM/NLM NIH HHS/United States ; }, mesh = {*Genetic Variation ; *Genome ; Genomics/methods ; High-Throughput Nucleotide Sequencing/*methods ; Humans ; Information Storage and Retrieval ; Semantics ; Sequence Analysis, DNA/*methods ; *Software ; }, abstract = {SUMMARY: The move of computational genomics workflows to Cloud Computing platforms is associated with a new level of integration and interoperability that challenges existing data representation formats. The Variant Calling Format (VCF) is in a particularly sensitive position in that regard, with both clinical and consumer-facing analysis tools relying on this self-contained description of genomic variation in Next Generation Sequencing (NGS) results. 
In this report we identify an isomorphic map between VCF and the reference Resource Description Framework (RDF). RDF is advanced by the World Wide Web Consortium (W3C) to enable representations of linked data that are both distributed and discoverable. The resulting ability to decompose VCF reports of genomic variation without loss of context addresses the need to modularize and govern NGS pipelines for Precision Medicine. Specifically, it provides the flexibility (i.e. the indexing) needed to support the wide variety of clinical scenarios and patient-facing governance where only part of the VCF data is fitting.

Software libraries with a claim to be both domain-facing and consumer-facing have to pass the test of portability across the variety of devices that those consumers in fact adopt. That is, ideally the implementation should itself take place within the space defined by web technologies. Consequently, the isomorphic mapping function was implemented in JavaScript, and was tested in a variety of environments and devices, client and server side alike. These range from web browsers in mobile phones to the most popular micro service platform, NodeJS. The code is publicly available at https://github.com/ibl/VCFr , with a live deployment at: http://ibl.github.io/VCFr/ .

CONTACT: jonas.almeida@stonybrookmedicine.edu.}, } @article {pmid27378292, year = {2016}, author = {Weinmaier, T and Platzer, A and Frank, J and Hellinger, HJ and Tischler, P and Rattei, T}, title = {ConsPred: a rule-based (re-)annotation framework for prokaryotic genomes.}, journal = {Bioinformatics (Oxford, England)}, volume = {32}, number = {21}, pages = {3327-3329}, doi = {10.1093/bioinformatics/btw393}, pmid = {27378292}, issn = {1367-4811}, support = {P 27703/FWF_/Austrian Science Fund FWF/Austria ; }, mesh = {Algorithms ; *Genome ; *Prokaryotic Cells ; *Software ; }, abstract = {MOTIVATION: The rapidly growing number of available prokaryotic genome sequences requires fully automated and high-quality software solutions for their initial and re-annotation. Here we present ConsPred, a prokaryotic genome annotation framework that performs intrinsic gene predictions, homology searches, predictions of non-coding genes as well as CRISPR repeats and integrates all evidence into a consensus annotation. ConsPred achieves comprehensive, high-quality annotations based on rules and priorities, similar to decision-making in manual curation and avoids conflicting predictions. Parameters controlling the annotation process are configurable by the user. ConsPred has been used in the institutions of the authors for longer than 5 years and can easily be extended and adapted to specific needs.

SUMMARY: The ConsPred algorithm for producing a consensus from the varying scores of multiple gene prediction programs approaches manual curation in accuracy. Its rule-based approach for choosing final predictions avoids overriding previous manual curations.

ConsPred is implemented in Java, Perl and Shell and is freely available under the Creative Commons license as a stand-alone in-house pipeline or as an Amazon Machine Image for cloud computing, see https://sourceforge.net/projects/conspred/.

CONTACT: thomas.rattei@univie.ac.at. Supplementary information: Supplementary data are available at Bioinformatics online.}, } @article {pmid27784829, year = {2017}, author = {Thiel, S and Mitchell, J and Williams, J}, title = {Coordination or Collision? The Intersection of Diabetes Care, Cybersecurity, and Cloud-Based Computing.}, journal = {Journal of diabetes science and technology}, volume = {11}, number = {2}, pages = {195-197}, doi = {10.1177/1932296816676189}, pmid = {27784829}, issn = {1932-2968}, mesh = {*Cloud Computing ; *Computer Security ; Diabetes Mellitus/*therapy ; Humans ; *Pancreas, Artificial ; }, abstract = {Diagnosis and treatment of diabetes changed little from the Middle Ages through the early 19th century, when the first chemical test for the condition was developed. In the 20th century, advances in diabetes management gained momentum with home-use diagnostic devices and mass-produced insulin. In the 21st century, technological developments around diabetes are advancing so rapidly that a small, discrete system of medical devices that serve as an artificial pancreas are now possible. 
In this article, we assert that medical device interoperability and cyber security are necessary preconditions for safe, effective, and reliable widespread use of the artificial pancreas system.}, } @article {pmid26439830, year = {2016}, author = {Welch, J and Kanter, B and Skora, B and McCombie, S and Henry, I and McCombie, D and Kennedy, R and Soller, B}, title = {Multi-parameter vital sign database to assist in alarm optimization for general care units.}, journal = {Journal of clinical monitoring and computing}, volume = {30}, number = {6}, pages = {895-900}, pmid = {26439830}, issn = {1573-2614}, mesh = {Blood Pressure ; Blood Pressure Determination/methods ; *Clinical Alarms ; Cloud Computing ; Computer Simulation ; *Databases, Factual ; Equipment Failure ; Heart Rate ; Hospitals ; Humans ; Medical Informatics ; Monitoring, Physiologic/*methods ; Respiratory Rate ; *Vital Signs ; Wireless Technology ; }, abstract = {Continual vital sign assessment on the general care, medical-surgical floor is expected to provide early indication of patient deterioration and increase the effectiveness of rapid response teams. However, there is concern that continual, multi-parameter vital sign monitoring will produce alarm fatigue. The objective of this study was the development of a methodology to help care teams optimize alarm settings. An on-body wireless monitoring system was used to continually assess heart rate, respiratory rate, SpO2 and noninvasive blood pressure in the general ward of ten hospitals between April 1, 2014 and January 19, 2015. These data, 94,575 h for 3430 patients are contained in a large database, accessible with cloud computing tools. Simulation scenarios assessed the total alarm rate as a function of threshold and annunciation delay (s). The total alarm rate of ten alarms/patient/day predicted from the cloud-hosted database was the same as the total alarm rate for a 10 day evaluation (1550 h for 36 patients) in an independent hospital. 
Plots of vital sign distributions in the cloud-hosted database were similar to other large databases published by different authors. The cloud-hosted database can be used to run simulations for various alarm thresholds and annunciation delays to predict the total alarm burden experienced by nursing staff. This methodology might, in the future, be used to help reduce alarm fatigue without sacrificing the ability to continually monitor all vital signs.}, } @article {pmid27783315, year = {2016}, author = {Liu, L and Chen, W and Nie, M and Zhang, F and Wang, Y and He, A and Wang, X and Yan, G}, title = {iMAGE cloud: medical image processing as a service for regional healthcare in a hybrid cloud environment.}, journal = {Environmental health and preventive medicine}, volume = {21}, number = {6}, pages = {563-571}, pmid = {27783315}, issn = {1347-4715}, mesh = {China ; Cloud Computing/*statistics & numerical data ; Electronic Health Records/*instrumentation ; *Image Processing, Computer-Assisted ; *Software ; }, abstract = {OBJECTIVES: To handle the emergence of the regional healthcare ecosystem, physicians and surgeons in various departments and healthcare institutions must process medical images securely, conveniently, and efficiently, and must integrate them with electronic medical records (EMRs). In this manuscript, we propose a software as a service (SaaS) cloud called the iMAGE cloud.

METHODS: A three-layer hybrid cloud was created to provide medical image processing services in the smart city of Wuxi, China, in April 2015. In the first step, medical images and EMR data were received and integrated via the hybrid regional healthcare network. Then, traditional and advanced image processing functions were proposed and computed in a unified manner in the high-performance cloud units. Finally, the image processing results were delivered to regional users using the virtual desktop infrastructure (VDI) technology. Security infrastructure was also taken into consideration.

RESULTS: Integrated information query and many advanced medical image processing functions-such as coronary extraction, pulmonary reconstruction, vascular extraction, intelligent detection of pulmonary nodules, image fusion, and 3D printing-were available to local physicians and surgeons in various departments and healthcare institutions.

CONCLUSIONS: Implementation results indicate that the iMAGE cloud can provide convenient, efficient, compatible, and secure medical image processing services in regional healthcare networks. The iMAGE cloud has been proven to be valuable in applications in the regional healthcare system, and it could have a promising future in the healthcare system worldwide.}, } @article {pmid27782052, year = {2016}, author = {Fraga-Lamas, P and Fernández-Caramés, TM and Suárez-Albela, M and Castedo, L and González-López, M}, title = {A Review on Internet of Things for Defense and Public Safety.}, journal = {Sensors (Basel, Switzerland)}, volume = {16}, number = {10}, pages = {}, pmid = {27782052}, issn = {1424-8220}, mesh = {*Civil Defense ; Cloud Computing ; Computer Communication Networks ; Electrocardiography, Ambulatory ; Humans ; *Internet ; Remote Sensing Technology ; }, abstract = {The Internet of Things (IoT) is undeniably transforming the way that organizations communicate and organize everyday businesses and industrial procedures. Its adoption has proven well suited for sectors that manage a large number of assets and coordinate complex and distributed processes. This survey analyzes the great potential for applying IoT technologies (i.e., data-driven applications or embedded automation and intelligent adaptive systems) to revolutionize modern warfare and provide benefits similar to those in industry. It identifies scenarios where Defense and Public Safety (PS) could leverage better commercial IoT capabilities to deliver greater survivability to the warfighter or first responders, while reducing costs and increasing operation efficiency and effectiveness. This article reviews the main tactical requirements and the architecture, examining gaps and shortcomings in existing IoT systems across the military field and mission-critical scenarios. 
The review characterizes the open challenges for a broad deployment and presents a research roadmap for enabling an affordable IoT for defense and PS.}, } @article {pmid27781238, year = {2016}, author = {Ermakova, T and Fabian, B and Zarnekow, R}, title = {Improving Individual Acceptance of Health Clouds through Confidentiality Assurance.}, journal = {Applied clinical informatics}, volume = {7}, number = {4}, pages = {983-993}, pmid = {27781238}, issn = {1869-0327}, mesh = {Adult ; *Cloud Computing ; Confidentiality/*psychology ; Electronic Health Records ; Female ; Humans ; Male ; Patient Acceptance of Health Care/*psychology ; Surveys and Questionnaires ; }, abstract = {BACKGROUND: Cloud computing promises to essentially improve healthcare delivery performance. However, shifting sensitive medical records to third-party cloud providers could create an adoption hurdle because of security and privacy concerns.

OBJECTIVES: This study examines the effect of confidentiality assurance in a cloud-computing environment on individuals' willingness to accept the infrastructure for inter-organizational sharing of medical data.

METHODS: We empirically investigate our research question by a survey with over 260 full responses. For the setting with a high confidentiality assurance, we base on a recent multi-cloud architecture which provides very high confidentiality assurance through a secret-sharing mechanism: Health information is cryptographically encoded and distributed in a way that no single and no small group of cloud providers is able to decode it.

RESULTS: Our results indicate the importance of confidentiality assurance in individuals' acceptance of health clouds for sensitive medical data. Specifically, this finding holds for a variety of practically relevant circumstances, i.e., in the absence and despite the presence of conventional offline alternatives and along with pseudonymization. On the other hand, we do not find support for the effect of confidentiality assurance in individuals' acceptance of health clouds for non-sensitive medical data. These results could support the process of privacy engineering for health-cloud solutions.}, } @article {pmid27639895, year = {2016}, author = {Sahi, A and Lai, D and Li, Y}, title = {Security and privacy preserving approaches in the eHealth clouds with disaster recovery plan.}, journal = {Computers in biology and medicine}, volume = {78}, number = {}, pages = {1-8}, doi = {10.1016/j.compbiomed.2016.09.003}, pmid = {27639895}, issn = {1879-0534}, mesh = {*Cloud Computing ; *Computer Security ; *Electronic Health Records ; Health Records, Personal ; Humans ; Information Storage and Retrieval ; *Privacy ; Telemedicine/standards ; }, abstract = {Cloud computing was introduced as an alternative storage and computing model in the health sector as well as other sectors to handle large amounts of data. Many healthcare companies have moved their electronic data to the cloud in order to reduce in-house storage, IT development and maintenance costs. However, storing the healthcare records in a third-party server may cause serious storage, security and privacy issues. Therefore, many approaches have been proposed to preserve security as well as privacy in cloud computing projects. Cryptographic-based approaches were presented as one of the best ways to ensure the security and privacy of healthcare data in the cloud. 
Nevertheless, the cryptographic-based approaches which are used to transfer health records safely remain vulnerable regarding security, privacy, or the lack of any disaster recovery strategy. In this paper, we review the related work on security and privacy preserving as well as disaster recovery in the eHealth cloud domain. Then we propose two approaches, the Security-Preserving approach and the Privacy-Preserving approach, and a disaster recovery plan. The Security-Preserving approach is a robust means of ensuring the security and integrity of Electronic Health Records, and the Privacy-Preserving approach is an efficient authentication approach which protects the privacy of Personal Health Records. Finally, we discuss how the integrated approaches and the disaster recovery plan can ensure the reliability and security of cloud projects.}, } @article {pmid27766951, year = {2016}, author = {Liu, Y and Khan, SM and Wang, J and Rynge, M and Zhang, Y and Zeng, S and Chen, S and Maldonado Dos Santos, JV and Valliyodan, B and Calyam, PP and Merchant, N and Nguyen, HT and Xu, D and Joshi, T}, title = {PGen: large-scale genomic variations analysis workflow and browser in SoyKB.}, journal = {BMC bioinformatics}, volume = {17}, number = {Suppl 13}, pages = {337}, pmid = {27766951}, issn = {1471-2105}, mesh = {*Genome, Plant ; Genomics/methods ; High-Throughput Nucleotide Sequencing/methods ; *Polymorphism, Genetic ; Sequence Analysis, DNA/*methods ; *Software ; Glycine max/*genetics ; Workflow ; }, abstract = {BACKGROUND: With the advances in next-generation sequencing (NGS) technology and significant reductions in sequencing costs, it is now possible to sequence large collections of germplasm in crops for detecting genome-scale genetic variations and to apply the knowledge towards improvements in traits. 
To efficiently facilitate large-scale NGS resequencing data analysis of genomic variations, we have developed "PGen", an integrated and optimized workflow using the Extreme Science and Engineering Discovery Environment (XSEDE) high-performance computing (HPC) virtual system, iPlant cloud data storage resources and Pegasus workflow management system (Pegasus-WMS). The workflow allows users to identify single nucleotide polymorphisms (SNPs) and insertion-deletions (indels), perform SNP annotations and conduct copy number variation analyses on multiple resequencing datasets in a user-friendly and seamless way.

RESULTS: We have developed both a Linux version in GitHub (https://github.com/pegasus-isi/PGen-GenomicVariations-Workflow) and a web-based implementation of the PGen workflow integrated within the Soybean Knowledge Base (SoyKB), (http://soykb.org/Pegasus/index.php). Using PGen, we identified 10,218,140 single-nucleotide polymorphisms (SNPs) and 1,398,982 indels from analysis of 106 soybean lines sequenced at 15X coverage. 297,245 non-synonymous SNPs and 3330 copy number variation (CNV) regions were identified from this analysis. SNPs identified using PGen from additional soybean resequencing projects adding to 500+ soybean germplasm lines in total have been integrated. These SNPs are being utilized for trait improvement using genotype to phenotype prediction approaches developed in-house. In order to browse and access NGS data easily, we have also developed an NGS resequencing data browser (http://soykb.org/NGS_Resequence/NGS_index.php) within SoyKB to provide easy access to SNP and downstream analysis results for soybean researchers.

CONCLUSION: PGen workflow has been optimized for the most efficient analysis of soybean data using thorough testing and validation. This research serves as an example of best practices for development of genomics data analysis workflows by integrating remote HPC resources and efficient data management with ease of use for biological users. PGen workflow can also be easily customized for analysis of data in other species.}, } @article {pmid27755563, year = {2016}, author = {Charlebois, K and Palmour, N and Knoppers, BM}, title = {The Adoption of Cloud Computing in the Field of Genomics Research: The Influence of Ethical and Legal Issues.}, journal = {PloS one}, volume = {11}, number = {10}, pages = {e0164347}, pmid = {27755563}, issn = {1932-6203}, mesh = {Cloud Computing/*ethics/*legislation & jurisprudence ; Computer Security ; Databases, Factual/ethics ; *Genomics/ethics ; Humans ; Information Storage and Retrieval/ethics ; Interviews as Topic ; Research ; }, abstract = {This study aims to understand the influence of the ethical and legal issues on cloud computing adoption in the field of genomics research. To do so, we adapted Diffusion of Innovation (DoI) theory to enable understanding of how key stakeholders manage the various ethical and legal issues they encounter when adopting cloud computing. Twenty semi-structured interviews were conducted with genomics researchers, patient advocates and cloud service providers. Thematic analysis generated five major themes: 1) Getting comfortable with cloud computing; 2) Weighing the advantages and the risks of cloud computing; 3) Reconciling cloud computing with data privacy; 4) Maintaining trust and 5) Anticipating the cloud by creating the conditions for cloud adoption. Our analysis highlights the tendency among genomics researchers to gradually adopt cloud technology. 
Efforts made by cloud service providers to promote cloud computing adoption are confronted by researchers' perpetual cost and security concerns, along with a lack of familiarity with the technology. Further underlying those fears are researchers' legal responsibility with respect to the data that is stored on the cloud. Alternative consent mechanisms aimed at increasing patients' control over the use of their data also provide a means to circumvent various institutional and jurisdictional hurdles that restrict access by creating siloed databases. However, the risk of creating new, cloud-based silos may run counter to the goal in genomics research to increase data sharing on a global scale.}, } @article {pmid27754388, year = {2016}, author = {Pérez-Torres, R and Torres-Huitzil, C and Galeana-Zapién, H}, title = {Full On-Device Stay Points Detection in Smartphones for Location-Based Mobile Applications.}, journal = {Sensors (Basel, Switzerland)}, volume = {16}, number = {10}, pages = {}, pmid = {27754388}, issn = {1424-8220}, abstract = {The tracking of frequently visited places, also known as stay points, is a critical feature in location-aware mobile applications as a way to adapt the information and services provided to smartphones users according to their moving patterns. Location based applications usually employ the GPS receiver along with Wi-Fi hot-spots and cellular cell tower mechanisms for estimating user location. Typically, fine-grained GPS location data are collected by the smartphone and transferred to dedicated servers for trajectory analysis and stay points detection. Such Mobile Cloud Computing approach has been successfully employed for extending smartphone's battery lifetime by exchanging computation costs, assuming that on-device stay points detection is prohibitive. 
In this article, we propose and validate the feasibility of having an alternative event-driven mechanism for stay points detection that is executed fully on-device, and that provides higher energy savings by avoiding communication costs. Our solution is encapsulated in a sensing middleware for Android smartphones, where a stream of GPS location updates is collected in the background, supporting duty cycling schemes, and incrementally analyzed following an event-driven paradigm for stay points detection. To evaluate the performance of the proposed middleware, real world experiments were conducted under different stress levels, validating its power efficiency when compared against a Mobile Cloud Computing oriented solution.}, } @article {pmid27754378, year = {2016}, author = {Solano, A and Dormido, R and Duro, N and González, V}, title = {One-Time URL: A Proximity Security Mechanism between Internet of Things and Mobile Devices.}, journal = {Sensors (Basel, Switzerland)}, volume = {16}, number = {10}, pages = {}, pmid = {27754378}, issn = {1424-8220}, abstract = {The aim of this paper is to determine the physical proximity of connected things when they are accessed from a smartphone. Links between connected things and mobile communication devices are temporarily created by means of dynamic URLs (uniform resource locators) which may be easily discovered with pervasive short-range radio frequency technologies available on smartphones. In addition, a multi cross domain silent logging mechanism to allow people to interact with their surrounding connected things from their mobile communication devices is presented. 
The proposed mechanisms are based in web standards technologies, evolving our social network of Internet of Things towards the so-called Web of Things.}, } @article {pmid27743241, year = {2016}, author = {Tahmasbi, A and Adabi, S and Rezaee, A}, title = {Behavioral Reference Model for Pervasive Healthcare Systems.}, journal = {Journal of medical systems}, volume = {40}, number = {12}, pages = {270}, pmid = {27743241}, issn = {1573-689X}, mesh = {Cloud Computing ; Health Information Exchange ; Humans ; Information Storage and Retrieval/*methods ; Information Systems/*organization & administration ; Reproducibility of Results ; *Software Design ; Telemedicine/*organization & administration ; }, abstract = {The emergence of mobile healthcare systems is an important outcome of application of pervasive computing concepts for medical care purposes. These systems provide the facilities and infrastructure required for automatic and ubiquitous sharing of medical information. Healthcare systems have a dynamic structure and configuration, therefore having an architecture is essential for future development of these systems. The need for increased response rate, problem limited storage, accelerated processing and etc. the tendency toward creating a new generation of healthcare system architecture highlight the need for further focus on cloud-based solutions for transfer data and data processing challenges. Integrity and reliability of healthcare systems are of critical importance, as even the slightest error may put the patients' lives in danger; therefore acquiring a behavioral model for these systems and developing the tools required to model their behaviors are of significant importance. The high-level designs may contain some flaws, therefor the system must be fully examined for different scenarios and conditions. 
This paper presents a software architecture for development of healthcare systems based on pervasive computing concepts, and then models the behavior of the described system. A set of solutions are then proposed to improve the design's qualitative characteristics including availability, interoperability and performance.}, } @article {pmid27733153, year = {2016}, author = {Tang, H and Jiang, X and Wang, X and Wang, S and Sofia, H and Fox, D and Lauter, K and Malin, B and Telenti, A and Xiong, L and Ohno-Machado, L}, title = {Protecting genomic data analytics in the cloud: state of the art and opportunities.}, journal = {BMC medical genomics}, volume = {9}, number = {1}, pages = {63}, pmid = {27733153}, issn = {1755-8794}, support = {R21 LM012060/LM/NLM NIH HHS/United States ; U54 HL108460/HL/NHLBI NIH HHS/United States ; R00 LM011392/LM/NLM NIH HHS/United States ; R01 HG007078/HG/NHGRI NIH HHS/United States ; R00 HG008175/HG/NHGRI NIH HHS/United States ; }, mesh = {*Cloud Computing ; *Computer Security ; Genome-Wide Association Study ; *Genomics ; }, abstract = {The outsourcing of genomic data into public cloud computing settings raises concerns over privacy and security. Significant advancements in secure computation methods have emerged over the past several years, but such techniques need to be rigorously evaluated for their ability to support the analysis of human genomic data in an efficient and cost-effective manner. With respect to public cloud environments, there are concerns about the inadvertent exposure of human genomic data to unauthorized users. In analyses involving multiple institutions, there is additional concern about data being used beyond agreed research scope and being processed in untrusted computational environments, which may not satisfy institutional policies. 
To systematically investigate these issues, the NIH-funded National Center for Biomedical Computing iDASH (integrating Data for Analysis, 'anonymization' and SHaring) hosted the second Critical Assessment of Data Privacy and Protection competition to assess the capacity of cryptographic technologies for protecting computation over human genomes in the cloud and promoting cross-institutional collaboration. Data scientists were challenged to design and engineer practical algorithms for secure outsourcing of genome computation tasks in working software, whereby analyses are performed only on encrypted data. They were also challenged to develop approaches to enable secure collaboration on data from genomic studies generated by multiple organizations (e.g., medical centers) to jointly compute aggregate statistics without sharing individual-level records. The results of the competition indicated that secure computation techniques can enable comparative analysis of human genomes, but greater efficiency (in terms of compute time and memory utilization) are needed before they are sufficiently practical for real world environments.}, } @article {pmid27730389, year = {2016}, author = {Zeadally, S and Isaac, JT and Baig, Z}, title = {Security Attacks and Solutions in Electronic Health (E-health) Systems.}, journal = {Journal of medical systems}, volume = {40}, number = {12}, pages = {263}, pmid = {27730389}, issn = {1573-689X}, mesh = {Computer Security/*standards ; *Confidentiality ; Electronic Health Records/organization & administration ; Health Information Exchange/standards ; Humans ; Internet ; Remote Sensing Technology/methods/standards ; Telemedicine/*organization & administration/standards ; Wireless Technology/*organization & administration/standards ; }, abstract = {For centuries, healthcare has been a basic service provided by many governments to their citizens. 
Over the past few decades, we have witnessed a significant transformation in the quality of healthcare services provided by healthcare organizations and professionals. Recent advances have led to the emergence of Electronic Health (E-health), largely made possible by the massive deployment and adoption of information and communication technologies (ICTs). However, cybercriminals and attackers are exploiting vulnerabilities associated primarily with ICTs, causing data breaches of patients' confidential digital health information records. Here, we review recent security attacks reported for E-healthcare and discuss the solutions proposed to mitigate them. We also identify security challenges that must be addressed by E-health system designers and implementers in the future, to respond to threats that could arise as E-health systems become integrated with technologies such as cloud computing, the Internet of Things, and smart cities.}, } @article {pmid27727225, year = {2016}, author = {Datta, S and Bettinger, K and Snyder, M}, title = {Corrigendum: Secure cloud computing for genomic data.}, journal = {Nature biotechnology}, volume = {34}, number = {10}, pages = {1072}, doi = {10.1038/nbt1016-1072c}, pmid = {27727225}, issn = {1546-1696}, } @article {pmid27722978, year = {2016}, author = {Jiang, H and Li, X and Xu, Q}, title = {An Improvement to a Multi-Client Searchable Encryption Scheme for Boolean Queries.}, journal = {Journal of medical systems}, volume = {40}, number = {12}, pages = {255}, doi = {10.1007/s10916-016-0610-6}, pmid = {27722978}, issn = {1573-689X}, mesh = {*Algorithms ; Cloud Computing ; Computer Security/*instrumentation ; *Confidentiality ; *Health Information Exchange ; Humans ; Information Storage and Retrieval ; }, abstract = {The migration of e-health systems to the cloud computing brings huge benefits, as same as some security risks. 
Searchable Encryption(SE) is a cryptography encryption scheme that can protect the confidentiality of data and utilize the encrypted data at the same time. The SE scheme proposed by Cash et al. in Crypto2013 and its follow-up work in CCS2013 are most practical SE Scheme that support Boolean queries at present. In their scheme, the data user has to generate the search tokens by the counter number one by one and interact with server repeatedly, until he meets the correct one, or goes through plenty of tokens to illustrate that there is no search result. In this paper, we make an improvement to their scheme. We allow server to send back some information and help the user to generate exact search token in the search phase. In our scheme, there are only two round interaction between server and user, and the search token has [Formula: see text] elements, where n is the keywords number in query expression, and [Formula: see text] is the minimum documents number that contains one of keyword in query expression, and the computation cost of server is [Formula: see text] modular exponentiation operation.}, } @article {pmid27714562, year = {2016}, author = {Eom, J and Lee, DH and Lee, K}, title = {Patient-Controlled Attribute-Based Encryption for Secure Electronic Health Records System.}, journal = {Journal of medical systems}, volume = {40}, number = {12}, pages = {253}, pmid = {27714562}, issn = {1573-689X}, mesh = {Algorithms ; Cloud Computing ; Computer Security/*instrumentation ; *Confidentiality ; Electronic Health Records/*instrumentation ; Humans ; *Patient Access to Records ; }, abstract = {In recent years, many countries have been trying to integrate electronic health data managed by each hospital to offer more efficient healthcare services. Since health data contain sensitive information of patients, there have been much research that present privacy preserving mechanisms. 
However, existing studies either require a patient to perform various steps to secure the data or restrict the patient to exerting control over the data. In this paper, we propose patient-controlled attribute-based encryption, which enables a patient (a data owner) to control access to the health data and reduces the operational burden for the patient, simultaneously. With our method, the patient has powerful control capability of his/her own health data in that he/she has the final say on the access with time limitation. In addition, our scheme provides emergency medical services which allow the emergency staffs to access the health data without the patient's permission only in the case of emergencies. We prove that our scheme is secure under cryptographic assumptions and analyze its efficiency from the patient's perspective.}, } @article {pmid30800319, year = {2016}, author = {Lin, CH and Chen, WL and Li, CM and Wu, MJ and Huang, PT and Chen, YS}, title = {Assistive technology using integrated flexible sensor and virtual alarm unit for blood leakage detection during dialysis therapy.}, journal = {Healthcare technology letters}, volume = {3}, number = {4}, pages = {290-296}, pmid = {30800319}, issn = {2053-3713}, abstract = {Blood leakages and blood loss are both serious complications during dialysis therapies. According to dialysis survey reports, these events are life-threatening issues for nephrology nurses, medical staff, and patients. When venous needle dislodgement occurs, it takes only <2.5 min of reaction time for blood loss in an adult patient, resulting in mortality. As an early-warning design, a wireless assistive technology using an integrated flexible sensor and virtual alarm unit was developed to detect blood leakage during dialysis therapies. The flexible sensor was designed using a screen print technique with printing electronic circuits on a plastic substrate. 
A self-organising algorithm was used to design a virtual alarm unit, consisting of a virtual direct current grid and a virtual alarm driver. In other words, this warning device was employed to identify the blood leakage levels via wireless fidelity wireless network in cloud computing. The feasibility was verified, and commercialisation designs can also be implemented in an embedded system.}, } @article {pmid27689562, year = {2017}, author = {Kruse, CS and Frederick, B and Jacobson, T and Monticone, DK}, title = {Cybersecurity in healthcare: A systematic review of modern threats and trends.}, journal = {Technology and health care : official journal of the European Society for Engineering and Medicine}, volume = {25}, number = {1}, pages = {1-10}, doi = {10.3233/THC-161263}, pmid = {27689562}, issn = {1878-7401}, mesh = {Computer Security/*standards ; *Confidentiality ; Crime ; Delivery of Health Care/*organization & administration/standards ; Health Insurance Portability and Accountability Act ; Humans ; United States ; }, abstract = {BACKGROUND: The adoption of healthcare technology is arduous, and it requires planning and implementation time. Healthcare organizations are vulnerable to modern trends and threats because it has not kept up with threats.

OBJECTIVE: The objective of this systematic review is to identify cybersecurity trends, including ransomware, and identify possible solutions by querying academic literature.

METHODS: The reviewers conducted three separate searches through the CINAHL and PubMed (MEDLINE) and the Nursing and Allied Health Source via ProQuest databases. Using key words with Boolean operators, database filters, and hand screening, we identified 31 articles that met the objective of the review.

RESULTS: The analysis of 31 articles showed the healthcare industry lags behind in security. Like other industries, healthcare should clearly define cybersecurity duties, establish clear procedures for upgrading software and handling a data breach, use VLANs and deauthentication and cloud-based computing, and train their users not to open suspicious code.

CONCLUSIONS: The healthcare industry is a prime target for medical information theft as it lags behind other leading industries in securing vital data. It is imperative that time and funding are invested in maintaining and ensuring the protection of healthcare technology and the confidentiality of patient information from unauthorized access.}, } @article {pmid27681008, year = {2017}, author = {Fu, D and Liu, Y}, title = {Using cloud models of heartbeats as the entity identifier to secure mobile devices.}, journal = {Journal of medical engineering \& technology}, volume = {41}, number = {1}, pages = {36-45}, doi = {10.1080/03091902.2016.1210684}, pmid = {27681008}, issn = {1464-522X}, mesh = {*Biometric Identification ; Electrocardiography ; Electrodes ; Female ; *Heart Sounds ; Humans ; Male ; *Models, Biological ; *Smartphone ; }, abstract = {Mobile devices are extensively used to store more private and often sensitive information. Therefore, it is important to protect them against unauthorised access. Authentication ensures that authorised users can use mobile devices. However, traditional authentication methods, such as numerical or graphic passwords, are vulnerable to passive attacks. For example, an adversary can steal the password by snooping from a shorter distance. To avoid these problems, this study presents a biometric approach that uses cloud models of heartbeats as the entity identifier to secure mobile devices. Here, it is identified that these concepts including cloud model or cloud have nothing to do with cloud computing. The cloud model appearing in the study is the cognitive model. In the proposed method, heartbeats are collected by two ECG electrodes that are connected to one mobile device. The backward normal cloud generator is used to generate ECG standard cloud models characterising the heartbeat template. 
When a user tries to have access to their mobile device, cloud models regenerated by fresh heartbeats will be compared with ECG standard cloud models to determine if the current user can use this mobile device. This authentication method was evaluated from three aspects including accuracy, authentication time and energy consumption. The proposed method gives 86.04% of true acceptance rate with 2.73% of false acceptance rate. One authentication can be done in 6s, and this processing consumes about 2000 mW of power.}, } @article {pmid27679478, year = {2017}, author = {Zhang, Y and Aevermann, BD and Anderson, TK and Burke, DF and Dauphin, G and Gu, Z and He, S and Kumar, S and Larsen, CN and Lee, AJ and Li, X and Macken, C and Mahaffey, C and Pickett, BE and Reardon, B and Smith, T and Stewart, L and Suloway, C and Sun, G and Tong, L and Vincent, AL and Walters, B and Zaremba, S and Zhao, H and Zhou, L and Zmasek, C and Klem, EB and Scheuermann, RH}, title = {Influenza Research Database: An integrated bioinformatics resource for influenza virus research.}, journal = {Nucleic acids research}, volume = {45}, number = {D1}, pages = {D466-D474}, pmid = {27679478}, issn = {1362-4962}, support = {HHSN272201400028C/AI/NIAID NIH HHS/United States ; }, mesh = {Computational Biology/*methods ; *Databases, Factual ; *Influenza A virus/classification/physiology ; Molecular Typing/methods ; Phenotype ; Phylogeny ; *Research ; *Software ; Viral Proteins/genetics ; Virulence ; }, abstract = {The Influenza Research Database (IRD) is a U.S. National Institute of Allergy and Infectious Diseases (NIAID)-sponsored Bioinformatics Resource Center dedicated to providing bioinformatics support for influenza virus research. 
IRD facilitates the research and development of vaccines, diagnostics and therapeutics against influenza virus by providing a comprehensive collection of influenza-related data integrated from various sources, a growing suite of analysis and visualization tools for data mining and hypothesis generation, personal workbench spaces for data storage and sharing, and active user community support. Here, we describe the recent improvements in IRD including the use of cloud and high performance computing resources, analysis and visualization of user-provided sequence data with associated metadata, predictions of novel variant proteins, annotations of phenotype-associated sequence markers and their predicted phenotypic effects, hemagglutinin (HA) clade classifications, an automated tool for HA subtype numbering conversion, linkouts to disease event data and the addition of host factor and antiviral drug components. All data and tools are freely available without restriction from the IRD website at https://www.fludb.org.}, } @article {pmid27655341, year = {2017}, author = {Stockton, DB and Santamaria, F}, title = {Automating NEURON Simulation Deployment in Cloud Resources.}, journal = {Neuroinformatics}, volume = {15}, number = {1}, pages = {51-70}, pmid = {27655341}, issn = {1559-0089}, support = {G12 MD007591/MD/NIMHD NIH HHS/United States ; }, mesh = {Algorithms ; *Cloud Computing ; *Computer Simulation ; *Computing Methodologies ; Humans ; Internet ; Neurons/physiology ; *Software ; User-Computer Interface ; }, abstract = {Simulations in neuroscience are performed on local servers or High Performance Computing (HPC) facilities. Recently, cloud computing has emerged as a potential computational platform for neuroscience simulation. 
In this paper we compare and contrast HPC and cloud resources for scientific computation, then report how we deployed NEURON, a widely used simulator of neuronal activity, in three clouds: Chameleon Cloud, a hybrid private academic cloud for cloud technology research based on the OpenStack software; Rackspace, a public commercial cloud, also based on OpenStack; and Amazon Elastic Cloud Computing, based on Amazon's proprietary software. We describe the manual procedures and how to automate cloud operations. We describe extending our simulation automation software called NeuroManager (Stockton and Santamaria, Frontiers in Neuroinformatics, 2015), so that the user is capable of recruiting private cloud, public cloud, HPC, and local servers simultaneously with a simple common interface. We conclude by performing several studies in which we examine speedup, efficiency, total session time, and cost for sets of simulations of a published NEURON model.}, } @article {pmid27653042, year = {2016}, author = {Guo, C and Zhuang, R and Jie, Y and Ren, Y and Wu, T and Choo, KR}, title = {Fine-grained Database Field Search Using Attribute-Based Encryption for E-Healthcare Clouds.}, journal = {Journal of medical systems}, volume = {40}, number = {11}, pages = {235}, doi = {10.1007/s10916-016-0588-0}, pmid = {27653042}, issn = {1573-689X}, mesh = {*Cloud Computing ; Computer Security/*instrumentation ; *Confidentiality ; Electronic Health Records/*instrumentation ; Humans ; Information Storage and Retrieval ; Telemedicine/*instrumentation ; }, abstract = {An effectively designed e-healthcare system can significantly enhance the quality of access and experience of healthcare users, including facilitating medical and healthcare providers in ensuring a smooth delivery of services. Ensuring the security of patients' electronic health records (EHRs) in the e-healthcare system is an active research area. 
EHRs may be outsourced to a third-party, such as a community healthcare cloud service provider for storage due to cost-saving measures. Generally, encrypting the EHRs when they are stored in the system (i.e. data-at-rest) or prior to outsourcing the data is used to ensure data confidentiality. Searchable encryption (SE) scheme is a promising technique that can ensure the protection of private information without compromising on performance. In this paper, we propose a novel framework for controlling access to EHRs stored in semi-trusted cloud servers (e.g. a private cloud or a community cloud). To achieve fine-grained access control for EHRs, we leverage the ciphertext-policy attribute-based encryption (CP-ABE) technique to encrypt tables published by hospitals, including patients' EHRs, and the table is stored in the database with the primary key being the patient's unique identity. Our framework can enable different users with different privileges to search on different database fields. Differ from previous attempts to secure outsourcing of data, we emphasize the control of the searches of the fields within the database. 
We demonstrate the utility of the scheme by evaluating the scheme using datasets from the University of California, Irvine.}, } @article {pmid27643925, year = {2016}, author = {Zhang, Y and Wang, L and Feng, Z and Cheng, H and McGuire, TF and Ding, Y and Cheng, T and Gao, Y and Xie, XQ}, title = {StemCellCKB: An Integrated Stem Cell-Specific Chemogenomics KnowledgeBase for Target Identification and Systems-Pharmacology Research.}, journal = {Journal of chemical information and modeling}, volume = {56}, number = {10}, pages = {1995-2004}, pmid = {27643925}, issn = {1549-960X}, support = {P30 DA035778/DA/NIDA NIH HHS/United States ; R21 HL109654/HL/NHLBI NIH HHS/United States ; UL1 TR000005/TR/NCATS NIH HHS/United States ; }, mesh = {Cloud Computing ; Computational Biology/*methods ; Databases, Factual ; Drug Discovery/*methods ; Humans ; Knowledge Bases ; Models, Molecular ; Protein Interaction Maps/drug effects ; Signal Transduction/drug effects ; *Stem Cells/chemistry/cytology/drug effects/metabolism ; }, abstract = {Given the capacity of self-renewal and multilineage differentiation, stem cells are promising sources for use in regenerative medicines as well as in the clinical treatment of certain hematological malignancies and degenerative diseases. Complex networks of cellular signaling pathways largely determine stem cell fate and function. Small molecules that modulate these pathways can provide important biological and pharmacological insights. However, it is still challenging to identify the specific protein targets of these compounds, to explore the changes in stem cell phenotypes induced by compound treatment and to ascertain compound mechanisms of action. To facilitate stem cell related small molecule study and provide a better understanding of the associated signaling pathways, we have constructed a comprehensive domain-specific chemogenomics resource, called StemCellCKB (http://www.cbligand.org/StemCellCKB/). 
This new cloud-computing platform describes the chemical molecules, genes, proteins, and signaling pathways implicated in stem cell regulation. StemCellCKB is also implemented with web applications designed specifically to aid in the identification of stem cell relevant protein targets, including TargetHunter, a machine-learning algorithm for predicting small molecule targets based on molecular fingerprints, and HTDocking, a high-throughput docking module for target prediction and systems-pharmacology analyses. We have systematically tested StemCellCKB to verify data integrity. Target-prediction accuracy has also been validated against the reported known target/compound associations. This proof-of-concept example demonstrates that StemCellCKB can (1) accurately predict the macromolecular targets of existing stem cell modulators and (2) identify novel small molecules capable of probing stem cell signaling mechanisms, for use in systems-pharmacology studies. StemCellCKB facilitates the exploration and exchange of stem cell chemogenomics data among members of the broader research community.}, } @article {pmid27628727, year = {2016}, author = {Sareen, S and Sood, SK and Gupta, SK}, title = {An Automatic Prediction of Epileptic Seizures Using Cloud Computing and Wireless Sensor Networks.}, journal = {Journal of medical systems}, volume = {40}, number = {11}, pages = {226}, pmid = {27628727}, issn = {1573-689X}, mesh = {*Algorithms ; *Cloud Computing ; Computer Security ; Electroencephalography/*methods ; Epilepsy/*diagnosis/physiopathology ; Geographic Information Systems ; Humans ; Monitoring, Ambulatory/*methods ; Seizures/diagnosis/physiopathology ; Smartphone ; Telemetry/*methods ; Wireless Technology ; }, abstract = {Epilepsy is one of the most common neurological disorders which is characterized by the spontaneous and unforeseeable occurrence of seizures. An automatic prediction of seizure can protect the patients from accidents and save their life. 
In this article, we proposed a mobile-based framework that automatically predict seizures using the information contained in electroencephalography (EEG) signals. The wireless sensor technology is used to capture the EEG signals of patients. The cloud-based services are used to collect and analyze the EEG data from the patient's mobile phone. The features from the EEG signal are extracted using the fast Walsh-Hadamard transform (FWHT). The Higher Order Spectral Analysis (HOSA) is applied to FWHT coefficients in order to select the features set relevant to normal, preictal and ictal states of seizure. We subsequently exploit the selected features as input to a k-means classifier to detect epileptic seizure states in a reasonable time. The performance of the proposed model is tested on Amazon EC2 cloud and compared in terms of execution time and accuracy. The findings show that with selected HOS based features, we were able to achieve a classification accuracy of 94.6 %.}, } @article {pmid27624491, year = {2016}, author = {Xu, X and Zhong, M and Wan, J and Yi, M and Gao, T}, title = {Health Monitoring and Management for Manufacturing Workers in Adverse Working Conditions.}, journal = {Journal of medical systems}, volume = {40}, number = {10}, pages = {222}, pmid = {27624491}, issn = {1573-689X}, mesh = {Algorithms ; Cloud Computing ; Environmental Monitoring/*methods ; Humans ; *Industry ; Occupational Exposure/*adverse effects/*analysis ; Wireless Technology ; *Workplace ; }, abstract = {In adverse working conditions, environmental parameters such as metallic dust, noise, and environmental temperature, directly affect the health condition of manufacturing workers. It is therefore important to implement health monitoring and management based on important physiological parameters (e.g., heart rate, blood pressure, and body temperature). 
In recent years, new technologies, such as body area networks, cloud computing, and smart clothing, have allowed the improvement of the quality of services. In this article, we first give five-layer architecture for health monitoring and management of manufacturing workers. Then, we analyze the system implementation process, including environmental data processing, physical condition monitoring and system services and management, and present the corresponding algorithms. Finally, we carry out an evaluation and analysis from the perspective of insurance and compensation for manufacturing workers in adverse working conditions. The proposed scheme will contribute to the improvement of workplace conditions, realize health monitoring and management, and protect the interests of manufacturing workers.}, } @article {pmid27614348, year = {2017}, author = {David, M and Dursi, LJ and Yao, D and Boutros, PC and Simpson, JT}, title = {Nanocall: an open source basecaller for Oxford Nanopore sequencing data.}, journal = {Bioinformatics (Oxford, England)}, volume = {33}, number = {1}, pages = {49-55}, pmid = {27614348}, issn = {1367-4811}, support = {//CIHR/Canada ; }, mesh = {DNA/*analysis ; Escherichia coli/genetics ; Humans ; Polymerase Chain Reaction ; Sequence Analysis, DNA/*methods ; *Software ; }, abstract = {MOTIVATION: The highly portable Oxford Nanopore MinION sequencer has enabled new applications of genome sequencing directly in the field. However, the MinION currently relies on a cloud computing platform, Metrichor (metrichor.com), for translating locally generated sequencing data into basecalls.

RESULTS: To allow offline and private analysis of MinION data, we created Nanocall. Nanocall is the first freely available, open-source basecaller for Oxford Nanopore sequencing data and does not require an internet connection. Using R7.3 chemistry, on two E.coli and two human samples, with natural as well as PCR-amplified DNA, Nanocall reads have ∼68% identity, directly comparable to Metrichor '1D' data. Further, Nanocall is efficient, processing ∼2500 Kbp of sequence per core hour using the fastest settings, and fully parallelized. Using a 4 core desktop computer, Nanocall could basecall a MinION sequencing run in real time. Metrichor provides the ability to integrate the '1D' sequencing of template and complement strands of a single DNA molecule, and create a '2D' read. Nanocall does not currently integrate this technology, and addition of this capability will be an important future development. In summary, Nanocall is the first open-source, freely available, off-line basecaller for Oxford Nanopore sequencing data.

Nanocall is available at github.com/mateidavid/nanocall, released under the MIT license.

CONTACT: matei.david@oicr.on.caSupplementary information: Supplementary data are available at Bioinformatics online.}, } @article {pmid27612449, year = {2016}, author = {Huang, Z and Rustagi, N and Veeraraghavan, N and Carroll, A and Gibbs, R and Boerwinkle, E and Venkata, MG and Yu, F}, title = {A hybrid computational strategy to address WGS variant analysis in >5000 samples.}, journal = {BMC bioinformatics}, volume = {17}, number = {1}, pages = {361}, pmid = {27612449}, issn = {1471-2105}, support = {HHSN268201100012C/HL/NHLBI NIH HHS/United States ; HHSN268201100009I/HL/NHLBI NIH HHS/United States ; HHSN268201100010C/HL/NHLBI NIH HHS/United States ; HHSN268201100008C/HL/NHLBI NIH HHS/United States ; U01 HL080295/HL/NHLBI NIH HHS/United States ; HHSN268201500001C/HL/NHLBI NIH HHS/United States ; HHSN268201100005G/HL/NHLBI NIH HHS/United States ; HHSN268201100008I/HL/NHLBI NIH HHS/United States ; HHSN268201100007C/HL/NHLBI NIH HHS/United States ; N01 HC015103/HC/NHLBI NIH HHS/United States ; HHSN268201100011I/HL/NHLBI NIH HHS/United States ; HHSN268201100011C/HL/NHLBI NIH HHS/United States ; N01 HC085085/HC/NHLBI NIH HHS/United States ; N01HC55222/HL/NHLBI NIH HHS/United States ; N01HC85086/HL/NHLBI NIH HHS/United States ; HHSN268201100006C/HL/NHLBI NIH HHS/United States ; HHSN268201200036C/HL/NHLBI NIH HHS/United States ; HHSN268201100005I/HL/NHLBI NIH HHS/United States ; HHSN268201500001I/HL/NHLBI NIH HHS/United States ; N01 HC085084/HC/NHLBI NIH HHS/United States ; N01HC85082/HL/NHLBI NIH HHS/United States ; N01HC75150/HL/NHLBI NIH HHS/United States ; R01 HG008115/HG/NHGRI NIH HHS/United States ; HHSN268201100009C/HL/NHLBI NIH HHS/United States ; N01HC85083/HL/NHLBI NIH HHS/United States ; HHSN268201100005C/HL/NHLBI NIH HHS/United States ; N01HC25195/HL/NHLBI NIH HHS/United States ; HHSN268201100007I/HL/NHLBI NIH HHS/United States ; R01 AG023629/AG/NIA NIH HHS/United States ; N01 HC045133/HC/NHLBI NIH HHS/United States ; N01HC85080/HL/NHLBI NIH HHS/United 
States ; N01 HC035129/HC/NHLBI NIH HHS/United States ; N01HC85081/HL/NHLBI NIH HHS/United States ; }, mesh = {Databases, Genetic ; *Genome, Human ; Genomics/*methods ; High-Throughput Nucleotide Sequencing/*methods ; Humans ; }, abstract = {BACKGROUND: The decreasing costs of sequencing are driving the need for cost effective and real time variant calling of whole genome sequencing data. The scale of these projects are far beyond the capacity of typical computing resources available with most research labs. Other infrastructures like the cloud AWS environment and supercomputers also have limitations due to which large scale joint variant calling becomes infeasible, and infrastructure specific variant calling strategies either fail to scale up to large datasets or abandon joint calling strategies.

RESULTS: We present a high throughput framework including multiple variant callers for single nucleotide variant (SNV) calling, which leverages hybrid computing infrastructure consisting of cloud AWS, supercomputers and local high performance computing infrastructures. We present a novel binning approach for large scale joint variant calling and imputation which can scale up to over 10,000 samples while producing SNV callsets with high sensitivity and specificity. As a proof of principle, we present results of analysis on Cohorts for Heart And Aging Research in Genomic Epidemiology (CHARGE) WGS freeze 3 dataset in which joint calling, imputation and phasing of over 5300 whole genome samples was produced in under 6 weeks using four state-of-the-art callers. The callers used were SNPTools, GATK-HaplotypeCaller, GATK-UnifiedGenotyper and GotCloud. We used Amazon AWS, a 4000-core in-house cluster at Baylor College of Medicine, IBM power PC Blue BioU at Rice and Rhea at Oak Ridge National Laboratory (ORNL) for the computation. AWS was used for joint calling of 180 TB of BAM files, and ORNL and Rice supercomputers were used for the imputation and phasing step. All other steps were carried out on the local compute cluster. The entire operation used 5.2 million core hours and only transferred a total of 6 TB of data across the platforms.

CONCLUSIONS: Even with increasing sizes of whole genome datasets, ensemble joint calling of SNVs for low coverage data can be accomplished in a scalable, cost effective and fast manner by using heterogeneous computing platforms without compromising on the quality of variants.}, } @article {pmid27610326, year = {2016}, author = {Lin, CY and Kao, YH and Lee, WB and Chen, RC}, title = {An efficient reversible privacy-preserving data mining technology over data streams.}, journal = {SpringerPlus}, volume = {5}, number = {1}, pages = {1407}, pmid = {27610326}, issn = {2193-1801}, abstract = {With the popularity of smart handheld devices and the emergence of cloud computing, users and companies can save various data, which may contain private data, to the cloud. Topics relating to data security have therefore received much attention. This study focuses on data stream environments and uses the concept of a sliding window to design a reversible privacy-preserving technology to process continuous data in real time, known as a continuous reversible privacy-preserving (CRP) algorithm. Data with CRP algorithm protection can be accurately recovered through a data recovery process. In addition, by using an embedded watermark, the integrity of the data can be verified. The results from the experiments show that, compared to existing algorithms, CRP is better at preserving knowledge and is more effective in terms of reducing information loss and privacy disclosure risk. In addition, it takes far less time for CRP to process continuous data than existing algorithms. 
As a result, CRP is confirmed as suitable for data stream environments and fulfills the requirements of being lightweight and energy-efficient for smart handheld devices.}, } @article {pmid27606547, year = {2017}, author = {Tran, NTL and Huang, CH}, title = {Cloud-based MOTIFSIM: Detecting Similarity in Large DNA Motif Data Sets.}, journal = {Journal of computational biology : a journal of computational molecular cell biology}, volume = {24}, number = {5}, pages = {450-459}, doi = {10.1089/cmb.2016.0080}, pmid = {27606547}, issn = {1557-8666}, mesh = {Algorithms ; Cloud Computing ; DNA/chemistry/*genetics ; Databases, Genetic ; Nucleotide Motifs ; Sequence Analysis, DNA/*methods ; Web Browser ; }, abstract = {We developed the cloud-based MOTIFSIM on Amazon Web Services (AWS) cloud. The tool is an extended version from our web-based tool version 2.0, which was developed based on a novel algorithm for detecting similarity in multiple DNA motif data sets. This cloud-based version further allows researchers to exploit the computing resources available from AWS to detect similarity in multiple large-scale DNA motif data sets resulting from the next-generation sequencing technology. 
The tool is highly scalable with expandable AWS.}, } @article {pmid27592709, year = {2017}, author = {Nellore, A and Collado-Torres, L and Jaffe, AE and Alquicira-Hernández, J and Wilks, C and Pritt, J and Morton, J and Leek, JT and Langmead, B}, title = {Rail-RNA: scalable analysis of RNA-seq splicing and coverage.}, journal = {Bioinformatics (Oxford, England)}, volume = {33}, number = {24}, pages = {4033-4040}, pmid = {27592709}, issn = {1367-4811}, support = {R01 GM105705/GM/NIGMS NIH HHS/United States ; UL1 TR001079/TR/NCATS NIH HHS/United States ; }, mesh = {Exons ; Gene Expression Profiling ; *RNA Splicing ; Sequence Alignment/*methods ; Sequence Analysis, RNA/*methods ; *Software ; }, abstract = {MOTIVATION: RNA sequencing (RNA-seq) experiments now span hundreds to thousands of samples. Current spliced alignment software is designed to analyze each sample separately. Consequently, no information is gained from analyzing multiple samples together, and it requires extra work to obtain analysis products that incorporate data from across samples.

RESULTS: We describe Rail-RNA, a cloud-enabled spliced aligner that analyzes many samples at once. Rail-RNA eliminates redundant work across samples, making it more efficient as samples are added. For many samples, Rail-RNA is more accurate than annotation-assisted aligners. We use Rail-RNA to align 667 RNA-seq samples from the GEUVADIS project on Amazon Web Services in under 16 h for US$0.91 per sample. Rail-RNA outputs alignments in SAM/BAM format; but it also outputs (i) base-level coverage bigWigs for each sample; (ii) coverage bigWigs encoding normalized mean and median coverages at each base across samples analyzed; and (iii) exon-exon splice junctions and indels (features) in columnar formats that juxtapose coverages in samples in which a given feature is found. Supplementary outputs are ready for use with downstream packages for reproducible statistical analysis. We use Rail-RNA to identify expressed regions in the GEUVADIS samples and show that both annotated and unannotated (novel) expressed regions exhibit consistent patterns of variation across populations and with respect to known confounding variables.

Rail-RNA is open-source software available at http://rail.bio.

CONTACTS: anellore@gmail.com or langmea@cs.jhu.edu.

SUPPLEMENTARY INFORMATION: Supplementary data are available at Bioinformatics online.}, } @article {pmid27589753, year = {2016}, author = {Liu, Q and Cai, W and Jin, D and Shen, J and Fu, Z and Liu, X and Linge, N}, title = {Estimation Accuracy on Execution Time of Run-Time Tasks in a Heterogeneous Distributed Environment.}, journal = {Sensors (Basel, Switzerland)}, volume = {16}, number = {9}, pages = {}, pmid = {27589753}, issn = {1424-8220}, abstract = {Distributed Computing has achieved tremendous development since cloud computing was proposed in 2006, and played a vital role promoting rapid growth of data collecting and analysis models, e.g., Internet of things, Cyber-Physical Systems, Big Data Analytics, etc. Hadoop has become a data convergence platform for sensor networks. As one of the core components, MapReduce facilitates allocating, processing and mining of collected large-scale data, where speculative execution strategies help solve straggler problems. However, there is still no efficient solution for accurate estimation on execution time of run-time tasks, which can affect task allocation and distribution in MapReduce. In this paper, task execution data have been collected and employed for the estimation. A two-phase regression (TPR) method is proposed to predict the finishing time of each task accurately. Detailed data of each task have drawn interests with detailed analysis report being made. 
According to the results, the prediction accuracy of concurrent tasks' execution time can be improved, in particular for some regular jobs.}, } @article {pmid27580831, year = {2016}, author = {Kayyali, R and Savickas, V and Spruit, MA and Kaimakamis, E and Siva, R and Costello, RW and Chang, J and Pierscionek, B and Davies, N and Vaes, AW and Paradiso, R and Philip, N and Perantoni, E and D'Arcy, S and Raptopoulos, A and Nabhani-Gebara, S}, title = {Qualitative investigation into a wearable system for chronic obstructive pulmonary disease: the stakeholders' perspective.}, journal = {BMJ open}, volume = {6}, number = {8}, pages = {e011657}, pmid = {27580831}, issn = {2044-6055}, mesh = {Aged ; Aged, 80 and over ; Comorbidity ; Female ; Focus Groups ; Greece ; Humans ; Interviews as Topic ; Ireland ; Male ; Middle Aged ; Mobile Applications ; Monitoring, Physiologic/*methods ; Netherlands ; Pulmonary Disease, Chronic Obstructive/*diagnosis ; Qualitative Research ; *Stakeholder Participation ; Telemedicine/methods ; United Kingdom ; Wearable Electronic Devices/*standards ; }, abstract = {OBJECTIVES: To ascertain the stakeholders' views and devise recommendations for further stages of the Wearable Sensing and Smart Cloud Computing for Integrated Care to Chronic Obstructive Pulmonary Disease (COPD) Patients with Co-morbidities (WELCOME) system development. This system aims to create a wearable vest to monitor physiological signals for patients concerned incorporating an inhaler adherence monitoring, weight, temperature, blood pressure and glucose metres, and a mobile health application for communication with healthcare professionals (HCPs).

DESIGN: A study of qualitative data derived from focus groups and semistructured interviews.

SETTING: 4 participating clinical sites in Greece, the UK, Ireland and the Netherlands.

PARTICIPANTS: Purposive sampling was used to recruit 32 patients with COPD with heart failure, diabetes, anxiety or depression, 27 informal carers and 23 HCPs from 4 European Union (EU) countries for focus groups and interviews.

RESULTS: Most patients and HCPs described the WELCOME system as 'brilliant and creative' and felt it gave a sense of safety. Both users and HCPs agreed that the duration and frequency of vest wear should be individualised as should the mobile application functions. The parameters and frequency of monitoring should be personalised using a multidisciplinary approach. A 'traffic light' alert system was proposed by HCPs for abnormal results. Patients were happy to take actions in response.

CONCLUSIONS: WELCOME stakeholders provided valuable views on the development of the system, which should take into account patient's individual comorbidities, circumstances and concerns. This will enable the development of the individualised system in each member state concerned.}, } @article {pmid32518536, year = {2016}, author = {Quwaider, M and Jararweh, Y}, title = {Multi-tier cloud infrastructure support for reliable global health awareness system.}, journal = {Simulation modelling practice and theory}, volume = {67}, number = {}, pages = {44-58}, pmid = {32518536}, issn = {1569-190X}, abstract = {The exceptional outbreaks of a number of epidemic diseases such as Ebola, SARS, Zika and H1N1 and their wide distribution over multiple regions calls for a reliable global health awareness system. This system is needed to achieve early detection of such emergencies. Furthermore, such health awareness system should be capable of predicting the outbreaks patterns to facilitate future countermeasure planning. This health awareness system should cover large scale regions that can be extended to multiple countries, continents and ultimately the globe. Many advanced and industrial countries are still struggling in building such system effectively even with the availability of resources and domain experts. The realization of a reliable health awareness system is accompanied with multiple challenges such as the availability of resources and experts, the global agreements about the system from the legislative and control point of view and the availability of the infrastructure that will support the system functionality with a reasonable cost. This paper presents a novel global health awareness system that overcomes the aforementioned challenges. The system is exploiting the emerging cloud computing services availability over the globe. 
To handle the large scale requirements, we introduce a multi-tier based cloud system that spans over four tiers starting from the monitored subjects to a centralized global cloud system. Also, we present a mixed integer optimization formulation to tackle the issues related to the latency of detecting outbreaks. Our results show that processing the data in multi-tier health awareness system will reduce the overall delay significantly and enable efficient health data sharing.}, } @article {pmid29897175, year = {2016}, author = {Conner, B}, title = {Can Cloud Computing Impact your EBITDA?.}, journal = {Healthcare financial management : journal of the Healthcare Financial Management Association}, volume = {70}, number = {9}, pages = {36-38}, pmid = {29897175}, issn = {0735-0732}, mesh = {Accounts Payable and Receivable ; *Cloud Computing ; Contract Services ; Financial Management/*trends ; Humans ; Software ; }, } @article {pmid29792628, year = {2016}, author = {Yang, S and Qiu, Y and Shi, B}, title = {[The Key Technology Study on Cloud Computing Platform for ECG Monitoring Based on Regional Internet of Things].}, journal = {Zhongguo yi liao qi xie za zhi = Chinese journal of medical instrumentation}, volume = {40}, number = {5}, pages = {341-343}, pmid = {29792628}, issn = {1671-7104}, mesh = {Arrhythmias, Cardiac ; *Cloud Computing ; Computers ; *Electrocardiography ; Internet ; }, abstract = {This paper explores the methods of building the internet of things of a regional ECG monitoring, focused on the implementation of ECG monitoring center based on cloud computing platform. It analyzes implementation principles of automatic identification in the types of arrhythmia. 
It also studies the system architecture and key techniques of cloud computing platform, including server load balancing technology, reliable storage of massive small files and the implications of quick search function.}, } @article {pmid29782124, year = {2016}, author = {Feeney, JM and Montgomery, SC and Wolf, L and Jayaraman, V and Twohig, M}, title = {Cost Savings Associated with the Adoption of a Cloud Computing Data Transfer System for Trauma Patients.}, journal = {Connecticut medicine}, volume = {80}, number = {7}, pages = {389-392}, pmid = {29782124}, issn = {0010-6178}, mesh = {*Cloud Computing ; Connecticut ; Cost Savings/methods ; Female ; Health Information Exchange/*economics ; Humans ; Male ; Middle Aged ; Patient Transfer/*methods ; *Tomography, X-Ray Computed/methods/statistics & numerical data ; Trauma Centers/*organization & administration ; Wounds and Injuries/*diagnosis ; }, abstract = {INTRODUCTION: Among transferred trauma patients, challenges with the transfer of radiographic studies include problems loading or viewing the studies at the receiving hospitals, and problems manipulating, reconstructing, or evaluating the transferred images. Cloud-based image transfer systems may address some of these problems.

METHODS: We reviewed the charts of patients transferred during one year surrounding the adoption of a cloud computing data transfer system. We compared the rates of repeat imaging before (precloud) and after (postcloud) the adoption of the cloud-based data transfer system.

RESULTS: During the precloud period, 28 out of 100 patients required 90 repeat studies. With the cloud computing transfer system in place, three out of 134 patients required seven repeat films.

CONCLUSION: There was a statistically significant decrease in the proportion of patients requiring repeat films (28% to 2.2%, P < .0001). Based on an annualized volume of 200 trauma patient transfers, the cost savings estimated using three methods of cost analysis, is between $30,272 and $192,453.}, } @article {pmid27577444, year = {2016}, author = {Hao, B and Sun, W and Yu, Y and Li, J and Hu, G and Xie, G}, title = {Accelerate Healthcare Data Analytics: An Agile Practice to Perform Collaborative and Reproducible Analyses.}, journal = {Studies in health technology and informatics}, volume = {228}, number = {}, pages = {552-556}, pmid = {27577444}, issn = {1879-8365}, mesh = {Cloud Computing ; Delivery of Health Care/*statistics & numerical data ; Humans ; Machine Learning ; Reproducibility of Results ; }, abstract = {Recent advances in cloud computing and machine learning made it more convenient for researchers to gain insights from massive healthcare data, while performing analyses on healthcare data in current practice still lacks efficiency for researchers. What's more, collaborating among different researchers and sharing analysis results are challenging issues. In this paper, we developed a practice to make analytics process collaborative and analysis results reproducible by exploiting and extending Jupyter Notebook. 
After applying this practice in our use cases, we can perform analyses and deliver results with less efforts in shorter time comparing to our previous practice.}, } @article {pmid27577240, year = {2016}, author = {Luo, L and Li, L and Hu, J and Wang, X and Hou, B and Zhang, T and Zhao, LP}, title = {A hybrid solution for extracting structured medical information from unstructured data in medical records via a double-reading/entry system.}, journal = {BMC medical informatics and decision making}, volume = {16}, number = {1}, pages = {114}, pmid = {27577240}, issn = {1472-6947}, mesh = {Adult ; China ; *Decision Support Systems, Clinical ; *Electronic Health Records ; Female ; *Health Services Research ; Humans ; Information Storage and Retrieval/*methods ; *Lung Neoplasms ; Male ; Middle Aged ; }, abstract = {BACKGROUND: Healthcare providers generate a huge amount of biomedical data stored in either legacy system (paper-based) format or electronic medical records (EMR) around the world, which are collectively referred to as big biomedical data (BBD). To realize the promise of BBD for clinical use and research, it is an essential step to extract key data elements from unstructured medical records into patient-centered electronic health records with computable data elements. Our objective is to introduce a novel solution, known as a double-reading/entry system (DRESS), for extracting clinical data from unstructured medical records (MR) and creating a semi-structured electronic health record database, as well as to demonstrate its reproducibility empirically.

METHODS: Utilizing the modern cloud-based technologies, we have developed a comprehensive system that includes multiple subsystems, from capturing MRs in clinics, to securely transferring MRs, storing and managing cloud-based MRs, to facilitating both machine learning and manual reading, and to performing iterative quality control before committing the semi-structured data into the desired database. To evaluate the reproducibility of extracted medical data elements by DRESS, we conduct a blinded reproducibility study, with 100 MRs from patients who have undergone surgical treatment of lung cancer in China. The study uses Kappa statistic to measure concordance of discrete variables, and uses correlation coefficient to measure reproducibility of continuous variables.

RESULTS: Using the DRESS, we have demonstrated the feasibility of extracting clinical data from unstructured MRs to create semi-structured and patient-centered electronic health record database. The reproducibility study with 100 patient's MRs has shown an overall high reproducibility of 98 %, and varies across six modules (pathology, Radio/chemo therapy, clinical examination, surgery information, medical image and general patient information).

CONCLUSIONS: DRESS uses a double-reading, double-entry, and an independent adjudication, to manually curate structured data elements from unstructured clinical data. Further, through distributed computing strategies, DRESS protects data privacy by dividing MR data into de-identified modules. Finally, through internet-based computing cloud, DRESS enables many data specialists to work in a virtual environment to achieve the necessary scale of processing thousands MRs within days. This hybrid system represents probably a workable solution to solve the big medical data challenge.}, } @article {pmid27562482, year = {2016}, author = {Shi, X and Li, W and Song, J and Hossain, MS and Mizanur Rahman, SM and Alelaiwi, A}, title = {Towards Interactive Medical Content Delivery Between Simulated Body Sensor Networks and Practical Data Center.}, journal = {Journal of medical systems}, volume = {40}, number = {10}, pages = {214}, pmid = {27562482}, issn = {1573-689X}, mesh = {*Cloud Computing ; Computer Communication Networks/organization & administration ; Humans ; *Information Dissemination ; Internet ; Monitoring, Ambulatory/*instrumentation ; Remote Sensing Technology/instrumentation ; *Systems Integration ; *Wireless Technology ; }, abstract = {With the development of IoT (Internet of Thing), big data analysis and cloud computing, traditional medical information system integrates with these new technologies. The establishment of cloud-based smart healthcare application gets more and more attention. In this paper, semi-physical simulation technology is applied to cloud-based smart healthcare system. The Body sensor network (BSN) of system transmit has two ways of data collection and transmission. The one is using practical BSN to collect data and transmitting it to the data center. The other is transmitting real medical data to practical data center by simulating BSN. 
In order to transmit real medical data to practical data center by simulating BSN under semi-physical simulation environment, this paper designs an OPNET packet structure, defines a gateway node model between simulating BSN and practical data center and builds a custom protocol stack. Moreover, this paper conducts a large amount of simulation on the real data transmission through simulation network connecting with practical network. The simulation result can provides a reference for parameter settings of fully practical network and reduces the cost of devices and personnel involved.}, } @article {pmid27560777, year = {2016}, author = {Montenegro-Burke, JR and Phommavongsay, T and Aisporna, AE and Huan, T and Rinehart, D and Forsberg, E and Poole, FL and Thorgersen, MP and Adams, MW and Krantz, G and Fields, MW and Northen, TR and Robbins, PD and Niedernhofer, LJ and Lairson, L and Benton, HP and Siuzdak, G}, title = {Smartphone Analytics: Mobilizing the Lab into the Cloud for Omic-Scale Analyses.}, journal = {Analytical chemistry}, volume = {88}, number = {19}, pages = {9753-9758}, pmid = {27560777}, issn = {1520-6882}, mesh = {Chromatography, Liquid ; Data Interpretation, Statistical ; Humans ; *Internet ; Mass Spectrometry ; *Metabolomics ; *Mobile Applications ; Principal Component Analysis ; *Smartphone ; }, abstract = {Active data screening is an integral part of many scientific activities, and mobile technologies have greatly facilitated this process by minimizing the reliance on large hardware instrumentation. In order to meet with the increasingly growing field of metabolomics and heavy workload of data processing, we designed the first remote metabolomic data screening platform for mobile devices. Two mobile applications (apps), XCMS Mobile and METLIN Mobile, facilitate access to XCMS and METLIN, which are the most important components in the computer-based XCMS Online platforms. 
These mobile apps allow for the visualization and analysis of metabolic data throughout the entire analytical process. Specifically, XCMS Mobile and METLIN Mobile provide the capabilities for remote monitoring of data processing, real time notifications for the data processing, visualization and interactive analysis of processed data (e.g., cloud plots, principal component analysis, box-plots, extracted ion chromatograms, and hierarchical cluster analysis), and database searching for metabolite identification. These apps, available on Apple iOS and Google Android operating systems, allow for the migration of metabolomic research onto mobile devices for better accessibility beyond direct instrument operation. The utility of XCMS Mobile and METLIN Mobile functionalities was developed and is demonstrated here through the metabolomic LC-MS analyses of stem cells, colon cancer, aging, and bacterial metabolism.}, } @article {pmid27558385, year = {2017}, author = {Neylon, J and Min, Y and Kupelian, P and Low, DA and Santhanam, A}, title = {Analytical modeling and feasibility study of a multi-GPU cloud-based server (MGCS) framework for non-voxel-based dose calculations.}, journal = {International journal of computer assisted radiology and surgery}, volume = {12}, number = {4}, pages = {669-680}, pmid = {27558385}, issn = {1861-6429}, mesh = {Algorithms ; *Cloud Computing ; Feasibility Studies ; Humans ; *Models, Theoretical ; *Radiotherapy Dosage ; }, abstract = {PURPOSE: In this paper, a multi-GPU cloud-based server (MGCS) framework is presented for dose calculations, exploring the feasibility of remote computing power for parallelization and acceleration of computationally and time intensive radiotherapy tasks in moving toward online adaptive therapies.

METHODS: An analytical model was developed to estimate theoretical MGCS performance acceleration and intelligently determine workload distribution. Numerical studies were performed with a computing setup of 14 GPUs distributed over 4 servers interconnected by a 1 Gigabits per second (Gbps) network. Inter-process communication methods were optimized to facilitate resource distribution and minimize data transfers over the server interconnect.

RESULTS: The analytically predicted computation time matched experimental observations within 1-5 %. MGCS performance approached a theoretical limit of acceleration proportional to the number of GPUs utilized when computational tasks far outweighed memory operations. The MGCS implementation reproduced ground-truth dose computations with negligible differences, by distributing the work among several processes and implementing optimization strategies.

CONCLUSIONS: The results showed that a cloud-based computation engine was a feasible solution for enabling clinics to make use of fast dose calculations for advanced treatment planning and adaptive radiotherapy. The cloud-based system was able to exceed the performance of a local machine even for optimized calculations, and provided significant acceleration for computationally intensive tasks. Such a framework can provide access to advanced technology and computational methods to many clinics, providing an avenue for standardization across institutions without the requirements of purchasing, maintaining, and continually updating hardware.}, } @article {pmid27557126, year = {2016}, author = {Abdullahi, M and Ngadi, MA}, title = {Correction: Hybrid Symbiotic Organisms Search Optimization Algorithm for Scheduling of Tasks on Cloud Computing Environment.}, journal = {PloS one}, volume = {11}, number = {8}, pages = {e0162054}, pmid = {27557126}, issn = {1932-6203}, abstract = {[This corrects the article DOI: 10.1371/journal.pone.0158229.].}, } @article {pmid27548166, year = {2016}, author = {Solano, A and Dormido, R and Duro, N and Sánchez, JM}, title = {A Self-Provisioning Mechanism in OpenStack for IoT Devices.}, journal = {Sensors (Basel, Switzerland)}, volume = {16}, number = {8}, pages = {}, pmid = {27548166}, issn = {1424-8220}, abstract = {The aim of this paper is to introduce a plug-and-play mechanism for an Internet of Things (IoT) device to instantiate a Software as a Service (SaaS) application in a private cloud, built up with OpenStack. The SaaS application is the digital avatar of a physical object connected to Internet. As a proof of concept, a Vending Machine is retrofitted and connected to Internet with an Arduino Open Hardware device. 
Once the self-configuration mechanism is completed, it is possible to order a product from a mobile communication device.}, } @article {pmid27547555, year = {2016}, author = {Pinthong, W and Muangruen, P and Suriyaphol, P and Mairiang, D}, title = {A simple grid implementation with Berkeley Open Infrastructure for Network Computing using BLAST as a model.}, journal = {PeerJ}, volume = {4}, number = {}, pages = {e2248}, pmid = {27547555}, issn = {2167-8359}, abstract = {Development of high-throughput technologies, such as Next-generation sequencing, allows thousands of experiments to be performed simultaneously while reducing resource requirement. Consequently, a massive amount of experiment data is now rapidly generated. Nevertheless, the data are not readily usable or meaningful until they are further analysed and interpreted. Due to the size of the data, a high performance computer (HPC) is required for the analysis and interpretation. However, the HPC is expensive and difficult to access. Other means were developed to allow researchers to acquire the power of HPC without a need to purchase and maintain one such as cloud computing services and grid computing system. In this study, we implemented grid computing in a computer training center environment using Berkeley Open Infrastructure for Network Computing (BOINC) as a job distributor and data manager combining all desktop computers to virtualize the HPC. Fifty desktop computers were used for setting up a grid system during the off-hours. In order to test the performance of the grid system, we adapted the Basic Local Alignment Search Tools (BLAST) to the BOINC system. Sequencing results from Illumina platform were aligned to the human genome database by BLAST on the grid system. The result and processing time were compared to those from a single desktop computer and HPC. 
The estimated durations of BLAST analysis for 4 million sequence reads on a desktop PC, HPC and the grid system were 568, 24 and 5 days, respectively. Thus, the grid implementation of BLAST by BOINC is an efficient alternative to the HPC for sequence alignment. The grid implementation by BOINC also helped tap unused computing resources during the off-hours and could be easily modified for other available bioinformatics software.}, } @article {pmid27540501, year = {2016}, author = {Hu, Y and Zhang, J and Bai, X and Yu, S and Yang, Z}, title = {Influence analysis of Github repositories.}, journal = {SpringerPlus}, volume = {5}, number = {1}, pages = {1268}, pmid = {27540501}, issn = {2193-1801}, abstract = {With the support of cloud computing techniques, social coding platforms have changed the style of software development. Github is now the most popular social coding platform and project hosting service. Software developers of various levels keep entering Github, and use Github to save their public and private software projects. The large amounts of software developers and software repositories on Github are posing new challenges to the world of software engineering. This paper tries to tackle one of the important problems: analyzing the importance and influence of Github repositories. We proposed a HITS based influence analysis on graphs that represent the star relationship between Github users and repositories. A weighted version of HITS is applied to the overall star graph, and generates a different set of top influential repositories other than the results from standard version of HITS algorithm. 
We also conduct the influential analysis on per-month star graph, and study the monthly influence ranking of top repositories.}, } @article {pmid32520000, year = {2016}, author = {Zaki, G and Plishker, W and Li, W and Lee, J and Quon, H and Wong, J and Shekhar, R}, title = {The Utility of Cloud Computing in Analyzing GPU-Accelerated Deformable Image Registration of CT and CBCT Images in Head and Neck Cancer Radiation Therapy.}, journal = {IEEE journal of translational engineering in health and medicine}, volume = {4}, number = {}, pages = {4300311}, pmid = {32520000}, issn = {2168-2372}, support = {R42 CA137886/CA/NCI NIH HHS/United States ; }, abstract = {The images generated during radiation oncology treatments provide a valuable resource to conduct analysis for personalized therapy, outcomes prediction, and treatment margin optimization. Deformable image registration (DIR) is an essential tool in analyzing these images. We are enhancing and examining DIR with the contributions of this paper: 1) implementing and investigating a cloud and graphic processing unit (GPU) accelerated DIR solution and 2) assessing the accuracy and flexibility of that solution on planning computed tomography (CT) with cone-beam CT (CBCT). Registering planning CTs and CBCTs aids in monitoring tumors, tracking body changes, and assuring that the treatment is executed as planned. This provides significant information not only on the level of a single patient, but also for an oncology department. However, traditional methods for DIR are usually time-consuming, and manual intervention is sometimes required even for a single registration. In this paper, we present a cloud-based solution in order to increase the data analysis throughput, so that treatment tracking results may be delivered at the time of care. We assess our solution in terms of accuracy and flexibility compared with a commercial tool registering CT with CBCT. 
The latency of a previously reported mutual information-based DIR algorithm was improved with GPUs for a single registration. This registration consists of rigid registration followed by volume subdivision-based nonrigid registration. In this paper, the throughput of the system was accelerated on the cloud for hundreds of data analysis pairs. Nine clinical cases of head and neck cancer patients were utilized to quantitatively evaluate the accuracy and throughput. Target registration error (TRE) and structural similarity index were utilized as evaluation metrics for registration accuracy. The total computation time consisting of preprocessing the data, running the registration, and analyzing the results was used to evaluate the system throughput. Evaluation showed that the average TRE for GPU-accelerated DIR for each of the nine patients was from 1.99 to 3.39 mm, which is lower than the voxel dimension. The total processing time for 282 pairs on an Amazon Web Services cloud consisting of 20 GPU enabled nodes took less than an hour. Beyond the original registration, the cloud resources also included automatic registration quality checks with minimal impact to timing. 
Clinical data were utilized in quantitative evaluations, and the results showed that the presented method holds great potential for many high-impact clinical applications in radiation oncology, including adaptive radio therapy, patient outcomes prediction, and treatment margin optimization.}, } @article {pmid27512372, year = {2016}, author = {Manjón, JV and Coupé, P}, title = {volBrain: An Online MRI Brain Volumetry System.}, journal = {Frontiers in neuroinformatics}, volume = {10}, number = {}, pages = {30}, pmid = {27512372}, issn = {1662-5196}, support = {U24 RR021382/RR/NCRR NIH HHS/United States ; R01 MH056584/MH/NIMH NIH HHS/United States ; P01 AG003991/AG/NIA NIH HHS/United States ; P50 AG005681/AG/NIA NIH HHS/United States ; R01 AG021910/AG/NIA NIH HHS/United States ; P50 MH071616/MH/NIMH NIH HHS/United States ; }, abstract = {The amount of medical image data produced in clinical and research settings is rapidly growing resulting in vast amount of data to analyze. Automatic and reliable quantitative analysis tools, including segmentation, allow to analyze brain development and to understand specific patterns of many neurological diseases. This field has recently experienced many advances with successful techniques based on non-linear warping and label fusion. In this work we present a novel and fully automatic pipeline for volumetric brain analysis based on multi-atlas label fusion technology that is able to provide accurate volumetric information at different levels of detail in a short time. This method is available through the volBrain online web interface (http://volbrain.upv.es), which is publically and freely accessible to the scientific community. 
Our new framework has been compared with current state-of-the-art methods showing very competitive results.}, } @article {pmid27505982, year = {2016}, author = {Albright, FB}, title = {WHAT WORKS: CLOUD COMPUTING.}, journal = {Behavioral healthcare}, volume = {36}, number = {2}, pages = {44}, pmid = {27505982}, issn = {1931-7093}, mesh = {Colorado ; *Electronic Health Records ; Fires ; Information Storage and Retrieval/*methods ; Mental Health Services ; Organizational Case Studies ; }, } @article {pmid27501046, year = {2016}, author = {Thanasias, V and Lee, C and Hanif, M and Kim, E and Helal, S}, title = {VM Capacity-Aware Scheduling within Budget Constraints in IaaS Clouds.}, journal = {PloS one}, volume = {11}, number = {8}, pages = {e0160456}, pmid = {27501046}, issn = {1932-6203}, mesh = {Algorithms ; Cloud Computing/*economics ; Models, Theoretical ; Workload ; }, abstract = {Recently, cloud computing has drawn significant attention from both industry and academia, bringing unprecedented changes to computing and information technology. The infrastructure-as-a-Service (IaaS) model offers new abilities such as the elastic provisioning and relinquishing of computing resources in response to workload fluctuations. However, because the demand for resources dynamically changes over time, the provisioning of resources in a way that a given budget is efficiently utilized while maintaining a sufficing performance remains a key challenge. This paper addresses the problem of task scheduling and resource provisioning for a set of tasks running on IaaS clouds; it presents novel provisioning and scheduling algorithms capable of executing tasks within a given budget, while minimizing the slowdown due to the budget constraint. 
Our simulation study demonstrates a substantial reduction up to 70% in the overall task slowdown rate by the proposed algorithms.}, } @article {pmid27496860, year = {2018}, author = {Gachet Páez, D and de Buenaga Rodríguez, M and Puertas Sánz, E and Villalba, MT and Muñoz Gil, R}, title = {Healthy and wellbeing activities' promotion using a Big Data approach.}, journal = {Health informatics journal}, volume = {24}, number = {2}, pages = {125-135}, doi = {10.1177/1460458216660754}, pmid = {27496860}, issn = {1741-2811}, mesh = {Aging ; *Big Data ; Confidentiality/trends ; Health Promotion/*methods/trends ; Humans ; Self-Management/methods ; Wearable Electronic Devices/trends ; }, abstract = {The aging population and economic crisis specially in developed countries have as a consequence the reduction in funds dedicated to health care; it is then desirable to optimize the costs of public and private healthcare systems, reducing the affluence of chronic and dependent people to care centers; promoting healthy lifestyle and activities can allow people to avoid chronic diseases as for example hypertension. In this article, we describe a system for promoting an active and healthy lifestyle for people and to recommend with guidelines and valuable information about their habits. The proposed system is being developed around the Big Data paradigm using bio-signal sensors and machine-learning algorithms for recommendations.}, } @article {pmid27493630, year = {2016}, author = {Loizou, GD}, title = {Animal-Free Chemical Safety Assessment.}, journal = {Frontiers in pharmacology}, volume = {7}, number = {}, pages = {218}, pmid = {27493630}, issn = {1663-9812}, abstract = {The exponential growth of the Internet of Things and the global popularity and remarkable decline in cost of the mobile phone is driving the digital transformation of medical practice. 
The rapidly maturing digital, non-medical world of mobile (wireless) devices, cloud computing and social networking is coalescing with the emerging digital medical world of omics data, biosensors and advanced imaging which offers the increasingly realistic prospect of personalized medicine. Described as a potential "seismic" shift from the current "healthcare" model to a "wellness" paradigm that is predictive, preventative, personalized and participatory, this change is based on the development of increasingly sophisticated biosensors which can track and measure key biochemical variables in people. Additional key drivers in this shift are metabolomic and proteomic signatures, which are increasingly being reported as pre-symptomatic, diagnostic and prognostic of toxicity and disease. These advancements also have profound implications for toxicological evaluation and safety assessment of pharmaceuticals and environmental chemicals. An approach based primarily on human in vivo and high-throughput in vitro human cell-line data is a distinct possibility. This would transform current chemical safety assessment practice which operates in a human "data poor" to a human "data rich" environment. This could also lead to a seismic shift from the current animal-based to an animal-free chemical safety assessment paradigm.}, } @article {pmid27490901, year = {2016}, author = {Xie, Z and Shao, X and Xin, Y}, title = {A Scheduling Algorithm for Cloud Computing System Based on the Driver of Dynamic Essential Path.}, journal = {PloS one}, volume = {11}, number = {8}, pages = {e0159932}, pmid = {27490901}, issn = {1932-6203}, mesh = {*Algorithms ; *Cloud Computing ; Internet ; Models, Theoretical ; }, abstract = {To solve the problem of task scheduling in the cloud computing system, this paper proposes a scheduling algorithm for cloud computing based on the driver of dynamic essential path (DDEP). 
This algorithm applies a predecessor-task layer priority strategy to solve the problem of constraint relations among task nodes. The strategy assigns different priority values to every task node based on the scheduling order of task node as affected by the constraint relations among task nodes, and the task node list is generated by the different priority value. To address the scheduling order problem in which task nodes have the same priority value, the dynamic essential long path strategy is proposed. This strategy computes the dynamic essential path of the pre-scheduling task nodes based on the actual computation cost and communication cost of task node in the scheduling process. The task node that has the longest dynamic essential path is scheduled first as the completion time of task graph is indirectly influenced by the finishing time of task nodes in the longest dynamic essential path. Finally, we demonstrate the proposed algorithm via simulation experiments using Matlab tools. The experimental results indicate that the proposed algorithm can effectively reduce the task Makespan in most cases and meet a high quality performance objective.}, } @article {pmid27477210, year = {2016}, author = {Chae, H and Lee, S and Seo, S and Jung, D and Chang, H and Nephew, KP and Kim, S}, title = {BioVLAB-mCpG-SNP-EXPRESS: A system for multi-level and multi-perspective analysis and exploration of DNA methylation, sequence variation (SNPs), and gene expression from multi-omics data.}, journal = {Methods (San Diego, Calif.)}, volume = {111}, number = {}, pages = {64-71}, doi = {10.1016/j.ymeth.2016.07.019}, pmid = {27477210}, issn = {1095-9130}, mesh = {Computational Biology/*methods ; DNA Methylation/genetics ; Databases, Genetic ; Genetic Variation ; Genomics/*methods ; High-Throughput Nucleotide Sequencing/*methods ; Humans ; Polymorphism, Single Nucleotide/genetics ; *Software ; }, abstract = {Measuring gene expression, DNA sequence variation, and DNA methylation status is 
routinely done using high throughput sequencing technologies. To analyze such multi-omics data and explore relationships, reliable bioinformatics systems are much needed. Existing systems are either for exploring curated data or for processing omics data in the form of a library such as R. Thus scientists have much difficulty in investigating relationships among gene expression, DNA sequence variation, and DNA methylation using multi-omics data. In this study, we report a system called BioVLAB-mCpG-SNP-EXPRESS for the integrated analysis of DNA methylation, sequence variation (SNPs), and gene expression for distinguishing cellular phenotypes at the pairwise and multiple phenotype levels. The system can be deployed on either the Amazon cloud or a publicly available high-performance computing node, and the data analysis and exploration of the analysis result can be conveniently done using a web-based interface. In order to alleviate analysis complexity, all the processes are fully automated, and a graphical workflow system is integrated to represent real-time analysis progression. The BioVLAB-mCpG-SNP-EXPRESS system works in three stages. First, it processes and analyzes multi-omics data as input in the form of the raw data, i.e., FastQ files. Second, various integrated analyses such as methylation vs. gene expression and mutation vs. methylation are performed. Finally, the analysis result can be explored in a number of ways through a web interface for the multi-level, multi-perspective exploration. Multi-level interpretation can be done by either gene, gene set, pathway or network level and multi-perspective exploration can be explored from either gene expression, DNA methylation, sequence variation, or their relationship perspective. The utility of the system is demonstrated by performing analysis of phenotypically distinct 30 breast cancer cell line data set. 
BioVLAB-mCpG-SNP-EXPRESS is available at http://biohealth.snu.ac.kr/software/biovlab_mcpg_snp_express/.}, } @article {pmid27472806, year = {2016}, author = {Hanson-Smith, V and Johnson, A}, title = {PhyloBot: A Web Portal for Automated Phylogenetics, Ancestral Sequence Reconstruction, and Exploration of Mutational Trajectories.}, journal = {PLoS computational biology}, volume = {12}, number = {7}, pages = {e1004976}, pmid = {27472806}, issn = {1553-7358}, support = {F32 GM108299/GM/NIGMS NIH HHS/United States ; R01 GM037049/GM/NIGMS NIH HHS/United States ; }, mesh = {Animals ; Computational Biology/*methods ; Evolution, Molecular ; Genetics ; Humans ; *Internet ; *Phylogeny ; Proteins/classification/genetics/metabolism ; Sequence Alignment/*methods ; Sequence Analysis, DNA/*methods ; Software ; }, abstract = {The method of phylogenetic ancestral sequence reconstruction is a powerful approach for studying evolutionary relationships among protein sequence, structure, and function. In particular, this approach allows investigators to (1) reconstruct and "resurrect" (that is, synthesize in vivo or in vitro) extinct proteins to study how they differ from modern proteins, (2) identify key amino acid changes that, over evolutionary timescales, have altered the function of the protein, and (3) order historical events in the evolution of protein function. Widespread use of this approach has been slow among molecular biologists, in part because the methods require significant computational expertise. Here we present PhyloBot, a web-based software tool that makes ancestral sequence reconstruction easy. Designed for non-experts, it integrates all the necessary software into a single user interface. Additionally, PhyloBot provides interactive tools to explore evolutionary trajectories between ancestors, enabling the rapid generation of hypotheses that can be tested using genetic or biochemical approaches. 
Early versions of this software were used in previous studies to discover genetic mechanisms underlying the functions of diverse protein families, including V-ATPase ion pumps, DNA-binding transcription regulators, and serine/threonine protein kinases. PhyloBot runs in a web browser, and is available at the following URL: http://www.phylobot.com. The software is implemented in Python using the Django web framework, and runs on elastic cloud computing resources from Amazon Web Services. Users can create and submit jobs on our free server (at the URL listed above), or use our open-source code to launch their own PhyloBot server.}, } @article {pmid27471996, year = {2016}, author = {Roy, S and Pfeifer, JD and LaFramboise, WA and Pantanowitz, L}, title = {Molecular digital pathology: progress and potential of exchanging molecular data.}, journal = {Expert review of molecular diagnostics}, volume = {16}, number = {9}, pages = {941-947}, doi = {10.1080/14737159.2016.1206472}, pmid = {27471996}, issn = {1744-8352}, mesh = {Humans ; Information Dissemination/*methods ; Information Services/*trends ; Telepathology/*methods/standards/*trends ; }, abstract = {Many of the demands to perform next generation sequencing (NGS) in the clinical laboratory can be resolved using the principles of telepathology. Molecular telepathology can allow facilities to outsource all or a portion of their NGS operation such as cloud computing, bioinformatics pipelines, variant data management, and knowledge curation. Clinical pathology laboratories can electronically share diverse types of molecular data with reference laboratories, technology service providers, and/or regulatory agencies. Exchange of electronic molecular data allows laboratories to perform validation of rare diseases using foreign data, check the accuracy of their test results against benchmarks, and leverage in silico proficiency testing. 
This review covers the emerging subject of molecular telepathology, describes clinical use cases for the appropriate exchange of molecular data, and highlights key issues such as data integrity, interoperable formats for massive genomic datasets, security, malpractice and emerging regulations involved with this novel practice.}, } @article {pmid27471065, year = {2016}, author = {Pallen, MJ}, title = {Microbial bioinformatics 2020.}, journal = {Microbial biotechnology}, volume = {9}, number = {5}, pages = {681-686}, pmid = {27471065}, issn = {1751-7915}, mesh = {Computational Biology/*methods/trends ; Databases, Nucleic Acid ; Genomics/*methods/trends ; Internet ; }, abstract = {Microbial bioinformatics in 2020 will remain a vibrant, creative discipline, adding value to the ever-growing flood of new sequence data, while embracing novel technologies and fresh approaches. Databases and search strategies will struggle to cope and manual curation will not be sustainable during the scale-up to the million-microbial-genome era. Microbial taxonomy will have to adapt to a situation in which most microorganisms are discovered and characterised through the analysis of sequences. Genome sequencing will become a routine approach in clinical and research laboratories, with fresh demands for interpretable user-friendly outputs. The "internet of things" will penetrate healthcare systems, so that even a piece of hospital plumbing might have its own IP address that can be integrated with pathogen genome sequences. Microbiome mania will continue, but the tide will turn from molecular barcoding towards metagenomics. Crowd-sourced analyses will collide with cloud computing, but eternal vigilance will be the price of preventing the misinterpretation and overselling of microbial sequence data. Output from hand-held sequencers will be analysed on mobile devices. Open-source training materials will address the need for the development of a skilled labour force. 
As we boldly go into the third decade of the twenty-first century, microbial sequence space will remain the final frontier!}, } @article {pmid27468841, year = {2016}, author = {Cicirelli, F and Fortino, G and Giordano, A and Guerrieri, A and Spezzano, G and Vinci, A}, title = {On the Design of Smart Homes: A Framework for Activity Recognition in Home Environment.}, journal = {Journal of medical systems}, volume = {40}, number = {9}, pages = {200}, pmid = {27468841}, issn = {1573-689X}, mesh = {Cloud Computing ; *Equipment Design ; *Exercise ; *Housing ; Humans ; Internet ; Monitoring, Physiologic/*instrumentation ; Wireless Technology ; }, abstract = {A smart home is a home environment enriched with sensing, actuation, communication and computation capabilities which permits to adapt it to inhabitants preferences and requirements. Establishing a proper strategy of actuation on the home environment can require complex computational tasks on the sensed data. This is the case of activity recognition, which consists in retrieving high-level knowledge about what occurs in the home environment and about the behaviour of the inhabitants. The inherent complexity of this application domain asks for tools able to properly support the design and implementation phases. This paper proposes a framework for the design and implementation of smart home applications focused on activity recognition in home environments. The framework mainly relies on the Cloud-assisted Agent-based Smart home Environment (CASE) architecture offering basic abstraction entities which easily allow to design and implement Smart Home applications. CASE is a three layered architecture which exploits the distributed multi-agent paradigm and the cloud technology for offering analytics services. 
Details about how to implement activity recognition onto the CASE architecture are supplied focusing on the low-level technological issues as well as the algorithms and the methodologies useful for the activity recognition. The effectiveness of the framework is shown through a case study consisting of a daily activity recognition of a person in a home environment.}, } @article {pmid27465296, year = {2016}, author = {Yang, H and Zhang, J and Ji, Y and He, Y and Lee, Y}, title = {Experimental demonstration of multi-dimensional resources integration for service provisioning in cloud radio over fiber network.}, journal = {Scientific reports}, volume = {6}, number = {}, pages = {30678}, pmid = {27465296}, issn = {2045-2322}, abstract = {Cloud radio access network (C-RAN) becomes a promising scenario to accommodate high-performance services with ubiquitous user coverage and real-time cloud computing in 5G area. However, the radio network, optical network and processing unit cloud have been decoupled from each other, so that their resources are controlled independently. Traditional architecture cannot implement the resource optimization and scheduling for the high-level service guarantee due to the communication obstacle among them with the growing number of mobile internet users. In this paper, we report a study on multi-dimensional resources integration (MDRI) for service provisioning in cloud radio over fiber network (C-RoFN). A resources integrated provisioning (RIP) scheme using an auxiliary graph is introduced based on the proposed architecture. The MDRI can enhance the responsiveness to dynamic end-to-end user demands and globally optimize radio frequency, optical network and processing resources effectively to maximize radio coverage. The feasibility of the proposed architecture is experimentally verified on OpenFlow-based enhanced SDN testbed. 
The performance of RIP scheme under heavy traffic load scenario is also quantitatively evaluated to demonstrate the efficiency of the proposal based on MDRI architecture in terms of resource utilization, path blocking probability, network cost and path provisioning latency, compared with other provisioning schemes.}, } @article {pmid27454608, year = {2016}, author = {Banos, O and Bilal Amin, M and Ali Khan, W and Afzal, M and Hussain, M and Kang, BH and Lee, S}, title = {The Mining Minds digital health and wellness framework.}, journal = {Biomedical engineering online}, volume = {15 Suppl 1}, number = {Suppl 1}, pages = {76}, pmid = {27454608}, issn = {1475-925X}, mesh = {Data Mining/*methods ; Health Behavior ; Health Knowledge, Attitudes, Practice ; Health Promotion/*methods ; Humans ; *Internet ; Inventions ; Life Style ; Mobile Applications ; }, abstract = {BACKGROUND: The provision of health and wellness care is undergoing an enormous transformation. A key element of this revolution consists in prioritizing prevention and proactivity based on the analysis of people's conducts and the empowerment of individuals in their self-management. Digital technologies are unquestionably destined to be the main engine of this change, with an increasing number of domain-specific applications and devices commercialized every year; however, there is an apparent lack of frameworks capable of orchestrating and intelligently leveraging, all the data, information and knowledge generated through these systems.

METHODS: This work presents Mining Minds, a novel framework that builds on the core ideas of the digital health and wellness paradigms to enable the provision of personalized support. Mining Minds embraces some of the most prominent digital technologies, ranging from Big Data and Cloud Computing to Wearables and Internet of Things, as well as modern concepts and methods, such as context-awareness, knowledge bases or analytics, to holistically and continuously investigate on people's lifestyles and provide a variety of smart coaching and support services.

RESULTS: This paper comprehensively describes the efficient and rational combination and interoperation of these technologies and methods through Mining Minds, while meeting the essential requirements posed by a framework for personalized health and wellness support. Moreover, this work presents a realization of the key architectural components of Mining Minds, as well as various exemplary user applications and expert tools to illustrate some of the potential services supported by the proposed framework.

CONCLUSIONS: Mining Minds constitutes an innovative holistic means to inspect human behavior and provide personalized health and wellness support. The principles behind this framework uncover new research ideas and may serve as a reference for similar initiatives.}, } @article {pmid27441559, year = {2016}, author = {Souza Pardo, MH and Centurion, AM and Franco Eustáquio, PS and Carlucci Santana, RH and Bruschi, SM and Santana, MJ}, title = {Evaluating the Influence of the Client Behavior in Cloud Computing.}, journal = {PloS one}, volume = {11}, number = {7}, pages = {e0158291}, pmid = {27441559}, issn = {1932-6203}, mesh = {*Cloud Computing ; *Computer Simulation ; Markov Chains ; Software ; Time Factors ; }, abstract = {This paper proposes a novel approach for the implementation of simulation scenarios, providing a client entity for cloud computing systems. The client entity allows the creation of scenarios in which the client behavior has an influence on the simulation, making the results more realistic. The proposed client entity is based on several characteristics that affect the performance of a cloud computing system, including different modes of submission and their behavior when the waiting time between requests (think time) is considered. The proposed characterization of the client enables the sending of either individual requests or group of Web services to scenarios where the workload takes the form of bursts. The client entity is included in the CloudSim, a framework for modelling and simulation of cloud computing. 
Experimental results show the influence of the client behavior on the performance of the services executed in a cloud computing system.}, } @article {pmid27441149, year = {2016}, author = {Liao, WH and Qiu, WL}, title = {Applying analytic hierarchy process to assess healthcare-oriented cloud computing service systems.}, journal = {SpringerPlus}, volume = {5}, number = {1}, pages = {1030}, pmid = {27441149}, issn = {2193-1801}, abstract = {Numerous differences exist between the healthcare industry and other industries. Difficulties in the business operation of the healthcare industry have continually increased because of the volatility and importance of health care, changes to and requirements of health insurance policies, and the statuses of healthcare providers, which are typically considered not-for-profit organizations. Moreover, because of the financial risks associated with constant changes in healthcare payment methods and constantly evolving information technology, healthcare organizations must continually adjust their business operation objectives; therefore, cloud computing presents both a challenge and an opportunity. As a response to aging populations and the prevalence of the Internet in fast-paced contemporary societies, cloud computing can be used to facilitate the task of balancing the quality and costs of health care. To evaluate cloud computing service systems for use in health care, providing decision makers with a comprehensive assessment method for prioritizing decision-making factors is highly beneficial. Hence, this study applied the analytic hierarchy process, compared items related to cloud computing and health care, executed a questionnaire survey, and then classified the critical factors influencing healthcare cloud computing service systems on the basis of statistical analyses of the questionnaire results. 
The results indicate that the primary factor affecting the design or implementation of optimal cloud computing healthcare service systems is cost effectiveness, with the secondary factors being practical considerations such as software design and system architecture.}, } @article {pmid27440183, year = {2016}, author = {Ferreira Junior, JR and Oliveira, MC and de Azevedo-Marques, PM}, title = {Cloud-Based NoSQL Open Database of Pulmonary Nodules for Computer-Aided Lung Cancer Diagnosis and Reproducible Research.}, journal = {Journal of digital imaging}, volume = {29}, number = {6}, pages = {716-729}, pmid = {27440183}, issn = {1618-727X}, mesh = {*Cloud Computing ; *Databases, Factual ; *Diagnosis, Computer-Assisted ; Humans ; Lung Neoplasms/*diagnostic imaging ; Radiographic Image Interpretation, Computer-Assisted ; Reproducibility of Results ; Solitary Pulmonary Nodule/*diagnostic imaging ; Tomography, X-Ray Computed ; }, abstract = {Lung cancer is the leading cause of cancer-related deaths in the world, and its main manifestation is pulmonary nodules. Detection and classification of pulmonary nodules are challenging tasks that must be done by qualified specialists, but image interpretation errors make those tasks difficult. In order to aid radiologists on those hard tasks, it is important to integrate the computer-based tools with the lesion detection, pathology diagnosis, and image interpretation processes. However, computer-aided diagnosis research faces the problem of not having enough shared medical reference data for the development, testing, and evaluation of computational methods for diagnosis. In order to minimize this problem, this paper presents a public nonrelational document-oriented cloud-based database of pulmonary nodules characterized by 3D texture attributes, identified by experienced radiologists and classified in nine different subjective characteristics by the same specialists. 
Our goal with the development of this database is to improve computer-aided lung cancer diagnosis and pulmonary nodule detection and classification research through the deployment of this database in a cloud Database as a Service framework. Pulmonary nodule data was provided by the Lung Image Database Consortium and Image Database Resource Initiative (LIDC-IDRI), image descriptors were acquired by a volumetric texture analysis, and database schema was developed using a document-oriented Not only Structured Query Language (NoSQL) approach. The proposed database is now with 379 exams, 838 nodules, and 8237 images, 4029 of them are CT scans and 4208 manually segmented nodules, and it is allocated in a MongoDB instance on a cloud infrastructure.}, } @article {pmid27436772, year = {2016}, author = {Mulder, VL and Lacoste, M and Richer-de-Forges, AC and Arrouays, D}, title = {GlobalSoilMap France: High-resolution spatial modelling the soils of France up to two meter depth.}, journal = {The Science of the total environment}, volume = {573}, number = {}, pages = {1352-1369}, doi = {10.1016/j.scitotenv.2016.07.066}, pmid = {27436772}, issn = {1879-1026}, abstract = {This work presents the first GlobalSoilMap (GSM) products for France. We developed an automatic procedure for mapping the primary soil properties (clay, silt, sand, coarse elements, pH, soil organic carbon (SOC), cation exchange capacity (CEC) and soil depth). The procedure employed a data-mining technique and a straightforward method for estimating the 90% confidence intervals (CIs). The most accurate models were obtained for pH, sand and silt. Next, CEC, clay and SOC were found reasonably accurate predicted. Coarse elements and soil depth were the least accurate of all models. 
Overall, all models were considered robust; important indicators for this were 1) the small difference in model diagnostics between the calibration and cross-validation set, 2) the unbiased mean predictions, 3) the smaller spatial structure of the prediction residuals in comparison to the observations and 4) the similar performance compared to other developed GlobalSoilMap products. Nevertheless, the confidence intervals (CIs) were rather wide for all soil properties. The median predictions became less reliable with increasing depth, as indicated by the increase of CIs with depth. In addition, model accuracy and the corresponding CIs varied depending on the soil variable of interest, soil depth and geographic location. These findings indicated that the CIs are as informative as the model diagnostics. In conclusion, the presented method resulted in reasonably accurate predictions for the majority of the soil properties. End users can employ the products for different purposes, as was demonstrated with some practical examples. The mapping routine is flexible for cloud-computing and provides ample opportunity to be further developed when desired by its users. 
This allows regional and international GSM partners with fewer resources to develop their own products or, otherwise, to improve the current routine and work together towards a robust high-resolution digital soil map of the world.}, } @article {pmid27419854, year = {2016}, author = {Wei, X and Sun, B and Cui, J and Xu, G}, title = {A Multi-Objective Compounded Local Mobile Cloud Architecture Using Priority Queues to Process Multiple Jobs.}, journal = {PloS one}, volume = {11}, number = {7}, pages = {e0158491}, pmid = {27419854}, issn = {1932-6203}, mesh = {Algorithms ; *Cloud Computing/economics ; *Computers, Handheld ; Information Storage and Retrieval/economics ; Internet ; *Software/economics ; Software Design ; Time Factors ; }, abstract = {As a result of the greatly increased use of mobile devices, the disadvantages of portable devices have gradually begun to emerge. To solve these problems, the use of mobile cloud computing assisted by cloud data centers has been proposed. However, cloud data centers are always very far from the mobile requesters. In this paper, we propose an improved multi-objective local mobile cloud model: Compounded Local Mobile Cloud Architecture with Dynamic Priority Queues (LMCpri). This new architecture could briefly store jobs that arrive simultaneously at the cloudlet in different priority positions according to the result of auction processing, and then execute partitioning tasks on capable helpers. In the Scheduling Module, NSGA-II is employed as the scheduling algorithm to shorten processing time and decrease requester cost relative to PSO and sequential scheduling. The simulation results show that the number of iteration times that is defined to 30 is the best choice of the system. In addition, comparing with LMCque, LMCpri is able to effectively accommodate a requester who would like his job to be executed in advance and shorten execution time. 
Finally, we make a comparing experiment between LMCpri and cloud assisting architecture, and the results reveal that LMCpri presents a better performance advantage than cloud assisting architecture.}, } @article {pmid27410146, year = {2016}, author = {Samadi, P and Wen, K and Xu, J and Bergman, K}, title = {Software-defined optical network for metro-scale geographically distributed data centers.}, journal = {Optics express}, volume = {24}, number = {11}, pages = {12310-12320}, doi = {10.1364/OE.24.012310}, pmid = {27410146}, issn = {1094-4087}, abstract = {The emergence of cloud computing and big data has rapidly increased the deployment of small and mid-sized data centers. Enterprises and cloud providers require an agile network among these data centers to empower application reliability and flexible scalability. We present a software-defined inter data center network to enable on-demand scale out of data centers on a metro-scale optical network. The architecture consists of a combined space/wavelength switching platform and a Software-Defined Networking (SDN) control plane equipped with a wavelength and routing assignment module. It enables establishing transparent and bandwidth-selective connections from L2/L3 switches, on-demand. The architecture is evaluated in a testbed consisting of 3 data centers, 5-25 km apart. 
We successfully demonstrated end-to-end bulk data transfer and Virtual Machine (VM) migrations across data centers with less than 100 ms connection setup time and close to full link capacity utilization.}, } @article {pmid27409623, year = {2016}, author = {Gil, D and Ferrández, A and Mora-Mora, H and Peral, J}, title = {Internet of Things: A Review of Surveys Based on Context Aware Intelligent Services.}, journal = {Sensors (Basel, Switzerland)}, volume = {16}, number = {7}, pages = {}, pmid = {27409623}, issn = {1424-8220}, abstract = {The Internet of Things (IoT) has made it possible for devices around the world to acquire information and store it, in order to be able to use it at a later stage. However, this potential opportunity is often not exploited because of the excessively big interval between the data collection and the capability to process and analyse it. In this paper, we review the current IoT technologies, approaches and models in order to discover what challenges need to be met to make more sense of data. The main goal of this paper is to review the surveys related to IoT in order to provide well integrated and context aware intelligent services for IoT. Moreover, we present a state-of-the-art of IoT from the context aware perspective that allows the integration of IoT and social networks in the emerging Social Internet of Things (SIoT) term.}, } @article {pmid27390389, year = {2016}, author = {Gulzar, MA and Interlandi, M and Yoo, S and Tetali, SD and Condie, T and Millstein, T and Kim, M}, title = {BigDebug: Debugging Primitives for Interactive Big Data Processing in Spark.}, journal = {Proceedings - International Conference on Software Engineering. 
International Conference on Software Engineering}, volume = {2016}, number = {}, pages = {784--795}, pmid = {27390389}, issn = {0270-5257}, support = {U01 HG008488/HG/NHGRI NIH HHS/United States ; U54 EB020404/EB/NIBIB NIH HHS/United States ; }, abstract = {Developers use cloud computing platforms to process a large quantity of data in parallel when developing big data analytics. Debugging the massive parallel computations that run in today's data-centers is time consuming and error-prone. To address this challenge, we design a set of interactive, real-time debugging primitives for big data processing in Apache Spark, the next generation data-intensive scalable cloud computing platform. This requires re-thinking the notion of step-through debugging in a traditional debugger such as gdb, because pausing the entire computation across distributed worker nodes causes significant delay and naively inspecting millions of records using a watchpoint is too time consuming for an end user. First, BIGDEBUG's simulated breakpoints and on-demand watchpoints allow users to selectively examine distributed, intermediate data on the cloud with little overhead. Second, a user can also pinpoint a crash-inducing record and selectively resume relevant sub-computations after a quick fix. Third, a user can determine the root causes of errors (or delays) at the level of individual records through a fine-grained data provenance capability. Our evaluation shows that BIGDEBUG scales to terabytes and its record-level tracing incurs less than 25% overhead on average. It determines crash culprits orders of magnitude more accurately and provides up to 100% time saving compared to the baseline replay debugger. 
The results show that BIGDEBUG supports debugging at interactive speeds with minimal performance impact.}, } @article {pmid27384239, year = {2016}, author = {Abdulhamid, SM and Abd Latiff, MS and Abdul-Salaam, G and Hussain Madni, SH}, title = {Secure Scientific Applications Scheduling Technique for Cloud Computing Environment Using Global League Championship Algorithm.}, journal = {PloS one}, volume = {11}, number = {7}, pages = {e0158102}, pmid = {27384239}, issn = {1932-6203}, mesh = {*Algorithms ; Appointments and Schedules ; *Cloud Computing ; *Computer Systems ; Humans ; *Information Storage and Retrieval ; Internet ; Models, Statistical ; Personnel Staffing and Scheduling ; Probability ; }, abstract = {Cloud computing system is a huge cluster of interconnected servers residing in a datacenter and dynamically provisioned to clients on-demand via a front-end interface. Scientific applications scheduling in the cloud computing environment is identified as NP-hard problem due to the dynamic nature of heterogeneous resources. Recently, a number of metaheuristics optimization schemes have been applied to address the challenges of applications scheduling in the cloud system, without much emphasis on the issue of secure global scheduling. In this paper, scientific applications scheduling techniques using the Global League Championship Algorithm (GBLCA) optimization technique is first presented for global task scheduling in the cloud environment. The experiment is carried out using CloudSim simulator. The experimental results show that, the proposed GBLCA technique produced remarkable performance improvement rate on the makespan that ranges between 14.44% to 46.41%. It also shows significant reduction in the time taken to securely schedule applications as parametrically measured in terms of the response time. 
In view of the experimental results, the proposed technique provides better-quality scheduling solution that is suitable for scientific applications task execution in the Cloud Computing environment than the MinMin, MaxMin, Genetic Algorithm (GA) and Ant Colony Optimization (ACO) scheduling techniques.}, } @article {pmid27383269, year = {2016}, author = {Singharoy, A and Teo, I and McGreevy, R and Stone, JE and Zhao, J and Schulten, K}, title = {Molecular dynamics-based refinement and validation for sub-5 Å cryo-electron microscopy maps.}, journal = {eLife}, volume = {5}, number = {}, pages = {}, pmid = {27383269}, issn = {2050-084X}, support = {P41 GM104601/GM/NIGMS NIH HHS/United States ; R01 GM098243/GM/NIGMS NIH HHS/United States ; U54 GM087519/GM/NIGMS NIH HHS/United States ; }, mesh = {Cryoelectron Microscopy/*methods ; Image Processing, Computer-Assisted/*methods ; *Molecular Dynamics Simulation ; TRPV Cation Channels/chemistry/ultrastructure ; beta-Galactosidase/chemistry/ultrastructure ; }, abstract = {Two structure determination methods, based on the molecular dynamics flexible fitting (MDFF) paradigm, are presented that resolve sub-5 Å cryo-electron microscopy (EM) maps with either single structures or ensembles of such structures. The methods, denoted cascade MDFF and resolution exchange MDFF, sequentially re-refine a search model against a series of maps of progressively higher resolutions, which ends with the original experimental resolution. Application of sequential re-refinement enables MDFF to achieve a radius of convergence of ~25 Å demonstrated with the accurate modeling of β-galactosidase and TRPV1 proteins at 3.2 Å and 3.4 Å resolution, respectively. The MDFF refinements uniquely offer map-model validation and B-factor determination criteria based on the inherent dynamics of the macromolecules studied, captured by means of local root mean square fluctuations. 
The MDFF tools described are available to researchers through an easy-to-use and cost-effective cloud computing resource on Amazon Web Services.}, } @article {pmid27369566, year = {2016}, author = {Aldeen, YA and Salleh, M and Aljeroudi, Y}, title = {An innovative privacy preserving technique for incremental datasets on cloud computing.}, journal = {Journal of biomedical informatics}, volume = {62}, number = {}, pages = {107-116}, doi = {10.1016/j.jbi.2016.06.011}, pmid = {27369566}, issn = {1532-0480}, mesh = {*Cloud Computing ; *Computer Security ; Confidentiality ; Humans ; *Information Storage and Retrieval ; Privacy ; }, abstract = {Cloud computing (CC) is a magnificent service-based delivery with gigantic computer processing power and data storage across connected communications channels. It imparted overwhelming technological impetus in the internet (web) mediated IT industry, where users can easily share private data for further analysis and mining. Furthermore, user affable CC services enable to deploy sundry applications economically. Meanwhile, simple data sharing impelled various phishing attacks and malware assisted security threats. Some privacy sensitive applications like health services on cloud that are built with several economic and operational benefits necessitate enhanced security. Thus, absolute cyberspace security and mitigation against phishing blitz became mandatory to protect overall data privacy. Typically, diverse applications datasets are anonymized with better privacy to owners without providing all secrecy requirements to the newly added records. Some proposed techniques emphasized this issue by re-anonymizing the datasets from the scratch. The utmost privacy protection over incremental datasets on CC is far from being achieved. Certainly, the distribution of huge datasets volume across multiple storage nodes limits the privacy preservation. 
In this view, we propose a new anonymization technique to attain better privacy protection with high data utility over distributed and incremental datasets on CC. The proficiency of data privacy preservation and improved confidentiality requirements is demonstrated through performance evaluation.}, } @article {pmid30035985, year = {2016}, author = {Manos, D}, title = {Future Brightens for The Cloud. Data loads, new trends in computing add a silver lining.}, journal = {Health data management}, volume = {24}, number = {5}, pages = {36--38}, pmid = {30035985}, issn = {1079-9869}, mesh = {*Cloud Computing ; Computer Security ; *Health Information Systems ; Humans ; Systems Integration ; United States ; }, } @article {pmid29388609, year = {2016}, author = {Modave, F and Bian, J and Rosenberg, E and Mendoza, T and Liang, Z and Bhosale, R and Maeztu, C and Rodriguez, C and Cardel, MI}, title = {DiaFit: The Development of a Smart App for Patients with Type 2 Diabetes and Obesity.}, journal = {JMIR diabetes}, volume = {1}, number = {2}, pages = {}, pmid = {29388609}, issn = {2371-4379}, support = {UL1 TR001427/TR/NCATS NIH HHS/United States ; }, abstract = {BACKGROUND: Optimal management of chronic diseases, such as type 2 diabetes (T2D) and obesity, requires patient-provider communication and proactive self-management from the patient. Mobile apps could be an effective strategy for improving patient-provider communication and provide resources for self-management to patients themselves.

OBJECTIVE: The objective of this paper is to describe the development of a mobile tool for patients with T2D and obesity that utilizes an integrative approach to facilitate patient-centered app development, with patient and physician interfaces. Our implementation strategy focused on the building of a multidisciplinary team to create a user-friendly and evidence-based app, to be used by patients in a home setting or at the point-of-care.

METHODS: We present the iterative design, development, and testing of DiaFit, an app designed to improve the self-management of T2D and obesity, using an adapted Agile approach to software implementation. The production team consisted of experts in mobile health, nutrition sciences, and obesity; software engineers; and clinicians. Additionally, the team included citizen scientists and clinicians who acted as the de facto software clients for DiaFit and therefore interacted with the production team throughout the entire app creation, from design to testing.

RESULTS: DiaFit (version 1.0) is an open-source, inclusive iOS app that incorporates nutrition data, physical activity data, and medication and glucose values, as well as patient-reported outcomes. DiaFit supports the uploading of data from sensory devices via Bluetooth for physical activity (iOS step counts, FitBit, Apple watch) and glucose monitoring (iHealth glucose meter). The app provides summary statistics and graphics for step counts, dietary information, and glucose values that can be used by patients and their providers to make informed health decisions. The DiaFit iOS app was developed in Swift (version 2.2) with a Web back-end deployed on the Health Insurance Portability and Accountability Act compliant-ready Amazon Web Services cloud computing platform. DiaFit is publicly available on GitHub to the diabetes community at large, under the GNU General Public License agreement.

CONCLUSIONS: Given the proliferation of health-related apps available to health consumers, it is essential to ensure that apps are evidence-based and user-oriented, with specific health conditions in mind. To this end, we have used a software development approach focusing on community and clinical engagement to create DiaFit, an app that assists patients with T2D and obesity to better manage their health through active communication with their providers and proactive self-management of their diseases.}, } @article {pmid27348127, year = {2016}, author = {Abdullahi, M and Ngadi, MA}, title = {Hybrid Symbiotic Organisms Search Optimization Algorithm for Scheduling of Tasks on Cloud Computing Environment.}, journal = {PloS one}, volume = {11}, number = {6}, pages = {e0158229}, pmid = {27348127}, issn = {1932-6203}, abstract = {Cloud computing has attracted significant attention from research community because of rapid migration rate of Information Technology services to its domain. Advances in virtualization technology has made cloud computing very popular as a result of easier deployment of application services. Tasks are submitted to cloud datacenters to be processed on pay as you go fashion. Task scheduling is one the significant research challenges in cloud computing environment. The current formulation of task scheduling problems has been shown to be NP-complete, hence finding the exact solution especially for large problem sizes is intractable. The heterogeneous and dynamic feature of cloud resources makes optimum task scheduling non-trivial. Therefore, efficient task scheduling algorithms are required for optimum resource utilization. Symbiotic Organisms Search (SOS) has been shown to perform competitively with Particle Swarm Optimization (PSO). The aim of this study is to optimize task scheduling in cloud computing environment based on a proposed Simulated Annealing (SA) based SOS (SASOS) in order to improve the convergence rate and quality of solution of SOS. 
The SOS algorithm has a strong global exploration capability and uses fewer parameters. The systematic reasoning ability of SA is employed to find better solutions on local solution regions, hence, adding exploration ability to SOS. Also, a fitness function is proposed which takes into account the utilization level of virtual machines (VMs) which reduced makespan and degree of imbalance among VMs. CloudSim toolkit was used to evaluate the efficiency of the proposed method using both synthetic and standard workload. Results of simulation showed that hybrid SOS performs better than SOS in terms of convergence speed, response time, degree of imbalance, and makespan.}, } @article {pmid27342254, year = {2016}, author = {Habermann, N and Mardin, BR and Yakneen, S and Korbel, JO}, title = {Using large-scale genome variation cohorts to decipher the molecular mechanism of cancer.}, journal = {Comptes rendus biologies}, volume = {339}, number = {7-8}, pages = {308-313}, doi = {10.1016/j.crvi.2016.05.008}, pmid = {27342254}, issn = {1768-3238}, mesh = {Animals ; Antineoplastic Agents/pharmacology/therapeutic use ; Genome, Human ; Genomic Structural Variation/drug effects/*genetics ; Humans ; Molecular Biology ; Neoplasms/drug therapy/*genetics ; }, abstract = {Characterizing genomic structural variations (SVs) in the human genome remains challenging, and there is a growing interest to understand somatic SVs occurring in cancer, a disease of the genome. A havoc-causing SV process known as chromothripsis scars the genome when localized chromosome shattering and repair occur in a one-off catastrophe. Recent efforts led to the development of a set of conceptual criteria for the inference of chromothripsis events in cancer genomes and to the development of experimental model systems for studying this striking DNA alteration process in vitro. 
We discuss these approaches, and additionally touch upon current "Big Data" efforts that employ hybrid cloud computing to enable studies of numerous cancer genomes in an effort to search for commonalities and differences in molecular DNA alteration processes in cancer.}, } @article {pmid27342137, year = {2016}, author = {Spanakis, EG and Santana, S and Tsiknakis, M and Marias, K and Sakkalis, V and Teixeira, A and Janssen, JH and de Jong, H and Tziraki, C}, title = {Technology-Based Innovations to Foster Personalized Healthy Lifestyles and Well-Being: A Targeted Review.}, journal = {Journal of medical Internet research}, volume = {18}, number = {6}, pages = {e128}, pmid = {27342137}, issn = {1438-8871}, mesh = {Computer Security ; Confidentiality ; *Healthy Lifestyle ; Humans ; *Telemedicine ; }, abstract = {BACKGROUND: New community-based arrangements and novel technologies can empower individuals to be active participants in their health maintenance, enabling people to control and self-regulate their health and wellness and make better health- and lifestyle-related decisions. Mobile sensing technology and health systems responsive to individual profiles combined with cloud computing can expand innovation for new types of interoperable services that are consumer-oriented and community-based. This could fuel a paradigm shift in the way health care can be, or should be, provided and received, while lessening the burden on exhausted health and social care systems.

OBJECTIVE: Our goal is to identify and discuss the main scientific and engineering challenges that need to be successfully addressed in delivering state-of-the-art, ubiquitous eHealth and mHealth services, including citizen-centered wellness management services, and reposition their role and potential within a broader context of diverse sociotechnical drivers, agents, and stakeholders.

METHODS: We review the state-of-the-art relevant to the development and implementation of eHealth and mHealth services in critical domains. We identify and discuss scientific, engineering, and implementation-related challenges that need to be overcome to move research, development, and the market forward.

RESULTS: Several important advances have been identified in the fields of systems for personalized health monitoring, such as smartphone platforms and intelligent ubiquitous services. Sensors embedded in smartphones and clothes are making the unobtrusive recognition of physical activity, behavior, and lifestyle possible, and thus the deployment of platforms for health assistance and citizen empowerment. Similarly, significant advances are observed in the domain of infrastructure supporting services. Still, many technical problems remain to be solved, combined with no less challenging issues related to security, privacy, trust, and organizational dynamics.

CONCLUSIONS: Delivering innovative ubiquitous eHealth and mHealth services, including citizen-centered wellness and lifestyle management services, goes well beyond the development of technical solutions. For the large-scale information and communication technology-supported adoption of healthier lifestyles to take place, crucial innovations are needed in the process of making and deploying usable empowering end-user services that are trusted and user-acceptable. Such innovations require multidomain, multilevel, transdisciplinary work, grounded in theory but driven by citizens' and health care professionals' needs, expectations, and capabilities and matched by business ability to bring innovation to the market.}, } @article {pmid27340682, year = {2016}, author = {Ma, W and Sartipi, K and Sharghigoorabi, H and Koff, D and Bak, P}, title = {OpenID Connect as a security service in cloud-based medical imaging systems.}, journal = {Journal of medical imaging (Bellingham, Wash.)}, volume = {3}, number = {2}, pages = {026501}, pmid = {27340682}, issn = {2329-4302}, abstract = {The evolution of cloud computing is driving the next generation of medical imaging systems. However, privacy and security concerns have been consistently regarded as the major obstacles for adoption of cloud computing by healthcare domains. OpenID Connect, combining OpenID and OAuth together, is an emerging representational state transfer-based federated identity solution. It is one of the most adopted open standards to potentially become the de facto standard for securing cloud computing and mobile applications, which is also regarded as "Kerberos of cloud." We introduce OpenID Connect as an authentication and authorization service in cloud-based diagnostic imaging (DI) systems, and propose enhancements that allow for incorporating this technology within distributed enterprise environments. 
The objective of this study is to offer solutions for secure sharing of medical images among diagnostic imaging repository (DI-r) and heterogeneous picture archiving and communication systems (PACS) as well as Web-based and mobile clients in the cloud ecosystem. The main objective is to use OpenID Connect open-source single sign-on and authorization service and in a user-centric manner, while deploying DI-r and PACS to private or community clouds should provide equivalent security levels to traditional computing model.}, } @article {pmid27332367, year = {2016}, author = {Moraes, KB and Martins, FZ and de Camargo, MD and Vieira, DF and Magalhães, AM and Silveira, DT}, title = {Nursing Activities Score: Cloud Computerized Structure.}, journal = {Studies in health technology and informatics}, volume = {225}, number = {}, pages = {836-837}, pmid = {27332367}, issn = {1879-8365}, mesh = {Brazil ; *Cloud Computing ; Critical Care Nursing/*statistics & numerical data ; Electronic Health Records/statistics & numerical data ; Nursing Informatics/methods ; Postanesthesia Nursing/*statistics & numerical data ; Practice Patterns, Nurses'/*statistics & numerical data ; *Software ; Workload/*statistics & numerical data ; }, abstract = {This study objective to describe the cloud Nursing Activities Score implementation process in the Intensive Care Unit of the Post-Anesthesia Recovery Room. It is a case study. The tools used were the Google applications with high productivity interconnecting the topic knowledge on behalf of the nursing professionals and information technology professionals. As partial results, it was determined that the average nursing staff workload in the ICU/PARR during the first 24 hours, according to the score on the scale, was 91.75 ± 18.2. Each point of NAS is converted into 14.4 minutes, which is equivalent to an average of 22 working hours. 
Currently the instrument is implemented in the institution, reinforcing the need to update and raise awareness concerning the need to maintain the new routine.}, } @article {pmid27332366, year = {2016}, author = {de Oliveira Riboldi, C and Macedo, AB and Mergen, T and Dias, VL and da Costa, DG and Malvezzi, ML and Magalhães, AM and Silveira, DT}, title = {Classification of Patient Care Complexity: Cloud Technology.}, journal = {Studies in health technology and informatics}, volume = {225}, number = {}, pages = {834--835}, pmid = {27332366}, issn = {1879-8365}, mesh = {Brazil ; *Cloud Computing ; Decision Support Systems, Clinical/*organization & administration ; Nursing Care/*organization & administration ; Nursing Diagnosis/*organization & administration ; Patient Care Planning/*organization & administration ; Software ; }, abstract = {Presentation of the computerized structure to implement, in a university hospital in the South of Brazil, the Patients Classification System of Perroca, which categorizes patients according to the care complexity. This solution also aims to corroborate a recent study at the hospital, which evidenced that the increasing workload presents a direct relation with the institutional quality indicators. The tools used were the Google applications with high productivity interconnecting the topic knowledge on behalf of the nursing professionals and information technology professionals.}, } @article {pmid27330890, year = {2016}, author = {Al-Shaqi, R and Mourshed, M and Rezgui, Y}, title = {Progress in ambient assisted systems for independent living by the elderly.}, journal = {SpringerPlus}, volume = {5}, number = {}, pages = {624}, pmid = {27330890}, issn = {2193-1801}, abstract = {One of the challenges of the ageing population in many countries is the efficient delivery of health and care services, which is further complicated by the increase in neurological conditions among the elderly due to rising life expectancy. 
Personal care of the elderly is of concern to their relatives, in case they are alone in their homes and unforeseen circumstances occur, affecting their wellbeing. The alternative; i.e. care in nursing homes or hospitals is costly and increases further if specialized care is mobilized to patients' place of residence. Enabling technologies for independent living by the elderly such as the ambient assisted living systems (AALS) are seen as essential to enhancing care in a cost-effective manner. In light of significant advances in telecommunication, computing and sensor miniaturization, as well as the ubiquity of mobile and connected devices embodying the concept of the Internet of Things (IoT), end-to-end solutions for ambient assisted living have become a reality. The premise of such applications is the continuous and most often real-time monitoring of the environment and occupant behavior using an event-driven intelligent system, thereby providing a facility for monitoring and assessment, and triggering assistance as and when needed. As a growing area of research, it is essential to investigate the approaches for developing AALS in literature to identify current practices and directions for future research. This paper is, therefore, aimed at a comprehensive and critical review of the frameworks and sensor systems used in various ambient assisted living systems, as well as their objectives and relationships with care and clinical systems. Findings from our work suggest that most frameworks focused on activity monitoring for assessing immediate risks, while the opportunities for integrating environmental factors for analytics and decision-making, in particular for the long-term care were often overlooked. The potential for wearable devices and sensors, as well as distributed storage and access (e.g. cloud) are yet to be fully appreciated. There is a distinct lack of strong supporting clinical evidence from the implemented technologies. 
Socio-cultural aspects such as divergence among groups, acceptability and usability of AALS were also overlooked. Future systems need to look into the issues of privacy and cyber security.}, } @article {pmid27296526, year = {2016}, author = {Rideout, JR and Chase, JH and Bolyen, E and Ackermann, G and González, A and Knight, R and Caporaso, JG}, title = {Keemei: cloud-based validation of tabular bioinformatics file formats in Google Sheets.}, journal = {GigaScience}, volume = {5}, number = {}, pages = {27}, pmid = {27296526}, issn = {2047-217X}, mesh = {Cloud Computing ; Computational Biology/*methods ; Humans ; Information Storage and Retrieval ; Software ; User-Computer Interface ; }, abstract = {BACKGROUND: Bioinformatics software often requires human-generated tabular text files as input and has specific requirements for how those data are formatted. Users frequently manage these data in spreadsheet programs, which is convenient for researchers who are compiling the requisite information because the spreadsheet programs can easily be used on different platforms including laptops and tablets, and because they provide a familiar interface. It is increasingly common for many different researchers to be involved in compiling these data, including study coordinators, clinicians, lab technicians and bioinformaticians. As a result, many research groups are shifting toward using cloud-based spreadsheet programs, such as Google Sheets, which support the concurrent editing of a single spreadsheet by different users working on different platforms. Most of the researchers who enter data are not familiar with the formatting requirements of the bioinformatics programs that will be used, so validating and correcting file formats is often a bottleneck prior to beginning bioinformatics analysis.

MAIN TEXT: We present Keemei, a Google Sheets Add-on, for validating tabular files used in bioinformatics analyses. Keemei is available free of charge from Google's Chrome Web Store. Keemei can be installed and run on any web browser supported by Google Sheets. Keemei currently supports the validation of two widely used tabular bioinformatics formats, the Quantitative Insights into Microbial Ecology (QIIME) sample metadata mapping file format and the Spatially Referenced Genetic Data (SRGD) format, but is designed to easily support the addition of others.

CONCLUSIONS: Keemei will save researchers time and frustration by providing a convenient interface for tabular bioinformatics file format validation. By allowing everyone involved with data entry for a project to easily validate their data, it will reduce the validation and formatting bottlenecks that are commonly encountered when human-generated data files are first used with a bioinformatics system. Simplifying the validation of essential tabular data files, such as sample metadata, will reduce common errors and thereby improve the quality and reliability of research outcomes.}, } @article {pmid27295683, year = {2017}, author = {Ceri, S and Kaitoua, A and Masseroli, M and Pinoli, P and Venco, F}, title = {Data Management for Heterogeneous Genomic Datasets.}, journal = {IEEE/ACM transactions on computational biology and bioinformatics}, volume = {14}, number = {6}, pages = {1251-1264}, doi = {10.1109/TCBB.2016.2576447}, pmid = {27295683}, issn = {1557-9964}, mesh = {Cloud Computing ; *Database Management Systems ; *Databases, Genetic ; *Genomics ; Sequence Analysis, DNA ; }, abstract = {Next Generation Sequencing (NGS), a family of technologies for reading DNA and RNA, is changing biological research, and will soon change medical practice, by quickly providing sequencing data and high-level features of numerous individual genomes in different biological and clinical conditions. The availability of millions of whole genome sequences may soon become the biggest and most important "big data" problem of mankind. In this exciting framework, we recently proposed a new paradigm to raise the level of abstraction in NGS data management, by introducing a GenoMetric Query Language (GMQL) and demonstrating its usefulness through several biological query examples. Leveraging on that effort, here we motivate and formalize GMQL operations, especially focusing on the most characteristic and domain-specific ones. 
Furthermore, we address their efficient implementation and illustrate the architecture of the new software system that we have developed for their execution on big genomic data in a cloud computing environment, providing the evaluation of its performance. The new system implementation is available for download at the GMQL website (http://www.bioinformatics.deib.polimi.it/GMQL/); GMQL can also be tested through a set of predefined queries on ENCODE and Roadmap Epigenomics data at http://www.bioinformatics.deib.polimi.it/GMQL/queries/.}, } @article {pmid27283472, year = {2016}, author = {Beswick, DM and Holsinger, FC and Kaplan, MJ and Fischbein, NJ and Hara, W and Colevas, AD and Le, QT and Berry, GJ and Hwang, PH}, title = {Design and rationale of a prospective, multi-institutional registry for patients with sinonasal malignancy.}, journal = {The Laryngoscope}, volume = {126}, number = {9}, pages = {1977-1980}, doi = {10.1002/lary.25996}, pmid = {27283472}, issn = {1531-4995}, mesh = {*Cloud Computing ; Humans ; Paranasal Sinus Neoplasms/*therapy ; Prospective Studies ; Quality of Life ; *Registries ; Treatment Outcome ; United States ; }, abstract = {OBJECTIVES/HYPOTHESIS: Assessment of patients with sinonasal malignancy is challenging due to the low disease incidence and diverse histopathology. The current literature is composed mainly of retrospective studies with heterogeneous cohorts, and the rarity of cases limits our understanding of disease characteristics and treatment outcomes. We describe the development of a prospective, multi-institutional registry that utilizes cloud-based computing to evaluate treatment outcomes in patients with sinonasal cancer.

METHODS: A web-based, secure database was built to prospectively capture longitudinal outcomes and quality-of-life (QoL) data in patients diagnosed with sinonasal malignancy. Demographics, tumor staging, and treatment outcomes data are being collected. The Sinonasal Outcome Test-22 and University of Washington Quality of Life Questionnaire are administered at presentation and at recurring intervals. To date, seven institutions are participating nationally.

CONCLUSION: This prospective, multi-institutional registry will provide novel oncological and QoL outcomes on patients with sinonasal malignancy to inform management decisions and disease prognostication. The application of cloud-based computing facilitates secure multi-institutional collaboration and may serve as a model for future registry development for the study of rare diseases in otolaryngology.

LEVEL OF EVIDENCE: 2C Laryngoscope, 126:1977-1980, 2016.}, } @article {pmid27282236, year = {2016}, author = {Mastmeyer, A and Fortmeier, D and Handels, H}, title = {Efficient patient modeling for visuo-haptic VR simulation using a generic patient atlas.}, journal = {Computer methods and programs in biomedicine}, volume = {132}, number = {}, pages = {161-175}, doi = {10.1016/j.cmpb.2016.04.017}, pmid = {27282236}, issn = {1872-7565}, mesh = {Algorithms ; Humans ; *Models, Theoretical ; *User-Computer Interface ; }, abstract = {BACKGROUND AND OBJECTIVE: This work presents a new time-saving virtual patient modeling system by way of example for an existing visuo-haptic training and planning virtual reality (VR) system for percutaneous transhepatic cholangio-drainage (PTCD).

METHODS: Our modeling process is based on a generic patient atlas to start with. It is defined by organ-specific optimized models, method modules and parameters, i.e. mainly individual segmentation masks, transfer functions to fill the gaps between the masks and intensity image data. In this contribution, we show how generic patient atlases can be generalized to new patient data. The methodology consists of patient-specific, locally-adaptive transfer functions and dedicated modeling methods such as multi-atlas segmentation, vessel filtering and spline-modeling.

RESULTS: Our full image volume segmentation algorithm yields median DICE coefficients of 0.98, 0.93, 0.82, 0.74, 0.51 and 0.48 regarding soft-tissue, liver, bone, skin, blood and bile vessels for ten test patients and three selected reference patients. Compared to standard slice-wise manual contouring, time saving is remarkable.

CONCLUSIONS: Our segmentation process demonstrates efficiency and robustness for upper abdominal puncture simulation systems. This marks a significant step toward establishing patient-specific training and hands-on planning systems in a clinical environment.}, } @article {pmid27282229, year = {2016}, author = {Goli-Malekabadi, Z and Sargolzaei-Javan, M and Akbari, MK}, title = {An effective model for store and retrieve big health data in cloud computing.}, journal = {Computer methods and programs in biomedicine}, volume = {132}, number = {}, pages = {75--82}, doi = {10.1016/j.cmpb.2016.04.016}, pmid = {27282229}, issn = {1872-7565}, mesh = {*Cloud Computing ; *Information Storage and Retrieval ; *Models, Theoretical ; }, abstract = {BACKGROUND AND OBJECTIVE: The volume of healthcare data including different and variable text types, sounds, and images is increasing day to day. Therefore, the storage and processing of these data is a necessary and challenging issue. Generally, relational databases are used for storing health data which are not able to handle the massive and diverse nature of them.

METHODS: This study aimed at presenting the model based on NoSQL databases for the storage of healthcare data. Among the different types of NoSQL databases, document-based DBs were selected by a survey on the nature of health data. The presented model was implemented in the Cloud environment to access the distribution properties. Then, the data were distributed on the database by applying the Shard property.

RESULTS: The efficiency of the model was evaluated in comparison with the previous data model, Relational Database, considering query time, data preparation, flexibility, and extensibility parameters. The results showed that the presented model approximately performed the same as SQL Server for "read" query while it acted more efficiently than SQL Server for "write" query. Also, the performance of the presented model was better than SQL Server in the case of flexibility, data preparation and extensibility.

CONCLUSIONS: Based on these observations, the proposed model was more effective than Relational Databases for handling health data.}, } @article {pmid27281411, year = {2016}, author = {Datta, S and Bettinger, K and Snyder, M}, title = {Secure cloud computing for genomic data.}, journal = {Nature biotechnology}, volume = {34}, number = {6}, pages = {588-591}, pmid = {27281411}, issn = {1546-1696}, mesh = {*Cloud Computing ; *Computer Security ; *Confidentiality ; *Databases, Genetic ; Information Dissemination/*methods ; Information Storage and Retrieval/*methods ; }, } @article {pmid27274686, year = {2016}, author = {Palmer, TN}, title = {A personal perspective on modelling the climate system.}, journal = {Proceedings. Mathematical, physical, and engineering sciences}, volume = {472}, number = {2188}, pages = {20150772}, pmid = {27274686}, issn = {1364-5021}, abstract = {Given their increasing relevance for society, I suggest that the climate science community itself does not treat the development of error-free ab initio models of the climate system with sufficient urgency. With increasing levels of difficulty, I discuss a number of proposals for speeding up such development. Firstly, I believe that climate science should make better use of the pool of post-PhD talent in mathematics and physics, for developing next-generation climate models. Secondly, I believe there is more scope for the development of modelling systems which link weather and climate prediction more seamlessly. Finally, here in Europe, I call for a new European Programme on Extreme Computing and Climate to advance our ability to simulate climate extremes, and understand the drivers of such extremes. 
A key goal for such a programme is the development of a 1 km global climate system model to run on the first exascale supercomputers in the early 2020s.}, } @article {pmid27267963, year = {2016}, author = {Spjuth, O and Bongcam-Rudloff, E and Dahlberg, J and Dahlö, M and Kallio, A and Pireddu, L and Vezzi, F and Korpelainen, E}, title = {Recommendations on e-infrastructures for next-generation sequencing.}, journal = {GigaScience}, volume = {5}, number = {}, pages = {26}, pmid = {27267963}, issn = {2047-217X}, mesh = {Computational Biology/methods ; High-Throughput Nucleotide Sequencing/*methods ; Humans ; Information Storage and Retrieval ; Internet ; Sequence Analysis, DNA/*methods ; Software ; }, abstract = {With ever-increasing amounts of data being produced by next-generation sequencing (NGS) experiments, the requirements placed on supporting e-infrastructures have grown. In this work, we provide recommendations based on the collective experiences from participants in the EU COST Action SeqAhead for the tasks of data preprocessing, upstream processing, data delivery, and downstream analysis, as well as long-term storage and archiving. We cover demands on computational and storage resources, networks, software stacks, automation of analysis, education, and also discuss emerging trends in the field. E-infrastructures for NGS require substantial effort to set up and maintain over time, and with sequencing technologies and best practices for data analysis evolving rapidly it is important to prioritize both processing capacity and e-infrastructure flexibility when making strategic decisions to support the data analysis demands of tomorrow. 
Due to increasingly demanding technical requirements we recommend that e-infrastructure development and maintenance be handled by a professional service unit, be it internal or external to the organization, and emphasis should be placed on collaboration between researchers and IT professionals.}, } @article {pmid27261155, year = {2016}, author = {O'Reilly-Shah, V and Mackey, S}, title = {Survalytics: An Open-Source Cloud-Integrated Experience Sampling, Survey, and Analytics and Metadata Collection Module for Android Operating System Apps.}, journal = {JMIR mHealth and uHealth}, volume = {4}, number = {2}, pages = {e46}, pmid = {27261155}, issn = {2291-5222}, abstract = {BACKGROUND: We describe here Survalytics, a software module designed to address two broad areas of need. The first area is in the domain of surveys and app analytics: developers of mobile apps in both academic and commercial environments require information about their users, as well as how the apps are being used, to understand who their users are and how to optimally approach app development. The second area of need is in the field of ecological momentary assessment, also referred to as experience sampling: researchers in a wide variety of fields, spanning from the social sciences to psychology to clinical medicine, would like to be able to capture daily or even more frequent data from research subjects while in their natural environment.

OBJECTIVE: Survalytics is an open-source solution for the collection of survey responses as well as arbitrary analytic metadata from users of Android operating system apps.

METHODS: Surveys may be administered in any combination of one-time questions and ongoing questions. The module may be deployed as a stand-alone app for experience sampling purposes or as an add-on to existing apps. The module takes advantage of free-tier NoSQL cloud database management offered by the Amazon Web Services DynamoDB platform to package a secure, flexible, extensible data collection module. DynamoDB is capable of Health Insurance Portability and Accountability Act compliant storage of personal health information.

RESULTS: The provided example app may be used without modification for a basic experience sampling project, and we provide example questions for daily collection of blood glucose data from study subjects.

CONCLUSIONS: The module will help researchers in a wide variety of fields rapidly develop tailor-made Android apps for a variety of data collection purposes.}, } @article {pmid27244718, year = {2017}, author = {Keysers, D and Deselaers, T and Rowley, HA and Wang, LL and Carbune, V}, title = {Multi-Language Online Handwriting Recognition.}, journal = {IEEE transactions on pattern analysis and machine intelligence}, volume = {39}, number = {6}, pages = {1180-1194}, doi = {10.1109/TPAMI.2016.2572693}, pmid = {27244718}, issn = {1939-3539}, abstract = {We describe Google's online handwriting recognition system that currently supports 22 scripts and 97 languages. The system's focus is on fast, high-accuracy text entry for mobile, touch-enabled devices. We use a combination of state-of-the-art components and combine them with novel additions in a flexible framework. This architecture allows us to easily transfer improvements between languages and scripts. This made it possible to build recognizers for languages that, to the best of our knowledge, are not handled by any other online handwriting recognition system. The approach also enabled us to use the same architecture both on very powerful machines for recognition in the cloud as well as on mobile devices with more limited computational power by changing some of the settings of the system. In this paper we give a general overview of the system architecture and the novel components, such as unified time- and position-based input interpretation, trainable segmentation, minimum-error rate training for feature combination, and a cascade of pruning strategies. We present experimental results for different setups. 
The system is currently publicly available in several Google products, for example in Google Translate and as an input method for Android devices.}, } @article {pmid27239551, year = {2016}, author = {Florence, AP and Shanthi, V and Simon, CB}, title = {Energy Conservation Using Dynamic Voltage Frequency Scaling for Computational Cloud.}, journal = {TheScientificWorldJournal}, volume = {2016}, number = {}, pages = {9328070}, pmid = {27239551}, issn = {1537-744X}, abstract = {Cloud computing is a new technology which supports resource sharing on a "Pay as you go" basis around the world. It provides various services such as SaaS, IaaS, and PaaS. Computation is a part of IaaS and the entire computational requests are to be served efficiently with optimal power utilization in the cloud. Recently, various algorithms are developed to reduce power consumption and even Dynamic Voltage and Frequency Scaling (DVFS) scheme is also used in this perspective. In this paper we have devised methodology which analyzes the behavior of the given cloud request and identifies the associated type of algorithm. Once the type of algorithm is identified, using their asymptotic notations, its time complexity is calculated. Using best fit strategy the appropriate host is identified and the incoming job is allocated to the victimized host. Using the measured time complexity the required clock frequency of the host is measured. 
According to that CPU frequency is scaled up or down using DVFS scheme, enabling energy to be saved up to 55% of total Watts consumption.}, } @article {pmid27222731, year = {2016}, author = {Menychtas, A and Tsanakas, P and Maglogiannis, I}, title = {Automated integration of wireless biosignal collection devices for patient-centred decision-making in point-of-care systems.}, journal = {Healthcare technology letters}, volume = {3}, number = {1}, pages = {34-40}, pmid = {27222731}, issn = {2053-3713}, abstract = {The proper acquisition of biosignals data from various biosensor devices and their remote accessibility are still issues that prevent the wide adoption of point-of-care systems in the routine of monitoring chronic patients. This Letter presents an advanced framework for enabling patient monitoring that utilises a cloud computing infrastructure for data management and analysis. The framework introduces also a local mechanism for uniform biosignals collection from wearables and biosignal sensors, and decision support modules, in order to enable prompt and essential decisions. A prototype smartphone application and the related cloud modules have been implemented for demonstrating the value of the proposed framework. 
Initial results regarding the performance of the system and the effectiveness in data management and decision-making have been quite encouraging.}, } @article {pmid27216779, year = {2016}, author = {Ribeiro, JV and Bernardi, RC and Rudack, T and Stone, JE and Phillips, JC and Freddolino, PL and Schulten, K}, title = {QwikMD - Integrative Molecular Dynamics Toolkit for Novices and Experts.}, journal = {Scientific reports}, volume = {6}, number = {}, pages = {26536}, pmid = {27216779}, issn = {2045-2322}, support = {P41 GM104601/GM/NIGMS NIH HHS/United States ; R00 GM097033/GM/NIGMS NIH HHS/United States ; }, abstract = {The proper functioning of biomolecules in living cells requires them to assume particular structures and to undergo conformational changes. Both biomolecular structure and motion can be studied using a wide variety of techniques, but none offers the level of detail as do molecular dynamics (MD) simulations. Integrating two widely used modeling programs, namely NAMD and VMD, we have created a robust, user-friendly software, QwikMD, which enables novices and experts alike to address biomedically relevant questions, where often only molecular dynamics simulations can provide answers. Performing both simple and advanced MD simulations interactively, QwikMD automates as many steps as necessary for preparing, carrying out, and analyzing simulations while checking for common errors and enabling reproducibility. QwikMD meets also the needs of experts in the field, increasing the efficiency and quality of their work by carrying out tedious or repetitive tasks while enabling easy control of every step. Whether carrying out simulations within the live view mode on a small laptop or performing complex and large simulations on supercomputers or Cloud computers, QwikMD uses the same steps and user interface. QwikMD is freely available by download on group and personal computers. 
It is also available on the cloud at Amazon Web Services.}, } @article {pmid27215009, year = {2015}, author = {Long, E and Huang, B and Wang, L and Lin, X and Lin, H}, title = {Construction of databases: advances and significance in clinical research.}, journal = {Eye science}, volume = {30}, number = {4}, pages = {184-189}, pmid = {27215009}, issn = {1000-4432}, mesh = {*Biomedical Research ; *Databases, Factual ; Humans ; Randomized Controlled Trials as Topic ; Workflow ; }, abstract = {Widely used in clinical research, the database is a new type of data management automation technology and the most efficient tool for data management. In this article, we first explain some basic concepts, such as the definition, classification, and establishment of databases. Afterward, the workflow for establishing databases, inputting data, verifying data, and managing databases is presented. Meanwhile, by discussing the application of databases in clinical research, we illuminate the important role of databases in clinical research practice. Lastly, we introduce the reanalysis of randomized controlled trials (RCTs) and cloud computing techniques, showing the most recent advancements of databases in clinical research.}, } @article {pmid27213384, year = {2016}, author = {Liu, J and Zhang, L and Sun, R}, title = {1-RAAP: An Efficient 1-Round Anonymous Authentication Protocol for Wireless Body Area Networks.}, journal = {Sensors (Basel, Switzerland)}, volume = {16}, number = {5}, pages = {}, pmid = {27213384}, issn = {1424-8220}, abstract = {Thanks to the rapid technological convergence of wireless communications, medical sensors and cloud computing, Wireless Body Area Networks (WBANs) have emerged as a novel networking paradigm enabling ubiquitous Internet services, allowing people to receive medical care, monitor health status in real-time, analyze sports data and even enjoy online entertainment remotely. 
However, because of the mobility and openness of wireless communications, WBANs are inevitably exposed to a large set of potential attacks, significantly undermining their utility and impeding their widespread deployment. To prevent attackers from threatening legitimate WBAN users or abusing WBAN services, an efficient and secure authentication protocol termed 1-Round Anonymous Authentication Protocol (1-RAAP) is proposed in this paper. In particular, 1-RAAP preserves anonymity, mutual authentication, non-repudiation and some other desirable security properties, while only requiring users to perform several low cost computational operations. More importantly, 1-RAAP is provably secure thanks to its design basis, which is resistant to the anonymous in the random oracle model. To validate the computational efficiency of 1-RAAP, a set of comprehensive comparative studies between 1-RAAP and other authentication protocols is conducted, and the results clearly show that 1-RAAP achieves the best performance in terms of computational overhead.}, } @article {pmid27209270, year = {2019}, author = {Huang, X and Matricardi, PM}, title = {Allergy and Asthma Care in the Mobile Phone Era.}, journal = {Clinical reviews in allergy & immunology}, volume = {56}, number = {2}, pages = {161-173}, pmid = {27209270}, issn = {1559-0267}, mesh = {Asthma/*diagnosis/*therapy ; *Cell Phone ; Disease Management ; Electronic Data Processing ; Humans ; Hypersensitivity/*diagnosis/*therapy ; Medication Adherence ; Mobile Applications ; Rhinitis, Allergic/diagnosis/therapy ; Self-Management ; *Telemedicine/methods ; }, abstract = {Strategies to improve patients' adherence to treatment are essential to reduce the great health and economic burden of allergic rhinitis and asthma. Mobile phone applications (apps) for a better management of allergic diseases are growing in number, but their usefulness for doctors and patients is still debated. 
Controlled trials have investigated the feasibility, cost-effectiveness, security, and perspectives of the use of tele-medicine in the self-management of asthma. These studies focused on different tools or devices, such as SMS, telephone calls, automatic voice response system, mobile applications, speech recognition system, or cloud-computing systems. While some trials concluded that m-Health can improve asthma control and the patient's quality of life, others did not show any advantage in relation to usual care. The only controlled study on allergic rhinitis showed an improvement of adherence to treatment among tele-monitored patients compared to those managed with usual care. Most studies have also highlighted a few shortcomings and limitations of tele-medicine, mainly concerning security and cost-efficiency. The use of smartphones and apps for a personalized asthma and allergy care needs to be further evaluated and optimized before conclusions on its usefulness can be drawn.}, } @article {pmid27208530, year = {2016}, author = {Luo, S and Ren, B}, title = {The monitoring and managing application of cloud computing based on Internet of Things.}, journal = {Computer methods and programs in biomedicine}, volume = {130}, number = {}, pages = {154-161}, doi = {10.1016/j.cmpb.2016.03.024}, pmid = {27208530}, issn = {1872-7565}, mesh = {*Cloud Computing ; Computer Simulation ; *Internet ; Medical Informatics ; }, abstract = {Cloud computing and the Internet of Things are the two hot points in the Internet application field. The application of the two new technologies is in hot discussion and research, but quite less on the field of medical monitoring and managing application. Thus, in this paper, we study and analyze the application of cloud computing and the Internet of Things on the medical field. And we manage to make a combination of the two techniques in the medical monitoring and managing field. 
The model architecture for remote monitoring cloud platform of healthcare information (RMCPHI) was established firstly. Then the RMCPHI architecture was analyzed. Finally an efficient PSOSAA algorithm was proposed for the medical monitoring and managing application of cloud computing. Simulation results showed that our proposed scheme can improve the efficiency about 50%.}, } @article {pmid27195740, year = {2016}, author = {Harriman, KL and Murugesu, M}, title = {An Organolanthanide Building Block Approach to Single-Molecule Magnets.}, journal = {Accounts of chemical research}, volume = {49}, number = {6}, pages = {1158-1167}, doi = {10.1021/acs.accounts.6b00100}, pmid = {27195740}, issn = {1520-4898}, abstract = {Single-molecule magnets (SMMs) are highly sought after for their potential application in high-density information storage, spintronics, and quantum computing. SMMs exhibit slow relaxation of the magnetization of purely molecular origin, thus making them excellent candidates towards the aforementioned applications. In recent years, significant focus has been placed on the rare earth elements due to their large intrinsic magnetic anisotropy arising from the near degeneracy of the 4f orbitals. Traditionally, coordination chemistry has been utilized to fabricate lanthanide-based SMMs; however, heteroatomic donor atoms such as oxygen and nitrogen have limited orbital overlap with the shielded 4f orbitals. Thus, control over the anisotropic axis and induction of f-f interactions are limited, meaning that the performance of these systems can only extend so far. To this end, we have placed considerable attention on the development of novel SMMs whose donor atoms are conjugated hydrocarbons, thereby allowing us to perturb the crystal field of lanthanide ions through the use of an electronic π-cloud. 
This approach allows for fine tuning of the anisotropic axis of the molecule, allowing this method the potential to elicit SMMs capable of reaching much larger values for the two vital performance measurements of an SMM, the energy barrier to spin reversal (Ueff), and the blocking temperature of the magnetization (TB). In this Account, we describe our efforts to exploit the inherent anisotropy of the late 4f elements; namely, Dy(III) and Er(III), through the use of cyclooctatetraenyl (COT) metallocenes. With respect to the Er(III) derivatives, we have seen record breaking success, reaching blocking temperatures as high as 14 K with frozen solution magnetometry. These results represent the first example of such a high TB being observed for a system with only a single spin center, formally known as a single-ion magnet (SIM). Our continued interrelationship between theoretical and experimental chemistry allows us to shed light on the mechanisms and electronic properties that govern the slow relaxation dynamics inherent to this unique set of SMMs, thus providing insight into the role by which both symmetry and crystal field effects contribute to the magnetic properties. As we look to the future success of such materials in practical devices, we must gain an understanding of how the 4f elements communicate magnetically, a subject upon which there is still limited knowledge. As such, we have described our work on coupling mononuclear metallocenes to generate new dinuclear SMMs. Through a building block approach, we have been able to gain access to new double,- triple- and quadruple-decker complexes that possess remarkable properties; exhibiting TB of 12 K and Ueff above 300 K. Our goal is to develop a fundamental platform from which to study 4f coupling, while maintaining and enhancing the strict axiality of the anisotropy of the 4f ions. 
This Account will present a successful strategy employed in the production of novel and high-performing SMMs, as well as a clear overview of the lessons learned throughout.}, } @article {pmid27183115, year = {2016}, author = {Hahn, AS and Konwar, KM and Louca, S and Hanson, NW and Hallam, SJ}, title = {The information science of microbial ecology.}, journal = {Current opinion in microbiology}, volume = {31}, number = {}, pages = {209-216}, doi = {10.1016/j.mib.2016.04.014}, pmid = {27183115}, issn = {1879-0364}, mesh = {*Ecological and Environmental Phenomena ; Ecosystem ; Electronic Data Processing/*methods ; High-Throughput Nucleotide Sequencing ; Information Science/*methods ; *Information Services ; Internet ; Microbial Consortia/*genetics ; }, abstract = {A revolution is unfolding in microbial ecology where petabytes of 'multi-omics' data are produced using next generation sequencing and mass spectrometry platforms. This cornucopia of biological information has enormous potential to reveal the hidden metabolic powers of microbial communities in natural and engineered ecosystems. However, to realize this potential, the development of new technologies and interpretative frameworks grounded in ecological design principles are needed to overcome computational and analytical bottlenecks. Here we explore the relationship between microbial ecology and information science in the era of cloud-based computation. 
We consider microorganisms as individual information processing units implementing a distributed metabolic algorithm and describe developments in ecoinformatics and ubiquitous computing with the potential to eliminate bottlenecks and empower knowledge creation and translation.}, } @article {pmid27155893, year = {2016}, author = {Sajid, A and Abbas, H}, title = {Data Privacy in Cloud-assisted Healthcare Systems: State of the Art and Future Challenges.}, journal = {Journal of medical systems}, volume = {40}, number = {6}, pages = {155}, pmid = {27155893}, issn = {1573-689X}, mesh = {*Cloud Computing ; *Computer Security ; Confidentiality ; Health Information Systems ; Humans ; Software ; User-Computer Interface ; }, abstract = {The widespread deployment and utility of Wireless Body Area Networks (WBAN's) in healthcare systems required new technologies like Internet of Things (IoT) and cloud computing, that are able to deal with the storage and processing limitations of WBAN's. This amalgamation of WBAN-based healthcare systems to cloud-based healthcare systems gave rise to serious privacy concerns to the sensitive healthcare data. Hence, there is a need for the proactive identification and effective mitigation mechanisms for these patient's data privacy concerns that pose continuous threats to the integrity and stability of the healthcare environment. For this purpose, a systematic literature review has been conducted that presents a clear picture of the privacy concerns of patient's data in cloud-assisted healthcare systems and analyzed the mechanisms that are recently proposed by the research community. The methodology used for conducting the review was based on Kitchenham guidelines. Results from the review show that most of the patient's data privacy techniques do not fully address the privacy concerns and therefore require more efforts. 
The summary presented in this paper would help in setting research directions for the techniques and mechanisms that are needed to address the patient's data privacy concerns in a balanced and light-weight manner by considering all the aspects and limitations of the cloud-assisted healthcare systems.}, } @article {pmid27153614, year = {2016}, author = {Nellore, A and Wilks, C and Hansen, KD and Leek, JT and Langmead, B}, title = {Rail-dbGaP: analyzing dbGaP-protected data in the cloud with Amazon Elastic MapReduce.}, journal = {Bioinformatics (Oxford, England)}, volume = {32}, number = {16}, pages = {2551-2553}, pmid = {27153614}, issn = {1367-4811}, support = {R01 GM118568/GM/NIGMS NIH HHS/United States ; R01 GM105705/GM/NIGMS NIH HHS/United States ; }, mesh = {Algorithms ; *Computational Biology ; *Databases, Genetic ; High-Throughput Nucleotide Sequencing ; Humans ; RNA ; Reproducibility of Results ; *Software ; }, abstract = {MOTIVATION: Public archives contain thousands of trillions of bases of valuable sequencing data. More than 40% of the Sequence Read Archive is human data protected by provisions such as dbGaP. To analyse dbGaP-protected data, researchers must typically work with IT administrators and signing officials to ensure all levels of security are implemented at their institution. This is a major obstacle, impeding reproducibility and reducing the utility of archived data.

RESULTS: We present a protocol and software tool for analyzing protected data in a commercial cloud. The protocol, Rail-dbGaP, is applicable to any tool running on Amazon Web Services Elastic MapReduce. The tool, Rail-RNA v0.2, is a spliced aligner for RNA-seq data, which we demonstrate by running on 9662 samples from the dbGaP-protected GTEx consortium dataset. The Rail-dbGaP protocol makes explicit for the first time the steps an investigator must take to develop Elastic MapReduce pipelines that analyse dbGaP-protected data in a manner compliant with NIH guidelines. Rail-RNA automates implementation of the protocol, making it easy for typical biomedical investigators to study protected RNA-seq data, regardless of their local IT resources or expertise.

Rail-RNA is available from http://rail.bio. Technical details on the Rail-dbGaP protocol as well as an implementation walkthrough are available at https://github.com/nellore/rail-dbgap. Detailed instructions on running Rail-RNA on dbGaP-protected data using Amazon Web Services are available at http://docs.rail.bio/dbgap/.

CONTACTS: anellore@gmail.com or langmea@cs.jhu.edu

SUPPLEMENTARY INFORMATION: Supplementary data are available at Bioinformatics online.}, } @article {pmid27137302, year = {2016}, author = {Yang, H and He, Y and Zhang, J and Ji, Y and Bai, W and Lee, Y}, title = {Performance evaluation of multi-stratum resources optimization with network functions virtualization for cloud-based radio over optical fiber networks.}, journal = {Optics express}, volume = {24}, number = {8}, pages = {8666-8678}, doi = {10.1364/OE.24.008666}, pmid = {27137302}, issn = {1094-4087}, abstract = {Cloud radio access network (C-RAN) has become a promising scenario to accommodate high-performance services with ubiquitous user coverage and real-time cloud computing using cloud BBUs. In our previous work, we implemented cross stratum optimization of optical network and application stratums resources that allows to accommodate the services in optical networks. In view of this, this study extends to consider the multiple dimensional resources optimization of radio, optical and BBU processing in 5G age. We propose a novel multi-stratum resources optimization (MSRO) architecture with network functions virtualization for cloud-based radio over optical fiber networks (C-RoFN) using software defined control. A global evaluation scheme (GES) for MSRO in C-RoFN is introduced based on the proposed architecture. The MSRO can enhance the responsiveness to dynamic end-to-end user demands and globally optimize radio frequency, optical and BBU resources effectively to maximize radio coverage. The efficiency and feasibility of the proposed architecture are experimentally demonstrated on OpenFlow-based enhanced SDN testbed. 
The performance of GES under heavy traffic load scenario is also quantitatively evaluated based on MSRO architecture in terms of resource occupation rate and path provisioning latency, compared with other provisioning scheme.}, } @article {pmid27136944, year = {2016}, author = {Peisert, S and Barnett, W and Dart, E and Cuff, J and Grossman, RL and Balas, E and Berman, A and Shankar, A and Tierney, B}, title = {The Medical Science DMZ.}, journal = {Journal of the American Medical Informatics Association : JAMIA}, volume = {23}, number = {6}, pages = {1199-1201}, doi = {10.1093/jamia/ocw032}, pmid = {27136944}, issn = {1527-974X}, mesh = {*Computer Communication Networks ; *Computer Security/legislation & jurisprudence ; *Computing Methodologies ; Confidentiality/legislation & jurisprudence ; Government Regulation ; Health Insurance Portability and Accountability Act ; Medical Records Systems, Computerized/legislation & jurisprudence ; United States ; }, abstract = {OBJECTIVE: We describe use cases and an institutional reference architecture for maintaining high-capacity, data-intensive network flows (e.g., 10, 40, 100 Gbps+) in a scientific, medical context while still adhering to security and privacy laws and regulations.

MATERIALS AND METHODS: High-end networking, packet filter firewalls, network intrusion detection systems.

RESULTS: We describe a "Medical Science DMZ" concept as an option for secure, high-volume transport of large, sensitive data sets between research institutions over national research networks.

DISCUSSION: The exponentially increasing amounts of "omics" data, the rapid increase of high-quality imaging, and other rapidly growing clinical data sets have resulted in the rise of biomedical research "big data." The storage, analysis, and network resources required to process these data and integrate them into patient diagnoses and treatments have grown to scales that strain the capabilities of academic health centers. Some data are not generated locally and cannot be sustained locally, and shared data repositories such as those provided by the National Library of Medicine, the National Cancer Institute, and international partners such as the European Bioinformatics Institute are rapidly growing. The ability to store and compute using these data must therefore be addressed by a combination of local, national, and industry resources that exchange large data sets. Maintaining data-intensive flows that comply with HIPAA and other regulations presents a new challenge for biomedical research. Recognizing this, we describe a strategy that marries performance and security by borrowing from and redefining the concept of a "Science DMZ"-a framework that is used in physical sciences and engineering research to manage high-capacity data flows.

CONCLUSION: By implementing a Medical Science DMZ architecture, biomedical researchers can leverage the scale provided by high-performance computer and cloud storage facilities and national high-speed research networks while preserving privacy and meeting regulatory requirements.}, } @article {pmid27130330, year = {2016}, author = {Bhuvaneshwar, K and Belouali, A and Singh, V and Johnson, RM and Song, L and Alaoui, A and Harris, MA and Clarke, R and Weiner, LM and Gusev, Y and Madhavan, S}, title = {G-DOC Plus - an integrative bioinformatics platform for precision medicine.}, journal = {BMC bioinformatics}, volume = {17}, number = {1}, pages = {193}, pmid = {27130330}, issn = {1471-2105}, support = {U01 FD004319/FD/FDA HHS/United States ; U54 CA149147/CA/NCI NIH HHS/United States ; UL1 TR001409/TR/NCATS NIH HHS/United States ; UL1 TR000101/TR/NCATS NIH HHS/United States ; HHSN261200800001C/RC/CCR NIH HHS/United States ; HHSN261200800001E/CA/NCI NIH HHS/United States ; }, mesh = {Computational Biology/*methods ; *Databases, Factual ; Humans ; Neoplasms/genetics ; Oligonucleotide Array Sequence Analysis ; Precision Medicine/*methods ; Transcriptome ; }, abstract = {BACKGROUND: G-DOC Plus is a data integration and bioinformatics platform that uses cloud computing and other advanced computational tools to handle a variety of biomedical BIG DATA including gene expression arrays, NGS and medical images so that they can be analyzed in the full context of other omics and clinical information.

RESULTS: G-DOC Plus currently holds data from over 10,000 patients selected from private and public resources including Gene Expression Omnibus (GEO), The Cancer Genome Atlas (TCGA) and the recently added datasets from REpository for Molecular BRAin Neoplasia DaTa (REMBRANDT), caArray studies of lung and colon cancer, ImmPort and the 1000 genomes data sets. The system allows researchers to explore clinical-omic data one sample at a time, as a cohort of samples; or at the level of population, providing the user with a comprehensive view of the data. G-DOC Plus tools have been leveraged in cancer and non-cancer studies for hypothesis generation and validation; biomarker discovery and multi-omics analysis, to explore somatic mutations and cancer MRI images; as well as for training and graduate education in bioinformatics, data and computational sciences. Several of these use cases are described in this paper to demonstrate its multifaceted usability.

CONCLUSION: G-DOC Plus can be used to support a variety of user groups in multiple domains to enable hypothesis generation for precision medicine research. The long-term vision of G-DOC Plus is to extend this translational bioinformatics platform to stay current with emerging omics technologies and analysis methods to continue supporting novel hypothesis generation, analysis and validation for integrative biomedical research. By integrating several aspects of the disease and exposing various data elements, such as outpatient lab workup, pathology, radiology, current treatments, molecular signatures and expected outcomes over a web interface, G-DOC Plus will continue to strengthen precision medicine research. G-DOC Plus is available at: https://gdoc.georgetown.edu .}, } @article {pmid27127335, year = {2016}, author = {Bao, S and Damon, SM and Landman, BA and Gokhale, A}, title = {Performance Management of High Performance Computing for Medical Image Processing in Amazon Web Services.}, journal = {Proceedings of SPIE--the International Society for Optical Engineering}, volume = {9789}, number = {}, pages = {}, pmid = {27127335}, issn = {0277-786X}, support = {R01 EB015611/EB/NIBIB NIH HHS/United States ; R03 EB012461/EB/NIBIB NIH HHS/United States ; UL1 RR024975/RR/NCRR NIH HHS/United States ; UL1 TR000445/TR/NCATS NIH HHS/United States ; }, abstract = {Adopting high performance cloud computing for medical image processing is a popular trend given the pressing needs of large studies. Amazon Web Services (AWS) provide reliable, on-demand, and inexpensive cloud computing services. Our research objective is to implement an affordable, scalable and easy-to-use AWS framework for the Java Image Science Toolkit (JIST). JIST is a plugin for Medical-Image Processing, Analysis, and Visualization (MIPAV) that provides a graphical pipeline implementation allowing users to quickly test and develop pipelines. 
JIST is DRMAA-compliant allowing it to run on portable batch system grids. However, as new processing methods are implemented and developed, memory may often be a bottleneck for not only lab computers, but also possibly some local grids. Integrating JIST with the AWS cloud alleviates these possible restrictions and does not require users to have deep knowledge of programming in Java. Workflow definition/management and cloud configurations are two key challenges in this research. Using a simple unified control panel, users have the ability to set the numbers of nodes and select from a variety of pre-configured AWS EC2 nodes with different numbers of processors and memory storage. Intuitively, we configured Amazon S3 storage to be mounted by pay-for-use Amazon EC2 instances. Hence, S3 storage is recognized as a shared cloud resource. The Amazon EC2 instances provide pre-installs of all necessary packages to run JIST. This work presents an implementation that facilitates the integration of JIST with AWS. We describe the theoretical cost/benefit formulae to decide between local serial execution versus cloud computing and apply this analysis to an empirical diffusion tensor imaging pipeline.}, } @article {pmid27109933, year = {2016}, author = {Yasnoff, WA}, title = {A secure and efficiently searchable health information architecture.}, journal = {Journal of biomedical informatics}, volume = {61}, number = {}, pages = {237-246}, doi = {10.1016/j.jbi.2016.04.004}, pmid = {27109933}, issn = {1532-0480}, mesh = {*Cloud Computing ; *Computer Security ; Databases, Factual ; Humans ; Internet ; *Medical Records Systems, Computerized ; }, abstract = {Patient-centric repositories of health records are an important component of health information infrastructure. However, patient information in a single repository is potentially vulnerable to loss of the entire dataset from a single unauthorized intrusion. 
A new health record storage architecture, the personal grid, eliminates this risk by separately storing and encrypting each person's record. The tradeoff for this improved security is that a personal grid repository must be sequentially searched since each record must be individually accessed and decrypted. To allow reasonable search times for large numbers of records, parallel processing with hundreds (or even thousands) of on-demand virtual servers (now available in cloud computing environments) is used. Estimated search times for a 10 million record personal grid using 500 servers vary from 7 to 33min depending on the complexity of the query. Since extremely rapid searching is not a critical requirement of health information infrastructure, the personal grid may provide a practical and useful alternative architecture that eliminates the large-scale security vulnerabilities of traditional databases by sacrificing unnecessary searching speed.}, } @article {pmid27102885, year = {2017}, author = {Bitsaki, M and Koutras, C and Koutras, G and Leymann, F and Steimle, F and Wagner, S and Wieland, M}, title = {ChronicOnline: Implementing a mHealth solution for monitoring and early alerting in chronic obstructive pulmonary disease.}, journal = {Health informatics journal}, volume = {23}, number = {3}, pages = {197-207}, doi = {10.1177/1460458216641480}, pmid = {27102885}, issn = {1741-2811}, mesh = {Humans ; Information Dissemination/methods ; Mobile Applications/trends ; Monitoring, Physiologic/*instrumentation/*methods ; Pulmonary Disease, Chronic Obstructive/complications/*diagnosis/therapy ; Telemedicine/*methods/standards ; }, abstract = {Lack of time or economic difficulties prevent chronic obstructive pulmonary disease patients from communicating regularly with their physicians, thus inducing exacerbation of their chronic condition and possible hospitalization. 
Enhancing Chronic patients' Health Online proposes a new, sustainable and innovative business model that provides at low cost and at significant savings to the national health system, a preventive health service for chronic obstructive pulmonary disease patients, by combining human medical expertise with state-of-the-art online service delivery based on cloud computing, service-oriented architecture, data analytics, and mobile applications. In this article, we implement the frontend applications of the Enhancing Chronic patients' Health Online system and describe their functionality and the interfaces available to the users.}, } @article {pmid27097993, year = {2016}, author = {Satoh, A}, title = {[Development of a System to Use Patient's Information Which is Required at the Radiological Department].}, journal = {Nihon Hoshasen Gijutsu Gakkai zasshi}, volume = {72}, number = {4}, pages = {319-325}, doi = {10.6009/jjrt.2016_JSRT_72.4.319}, pmid = {27097993}, issn = {0369-4305}, mesh = {Humans ; *Medical Records Systems, Computerized ; *Radiology Department, Hospital ; *Radiology Information Systems ; Software ; }, abstract = {The purpose of this study is to develop a new system to get and share some data of a patient which are required for a radiological examination not using an electronic medical chart or a radiological information system (RIS), and also to demonstrate that this system is operated on cloud technology. I used Java Enterprise Edition (Java EE) as a programing language and MySQL as a server software, and I used two laptops as hardware for client computer and server computer. For cloud computing, I hired a server of Google App Engine for Java (GAE). As a result, I could get some data of the patient required at his/her examination instantly using this system. This system also helps to improve the efficiency of examination. 
For example, it has been useful when I want to decide radiographic condition or to create CT images such as multi-planar reconstruction (MPR) or volume rendering (VR). When it comes to cloud computing, the GAE was used experimentally due to some legal restrictions. From the above points it is clear that this system has played an important role in radiological examinations, but there has been still few things which I have to resolve for cloud computing.}, } @article {pmid27094989, year = {2016}, author = {Sharif, B and Lundin, RM and Morgan, P and Hall, JE and Dhadda, A and Mann, C and Donoghue, D and Brownlow, E and Hill, F and Carr, G and Turley, H and Hassall, J and Atkinson, M and Jones, M and Martin, R and Rollason, S and Ibrahim, Y and Kopczynska, M and Szakmany, T and , }, title = {Developing a digital data collection platform to measure the prevalence of sepsis in Wales.}, journal = {Journal of the American Medical Informatics Association : JAMIA}, volume = {23}, number = {6}, pages = {1185-1189}, doi = {10.1093/jamia/ocv208}, pmid = {27094989}, issn = {1527-974X}, mesh = {Data Collection/*methods ; Education, Medical ; Humans ; *Mobile Applications ; Prevalence ; Sepsis/*epidemiology ; Students, Medical ; Wales/epidemiology ; }, abstract = {OBJECTIVE: To develop a secure, efficient, and easy-to-use data collection platform to measure the prevalence of sepsis in Wales over 24 hours.

MATERIALS AND METHODS: Open Data Kit was used on Android devices with Google App Engine and a digital data collection form.

RESULTS: A total of 184 students participated in the study using 59 devices across 16 hospitals, 1198 datasets were submitted, and 97% of participants found the Open Data Kit form easy to use.

DISCUSSION: We successfully demonstrated that by combining a reliable Android device, a free open-source data collection framework, a scalable cloud-based server, and a team of 184 medical students, we can deliver a low-cost, highly reliable platform that requires little training or maintenance, providing results immediately on completion of data collection.

CONCLUSION: Our platform allowed us to measure, for the first time, the prevalence of sepsis in Wales over 24 hours.}, } @article {pmid27075578, year = {2016}, author = {Fukunishi, Y and Mashimo, T and Misoo, K and Wakabayashi, Y and Miyaki, T and Ohta, S and Nakamura, M and Ikeda, K}, title = {Miscellaneous Topics in Computer-Aided Drug Design: Synthetic Accessibility and GPU Computing, and Other Topics.}, journal = {Current pharmaceutical design}, volume = {22}, number = {23}, pages = {3555-3568}, pmid = {27075578}, issn = {1873-4286}, mesh = {Computer Graphics ; Computer Simulation ; *Computer-Aided Design ; Computers ; Databases, Factual ; *Drug Design ; Software ; }, abstract = {Computer-aided drug design is still a state-of-the-art process in medicinal chemistry, and the main topics in this field have been extensively studied and well reviewed. These topics include compound databases, ligand-binding pocket prediction, protein-compound docking, virtual screening, target/off-target prediction, physical property prediction, molecular simulation and pharmacokinetics/pharmacodynamics (PK/PD) prediction. Message and Conclusion: However, there are also a number of secondary or miscellaneous topics that have been less well covered. For example, methods for synthesizing and predicting the synthetic accessibility (SA) of designed compounds are important in practical drug development, and hardware/software resources for performing the computations in computer-aided drug design are crucial. Cloud computing and general purpose graphics processing unit (GPGPU) computing have been used in virtual screening and molecular dynamics simulations. Not surprisingly, there is a growing demand for computer systems which combine these resources. 
In the present review, we summarize and discuss these various topics of drug design.}, } @article {pmid27071896, year = {2016}, author = {Hucíková, A and Babic, A}, title = {Cloud Computing in Healthcare: A Space of Opportunities and Challenges.}, journal = {Studies in health technology and informatics}, volume = {221}, number = {}, pages = {122}, pmid = {27071896}, issn = {1879-8365}, mesh = {*Cloud Computing ; Electronic Health Records/*organization & administration ; Information Dissemination/*methods ; Internet/*organization & administration ; *Models, Organizational ; Systems Integration ; Telecommunications/*organization & administration ; }, } @article {pmid27059737, year = {2016}, author = {Chen, SW and Chiang, DL and Liu, CH and Chen, TS and Lai, F and Wang, H and Wei, W}, title = {Confidentiality Protection of Digital Health Records in Cloud Computing.}, journal = {Journal of medical systems}, volume = {40}, number = {5}, pages = {124}, pmid = {27059737}, issn = {1573-689X}, mesh = {*Cloud Computing ; *Computer Security ; *Confidentiality ; Electronic Health Records/*organization & administration ; Humans ; }, abstract = {Electronic medical records containing confidential information were uploaded to the cloud. The cloud allows medical crews to access and manage the data and integration of medical records easily. This data system provides relevant information to medical personnel and facilitates and improve electronic medical record management and data transmission. A structure of cloud-based and patient-centered personal health record (PHR) is proposed in this study. This technique helps patients to manage their health information, such as appointment date with doctor, health reports, and a completed understanding of their own health conditions. It will create patients a positive attitudes to maintain the health. The patients make decision on their own for those whom has access to their records over a specific span of time specified by the patients. 
Storing data in the cloud environment can reduce costs and enhance the share of information, but the potential threat of information security should be taken into consideration. This study is proposing the cloud-based secure transmission mechanism is suitable for multiple users (like nurse aides, patients, and family members).}, } @article {pmid27020957, year = {2016}, author = {Duvick, J and Standage, DS and Merchant, N and Brendel, VP}, title = {xGDBvm: A Web GUI-Driven Workflow for Annotating Eukaryotic Genomes in the Cloud.}, journal = {The Plant cell}, volume = {28}, number = {4}, pages = {840-854}, pmid = {27020957}, issn = {1532-298X}, mesh = {Computational Biology ; Genome, Plant/genetics ; *Software ; Workflow ; }, abstract = {Genome-wide annotation of gene structure requires the integration of numerous computational steps. Currently, annotation is arguably best accomplished through collaboration of bioinformatics and domain experts, with broad community involvement. However, such a collaborative approach is not scalable at today's pace of sequence generation. To address this problem, we developed the xGDBvm software, which uses an intuitive graphical user interface to access a number of common genome analysis and gene structure tools, preconfigured in a self-contained virtual machine image. Once their virtual machine instance is deployed through iPlant's Atmosphere cloud services, users access the xGDBvm workflow via a unified Web interface to manage inputs, set program parameters, configure links to high-performance computing (HPC) resources, view and manage output, apply analysis and editing tools, or access contextual help. The xGDBvm workflow will mask the genome, compute spliced alignments from transcript and/or protein inputs (locally or on a remote HPC cluster), predict gene structures and gene structure quality, and display output in a public or private genome browser complete with accessory tools. 
Problematic gene predictions are flagged and can be reannotated using the integrated yrGATE annotation tool. xGDBvm can also be configured to append or replace existing data or load precomputed data. Multiple genomes can be annotated and displayed, and outputs can be archived for sharing or backup. xGDBvm can be adapted to a variety of use cases including de novo genome annotation, reannotation, comparison of different annotations, and training or teaching.}, } @article {pmid27000778, year = {2016}, author = {Li, CT and Lee, CC and Weng, CY}, title = {A Secure Cloud-Assisted Wireless Body Area Network in Mobile Emergency Medical Care System.}, journal = {Journal of medical systems}, volume = {40}, number = {5}, pages = {117}, pmid = {27000778}, issn = {1573-689X}, mesh = {Cell Phone ; *Cloud Computing ; *Computer Communication Networks ; *Computer Security ; Confidentiality ; Emergency Medical Services/*methods ; Humans ; Monitoring, Ambulatory ; Telemetry/*methods ; }, abstract = {Recent advances in medical treatment and emergency applications, the need of integrating wireless body area network (WBAN) with cloud computing can be motivated by providing useful and real time information about patients' health state to the doctors and emergency staffs. WBAN is a set of body sensors carried by the patient to collect and transmit numerous health items to medical clouds via wireless and public communication channels. Therefore, a cloud-assisted WBAN facilitates response in case of emergency which can save patients' lives. Since the patient's data is sensitive and private, it is important to provide strong security and protection on the patient's medical data over public and insecure communication channels. In this paper, we address the challenge of participant authentication in mobile emergency medical care systems for patients supervision and propose a secure cloud-assisted architecture for accessing and monitoring health items collected by WBAN. 
For ensuring a high level of security and providing a mutual authentication property, chaotic maps based authentication and key agreement mechanisms are designed according to the concept of Diffie-Hellman key exchange, which depends on the CMBDLP and CMBDHP problems. Security and performance analyses show how the proposed system guaranteed the patient privacy and the system confidentiality of sensitive medical data while preserving the low computation property in medical treatment and remote medical monitoring.}, } @article {pmid26989153, year = {2016}, author = {Simonyan, V and Chumakov, K and Dingerdissen, H and Faison, W and Goldweber, S and Golikov, A and Gulzar, N and Karagiannis, K and Vinh Nguyen Lam, P and Maudru, T and Muravitskaja, O and Osipova, E and Pan, Y and Pschenichnov, A and Rostovtsev, A and Santana-Quintero, L and Smith, K and Thompson, EE and Tkachenko, V and Torcivia-Rodriguez, J and Voskanian, A and Wan, Q and Wang, J and Wu, TJ and Wilson, C and Mazumder, R}, title = {High-performance integrated virtual environment (HIVE): a robust infrastructure for next-generation sequence data analysis.}, journal = {Database : the journal of biological databases and curation}, volume = {2016}, number = {}, pages = {}, pmid = {26989153}, issn = {1758-0463}, mesh = {Computational Biology ; High-Throughput Nucleotide Sequencing/*methods ; Mutation/genetics ; Poliovirus/genetics ; Poliovirus Vaccines/immunology ; Proteomics ; Recombination, Genetic ; Sequence Alignment ; Statistics as Topic ; *User-Computer Interface ; }, abstract = {The High-performance Integrated Virtual Environment (HIVE) is a distributed storage and compute environment designed primarily to handle next-generation sequencing (NGS) data. 
This multicomponent cloud infrastructure provides secure web access for authorized users to deposit, retrieve, annotate and compute on NGS data, and to analyse the outcomes using web interface visual environments appropriately built in collaboration with research and regulatory scientists and other end users. Unlike many massively parallel computing environments, HIVE uses a cloud control server which virtualizes services, not processes. It is both very robust and flexible due to the abstraction layer introduced between computational requests and operating system processes. The novel paradigm of moving computations to the data, instead of moving data to computational nodes, has proven to be significantly less taxing for both hardware and network infrastructure. The honeycomb data model developed for HIVE integrates metadata into an object-oriented model. Its distinction from other object-oriented databases is in the additional implementation of a unified application program interface to search, view and manipulate data of all types. This model simplifies the introduction of new data types, thereby minimizing the need for database restructuring and streamlining the development of new integrated information systems. The honeycomb model employs a highly secure hierarchical access control and permission system, allowing determination of data access privileges in a finely granular manner without flooding the security subsystem with a multiplicity of rules. HIVE infrastructure will allow engineers and scientists to perform NGS analysis in a manner that is both efficient and secure. HIVE is actively supported in public and private domains, and project collaborations are welcomed. 
Database URL: https://hive.biochemistry.gwu.edu.}, } @article {pmid26981584, year = {2016}, author = {Muthurajan, V and Narayanasamy, B}, title = {An Elliptic Curve Based Schnorr Cloud Security Model in Distributed Environment.}, journal = {The Scientific World Journal}, volume = {2016}, number = {}, pages = {4913015}, pmid = {26981584}, issn = {1537-744X}, abstract = {Cloud computing requires the security upgrade in data transmission approaches. In general, key-based encryption/decryption (symmetric and asymmetric) mechanisms ensure the secure data transfer between the devices. The symmetric key mechanisms (pseudorandom function) provide minimum protection level compared to asymmetric key (RSA, AES, and ECC) schemes. The presence of expired content and the irrelevant resources cause unauthorized data access adversely. This paper investigates how the integrity and secure data transfer are improved based on the Elliptic Curve based Schnorr scheme. This paper proposes a virtual machine based cloud model with Hybrid Cloud Security Algorithm (HCSA) to remove the expired content. The HCSA-based auditing improves the malicious activity prediction during the data transfer. The duplication in the cloud server degrades the performance of EC-Schnorr based encryption schemes. This paper utilizes the blooming filter concept to avoid the cloud server duplication. The combination of EC-Schnorr and blooming filter efficiently improves the security performance. 
The comparative analysis between proposed HCSA and the existing Distributed Hash Table (DHT) regarding execution time, computational overhead, and auditing time with auditing requests and servers confirms the effectiveness of HCSA in the cloud security model creation.}, } @article {pmid26968893, year = {2016}, author = {de la Garza, L and Veit, J and Szolek, A and Röttig, M and Aiche, S and Gesing, S and Reinert, K and Kohlbacher, O}, title = {From the desktop to the grid: scalable bioinformatics via workflow conversion.}, journal = {BMC bioinformatics}, volume = {17}, number = {}, pages = {127}, pmid = {26968893}, issn = {1471-2105}, mesh = {Computational Biology/*methods ; *Computer Communication Networks ; *Microcomputers ; Reproducibility of Results ; *Software ; *Workflow ; }, abstract = {BACKGROUND: Reproducibility is one of the tenets of the scientific method. Scientific experiments often comprise complex data flows, selection of adequate parameters, and analysis and visualization of intermediate and end results. Breaking down the complexity of such experiments into the joint collaboration of small, repeatable, well defined tasks, each with well defined inputs, parameters, and outputs, offers the immediate benefit of identifying bottlenecks, pinpoint sections which could benefit from parallelization, among others. Workflows rest upon the notion of splitting complex work into the joint effort of several manageable tasks. There are several engines that give users the ability to design and execute workflows. Each engine was created to address certain problems of a specific community, therefore each one has its advantages and shortcomings. Furthermore, not all features of all workflow engines are royalty-free - an aspect that could potentially drive away members of the scientific community.

RESULTS: We have developed a set of tools that enables the scientific community to benefit from workflow interoperability. We developed a platform-free structured representation of parameters, inputs, outputs of command-line tools in so-called Common Tool Descriptor documents. We have also overcome the shortcomings and combined the features of two royalty-free workflow engines with a substantial user community: the Konstanz Information Miner, an engine which we see as a formidable workflow editor, and the Grid and User Support Environment, a web-based framework able to interact with several high-performance computing resources. We have thus created a free and highly accessible way to design workflows on a desktop computer and execute them on high-performance computing resources.

CONCLUSIONS: Our work will not only reduce time spent on designing scientific workflows, but also make executing workflows on remote high-performance computing resources more accessible to technically inexperienced users. We strongly believe that our efforts not only decrease the turnaround time to obtain scientific results but also have a positive impact on reproducibility, thus elevating the quality of obtained scientific results.}, } @article {pmid26960378, year = {2016}, author = {Zhang, Y and Guo, SL and Han, LN and Li, TL}, title = {Application and Exploration of Big Data Mining in Clinical Medicine.}, journal = {Chinese medical journal}, volume = {129}, number = {6}, pages = {731-738}, pmid = {26960378}, issn = {2542-5641}, mesh = {Bayes Theorem ; *Clinical Medicine ; *Data Mining ; Decision Support Systems, Clinical ; Decision Trees ; Evidence-Based Medicine ; Fuzzy Logic ; Humans ; Neural Networks, Computer ; Pattern Recognition, Automated ; }, abstract = {OBJECTIVE: To review theories and technologies of big data mining and their application in clinical medicine.

DATA SOURCES: Literatures published in English or Chinese regarding theories and technologies of big data mining and the concrete applications of data mining technology in clinical medicine were obtained from PubMed and Chinese Hospital Knowledge Database from 1975 to 2015.

STUDY SELECTION: Original articles regarding big data mining theory/technology and big data mining's application in the medical field were selected.

RESULTS: This review characterized the basic theories and technologies of big data mining including fuzzy theory, rough set theory, cloud theory, Dempster-Shafer theory, artificial neural network, genetic algorithm, inductive learning theory, Bayesian network, decision tree, pattern recognition, high-performance computing, and statistical analysis. The application of big data mining in clinical medicine was analyzed in the fields of disease risk assessment, clinical decision support, prediction of disease development, guidance of rational use of drugs, medical management, and evidence-based medicine.

CONCLUSION: Big data mining has the potential to play an important role in clinical medicine.}, } @article {pmid26958881, year = {2016}, author = {Walker, MA and Madduri, R and Rodriguez, A and Greenstein, JL and Winslow, RL}, title = {Models and Simulations as a Service: Exploring the Use of Galaxy for Delivering Computational Models.}, journal = {Biophysical journal}, volume = {110}, number = {5}, pages = {1038-1043}, pmid = {26958881}, issn = {1542-0086}, support = {R24 HL085343/HL/NHLBI NIH HHS/United States ; R24HL085343/HL/NHLBI NIH HHS/United States ; }, mesh = {Animals ; Axons/physiology ; Calcium/metabolism ; Calcium Signaling ; *Computer Simulation ; Decapodiformes ; *Internet ; *Software ; }, abstract = {We describe the ways in which Galaxy, a web-based reproducible research platform, can be used for web-based sharing of complex computational models. Galaxy allows users to seamlessly customize and run simulations on cloud computing resources, a concept we refer to as Models and Simulations as a Service (MaSS). To illustrate this application of Galaxy, we have developed a tool suite for simulating a high spatial-resolution model of the cardiac Ca(2+) spark that requires supercomputing resources for execution. We also present tools for simulating models encoded in the SBML and CellML model description languages, thus demonstrating how Galaxy's reproducible research features can be leveraged by existing technologies. Finally, we demonstrate how the Galaxy workflow editor can be used to compose integrative models from constituent submodules. 
This work represents an important novel approach, to our knowledge, to making computational simulations more accessible to the broader scientific community.}, } @article {pmid26955656, year = {2016}, author = {Devi, DC and Uthariaraj, VR}, title = {Load Balancing in Cloud Computing Environment Using Improved Weighted Round Robin Algorithm for Nonpreemptive Dependent Tasks.}, journal = {The Scientific World Journal}, volume = {2016}, number = {}, pages = {3896065}, pmid = {26955656}, issn = {1537-744X}, abstract = {Cloud computing uses the concepts of scheduling and load balancing to migrate tasks to underutilized VMs for effectively sharing the resources. The scheduling of the nonpreemptive tasks in the cloud computing environment is an irrecoverable restraint and hence it has to be assigned to the most appropriate VMs at the initial placement itself. Practically, the arrived jobs consist of multiple interdependent tasks and they may execute the independent tasks in multiple VMs or in the same VM's multiple cores. Also, the jobs arrive during the run time of the server in varying random intervals under various load conditions. The participating heterogeneous resources are managed by allocating the tasks to appropriate resources by static or dynamic scheduling to make the cloud computing more efficient and thus it improves the user satisfaction. Objective of this work is to introduce and evaluate the proposed scheduling and load balancing algorithm by considering the capabilities of each virtual machine (VM), the task length of each requested job, and the interdependency of multiple tasks. 
Performance of the proposed algorithm is studied by comparing with the existing methods.}, } @article {pmid26955035, year = {2016}, author = {Muelder, C and Zhu, B and Chen, W and Zhang, H and Ma, KL}, title = {Visual Analysis of Cloud Computing Performance Using Behavioral Lines.}, journal = {IEEE transactions on visualization and computer graphics}, volume = {22}, number = {6}, pages = {1694-1704}, doi = {10.1109/TVCG.2016.2534558}, pmid = {26955035}, issn = {1941-0506}, abstract = {Cloud computing is an essential technology to Big Data analytics and services. A cloud computing system is often comprised of a large number of parallel computing and storage devices. Monitoring the usage and performance of such a system is important for efficient operations, maintenance, and security. Tracing every application on a large cloud system is untenable due to scale and privacy issues. But profile data can be collected relatively efficiently by regularly sampling the state of the system, including properties such as CPU load, memory usage, network usage, and others, creating a set of multivariate time series for each system. Adequate tools for studying such large-scale, multidimensional data are lacking. In this paper, we present a visual based analysis approach to understanding and analyzing the performance and behavior of cloud computing systems. Our design is based on similarity measures and a layout method to portray the behavior of each compute node over time. When visualizing a large number of behavioral lines together, distinct patterns often appear suggesting particular types of performance bottleneck. The resulting system provides multiple linked views, which allow the user to interactively explore the data by examining the data or a selected subset at different levels of detail. 
Our case studies, which use datasets collected from two different cloud systems, show that this visual based approach is effective in identifying trends and anomalies of the systems.}, } @article {pmid26954507, year = {2016}, author = {Oh, J and Choi, CH and Park, MK and Kim, BK and Hwang, K and Lee, SH and Hong, SG and Nasir, A and Cho, WS and Kim, KM}, title = {CLUSTOM-CLOUD: In-Memory Data Grid-Based Software for Clustering 16S rRNA Sequence Data in the Cloud Environment.}, journal = {PloS one}, volume = {11}, number = {3}, pages = {e0151064}, pmid = {26954507}, issn = {1932-6203}, mesh = {*Cluster Analysis ; Computational Biology/methods ; *Environmental Microbiology ; Humans ; RNA, Ribosomal, 16S/*genetics ; Reproducibility of Results ; *Software ; Workflow ; }, abstract = {High-throughput sequencing can produce hundreds of thousands of 16S rRNA sequence reads corresponding to different organisms present in the environmental samples. Typically, analysis of microbial diversity in bioinformatics starts from pre-processing followed by clustering 16S rRNA reads into relatively fewer operational taxonomic units (OTUs). The OTUs are reliable indicators of microbial diversity and greatly accelerate the downstream analysis time. However, existing hierarchical clustering algorithms that are generally more accurate than greedy heuristic algorithms struggle with large sequence datasets. To keep pace with the rapid rise in sequencing data, we present CLUSTOM-CLOUD, which is the first distributed sequence clustering program based on In-Memory Data Grid (IMDG) technology-a distributed data structure to store all data in the main memory of multiple computing nodes. The IMDG technology helps CLUSTOM-CLOUD to enhance both its capability of handling larger datasets and its computational scalability better than its ancestor, CLUSTOM, while maintaining high accuracy. 
Clustering speed of CLUSTOM-CLOUD was evaluated on published 16S rRNA human microbiome sequence datasets using the small laboratory cluster (10 nodes) and under the Amazon EC2 cloud-computing environments. Under the laboratory environment, it required only ~3 hours to process dataset of size 200 K reads regardless of the complexity of the human microbiome data. In turn, one million reads were processed in approximately 20, 14, and 11 hours when utilizing 20, 30, and 40 nodes on the Amazon EC2 cloud-computing environment. The running time evaluation indicates that CLUSTOM-CLOUD can handle much larger sequence datasets than CLUSTOM and is also a scalable distributed processing system. The comparative accuracy test using 16S rRNA pyrosequences of a mock community shows that CLUSTOM-CLOUD achieves higher accuracy than DOTUR, mothur, ESPRIT-Tree, UCLUST and Swarm. CLUSTOM-CLOUD is written in JAVA and is freely available at http://clustomcloud.kopri.re.kr.}, } @article {pmid26949976, year = {2016}, author = {Doerr, S and Harvey, MJ and Noé, F and De Fabritiis, G}, title = {HTMD: High-Throughput Molecular Dynamics for Molecular Discovery.}, journal = {Journal of chemical theory and computation}, volume = {12}, number = {4}, pages = {1845-1852}, doi = {10.1021/acs.jctc.6b00049}, pmid = {26949976}, issn = {1549-9626}, abstract = {Recent advances in molecular simulations have allowed scientists to investigate slower biological processes than ever before. Together with these advances came an explosion of data that has transformed a traditionally computing-bound into a data-bound problem. Here, we present HTMD, a programmable, extensible platform written in Python that aims to solve the data generation and analysis problem as well as increase reproducibility by providing a complete workspace for simulation-based discovery. 
So far, HTMD includes system building for CHARMM and AMBER force fields, projection methods, clustering, molecular simulation production, adaptive sampling, an Amazon cloud interface, Markov state models, and visualization. As a result, a single, short HTMD script can lead from a PDB structure to useful quantities such as relaxation time scales, equilibrium populations, metastable conformations, and kinetic rates. In this paper, we focus on the adaptive sampling and Markov state modeling features.}, } @article {pmid26940635, year = {2016}, author = {Melício Monteiro, EJ and Costa, C and Oliveira, JL}, title = {A Cloud Architecture for Teleradiology-as-a-Service.}, journal = {Methods of information in medicine}, volume = {55}, number = {3}, pages = {203-214}, doi = {10.3414/ME14-01-0052}, pmid = {26940635}, issn = {2511-705X}, mesh = {*Cloud Computing ; Internet ; Radiology Information Systems ; Social Media ; *Teleradiology ; Time Factors ; }, abstract = {BACKGROUND: Telemedicine has been promoted by healthcare professionals as an efficient way to obtain remote assistance from specialised centres, to get a second opinion about complex diagnosis or even to share knowledge among practitioners. The current economic restrictions in many countries are increasing the demand for these solutions even more, in order to optimize processes and reduce costs. However, despite some technological solutions already in place, their adoption has been hindered by the lack of usability, especially in the set-up process.

OBJECTIVES: In this article we propose a telemedicine platform that relies on a cloud computing infrastructure and social media principles to simplify the creation of dynamic user-based groups, opening up opportunities for the establishment of teleradiology trust domains.

METHODS: The collaborative platform is provided as a Software-as-a-Service solution, supporting real time and asynchronous collaboration between users. To evaluate the solution, we have deployed the platform in a private cloud infrastructure. The system is made up of three main components - the collaborative framework, the Medical Management Information System (MMIS) and the HTML5 (Hyper Text Markup Language) Web client application - connected by a message-oriented middleware.

RESULTS: The solution allows physicians to create easily dynamic network groups for synchronous or asynchronous cooperation. The network created improves dataflow between colleagues and also knowledge sharing and cooperation through social media tools. The platform was implemented and it has already been used in two distinct scenarios: teaching of radiology and tele-reporting.

CONCLUSIONS: Collaborative systems can simplify the establishment of telemedicine expert groups with tools that enable physicians to improve their clinical practice. Streamlining the usage of this kind of systems through the adoption of Web technologies that are common in social media will increase the quality of current solutions, facilitating the sharing of clinical information, medical imaging studies and patient diagnostics among collaborators.}, } @article {pmid26925205, year = {2015}, author = {Bhuvaneshwar, K and Sulakhe, D and Gauba, R and Rodriguez, A and Madduri, R and Dave, U and Lacinski, L and Foster, I and Gusev, Y and Madhavan, S}, title = {A case study for cloud based high throughput analysis of NGS data using the globus genomics system.}, journal = {Computational and structural biotechnology journal}, volume = {13}, number = {}, pages = {64--74}, pmid = {26925205}, issn = {2001-0370}, support = {U01 HG008390/HG/NHGRI NIH HHS/United States ; U54 CA149147/CA/NCI NIH HHS/United States ; }, abstract = {Next generation sequencing (NGS) technologies produce massive amounts of data requiring a powerful computational infrastructure, high quality bioinformatics software, and skilled personnel to operate the tools. We present a case study of a practical solution to this data management and analysis challenge that simplifies terabyte scale data handling and provides advanced tools for NGS data analysis. These capabilities are implemented using the "Globus Genomics" system, which is an enhanced Galaxy workflow system made available as a service that offers users the capability to process and transfer data easily, reliably and quickly to address end-to-end NGS analysis requirements. The Globus Genomics system is built on Amazon's cloud computing infrastructure. 
The system takes advantage of elastic scaling of compute resources to run multiple workflows in parallel and it also helps meet the scale-out analysis needs of modern translational genomics research.}, } @article {pmid26924983, year = {2015}, author = {Fasani, RA and Livi, CB and Choudhury, DR and Kleensang, A and Bouhifd, M and Pendse, SN and McMullen, PD and Andersen, ME and Hartung, T and Rosenberg, M}, title = {The Human Toxome Collaboratorium: A Shared Environment for Multi-Omic Computational Collaboration within a Consortium.}, journal = {Frontiers in pharmacology}, volume = {6}, number = {}, pages = {322}, pmid = {26924983}, issn = {1663-9812}, support = {R01 ES020750/ES/NIEHS NIH HHS/United States ; }, abstract = {The Human Toxome Project is part of a long-term vision to modernize toxicity testing for the 21st century. In the initial phase of the project, a consortium of six academic, commercial, and government organizations has partnered to map pathways of toxicity, using endocrine disruption as a model hazard. Experimental data is generated at multiple sites, and analyzed using a range of computational tools. While effectively gathering, managing, and analyzing the data for high-content experiments is a challenge in its own right, doing so for a growing number of -omics technologies, with larger data sets, across multiple institutions complicates the process. Interestingly, one of the most difficult, ongoing challenges has been the computational collaboration between the geographically separate institutions. Existing solutions cannot handle the growing heterogeneous data, provide a computational environment for consistent analysis, accommodate different workflows, and adapt to the constantly evolving methods and goals of a research project. To meet the needs of the project, we have created and managed The Human Toxome Collaboratorium, a shared computational environment hosted on third-party cloud services. 
The Collaboratorium provides a familiar virtual desktop, with a mix of commercial, open-source, and custom-built applications. It shares some of the challenges of traditional information technology, but with unique and unexpected constraints that emerge from the cloud. Here we describe the problems we faced, the current architecture of the solution, an example of its use, the major lessons we learned, and the future potential of the concept. In particular, the Collaboratorium represents a novel distribution method that could increase the reproducibility and reusability of results from similar large, multi-omic studies.}, } @article {pmid26918190, year = {2016}, author = {Dinov, ID}, title = {Methodological challenges and analytic opportunities for modeling and interpreting Big Healthcare Data.}, journal = {GigaScience}, volume = {5}, number = {}, pages = {12}, pmid = {26918190}, issn = {2047-217X}, support = {P30 AG053760/AG/NIA NIH HHS/United States ; U54 EB020406/EB/NIBIB NIH HHS/United States ; P20 NR015331/NR/NINR NIH HHS/United States ; P30 DK089503/DK/NIDDK NIH HHS/United States ; P50 NS091856/NS/NINDS NIH HHS/United States ; }, mesh = {Computational Biology/*methods ; Delivery of Health Care/*statistics & numerical data ; Humans ; *Models, Theoretical ; Neuroimaging/statistics & numerical data ; Principal Component Analysis ; Reproducibility of Results ; *Software ; }, abstract = {Managing, processing and understanding big healthcare data is challenging, costly and demanding. Without a robust fundamental theory for representation, analysis and inference, a roadmap for uniform handling and analyzing of such complex data remains elusive. In this article, we outline various big data challenges, opportunities, modeling methods and software techniques for blending complex healthcare data, advanced analytic tools, and distributed scientific computing. 
Using imaging, genetic and healthcare data we provide examples of processing heterogeneous datasets using distributed cloud services, automated and semi-automated classification techniques, and open-science protocols. Despite substantial advances, new innovative technologies need to be developed that enhance, scale and optimize the management and processing of large, complex and heterogeneous data. Stakeholder investments in data acquisition, research and development, computational infrastructure and education will be critical to realize the huge potential of big data, to reap the expected information benefits and to build lasting knowledge assets. Multi-faceted proprietary, open-source, and community developments will be essential to enable broad, reliable, sustainable and efficient data-driven discovery and analytics. Big data will affect every sector of the economy and their hallmark will be 'team science'.}, } @article {pmid26915830, year = {2016}, author = {Cross, R and Olivieri, L and O'Brien, K and Kellman, P and Xue, H and Hansen, M}, title = {Improved workflow for quantification of left ventricular volumes and mass using free-breathing motion corrected cine imaging.}, journal = {Journal of cardiovascular magnetic resonance : official journal of the Society for Cardiovascular Magnetic Resonance}, volume = {18}, number = {}, pages = {10}, pmid = {26915830}, issn = {1532-429X}, support = {//Intramural NIH HHS/United States ; }, mesh = {Adult ; Algorithms ; Cloud Computing ; Feasibility Studies ; Female ; Heart Diseases/*diagnosis/pathology/physiopathology ; Humans ; Image Interpretation, Computer-Assisted/*methods ; Magnetic Resonance Imaging, Cine/*methods ; Male ; Middle Aged ; Observer Variation ; Predictive Value of Tests ; Reproducibility of Results ; *Respiration ; *Stroke Volume ; *Ventricular Function, Left ; *Workflow ; Young Adult ; }, abstract = {BACKGROUND: Traditional cine imaging for cardiac functional assessment requires breath-holding, which 
can be problematic in some situations. Free-breathing techniques have relied on multiple averages or real-time imaging, producing images that can be spatially and/or temporally blurred. To overcome this, methods have been developed to acquire real-time images over multiple cardiac cycles, which are subsequently motion corrected and reformatted to yield a single image series displaying one cardiac cycle with high temporal and spatial resolution. Application of these algorithms has required significant additional reconstruction time. The use of distributed computing was recently proposed as a way to improve clinical workflow with such algorithms. In this study, we have deployed a distributed computing version of motion corrected re-binning reconstruction for free-breathing evaluation of cardiac function.

METHODS: Twenty five patients and 25 volunteers underwent cardiovascular magnetic resonance (CMR) for evaluation of left ventricular end-systolic volume (ESV), end-diastolic volume (EDV), and end-diastolic mass. Measurements using motion corrected re-binning were compared to those using breath-held SSFP and to free-breathing SSFP with multiple averages, and were performed by two independent observers. Pearson correlation coefficients and Bland-Altman plots tested agreement across techniques. Concordance correlation coefficient and Bland-Altman analysis tested inter-observer variability. Total scan plus reconstruction times were tested for significant differences using paired t-test.

RESULTS: Measured volumes and mass obtained by motion corrected re-binning and by averaged free-breathing SSFP compared favorably to those obtained by breath-held SSFP (r = 0.9863/0.9813 for EDV, 0.9550/0.9685 for ESV, 0.9952/0.9771 for mass). Inter-observer variability was good with concordance correlation coefficients between observers across all acquisition types suggesting substantial agreement. Both motion corrected re-binning and averaged free-breathing SSFP acquisition and reconstruction times were shorter than breath-held SSFP techniques (p < 0.0001). On average, motion corrected re-binning required 3 min less than breath-held SSFP imaging, a 37% reduction in acquisition and reconstruction time.

CONCLUSIONS: The motion corrected re-binning image reconstruction technique provides robust cardiac imaging that can be used for quantification that compares favorably to breath-held SSFP as well as multiple average free-breathing SSFP, but can be obtained in a fraction of the time when using cloud-based distributed computing reconstruction.}, } @article {pmid26901201, year = {2016}, author = {Han, G and Que, W and Jia, G and Shu, L}, title = {An Efficient Virtual Machine Consolidation Scheme for Multimedia Cloud Computing.}, journal = {Sensors (Basel, Switzerland)}, volume = {16}, number = {2}, pages = {246}, pmid = {26901201}, issn = {1424-8220}, abstract = {Cloud computing has innovated the IT industry in recent years, as it can delivery subscription-based services to users in the pay-as-you-go model. Meanwhile, multimedia cloud computing is emerging based on cloud computing to provide a variety of media services on the Internet. However, with the growing popularity of multimedia cloud computing, its large energy consumption cannot only contribute to greenhouse gas emissions, but also result in the rising of cloud users' costs. Therefore, the multimedia cloud providers should try to minimize its energy consumption as much as possible while satisfying the consumers' resource requirements and guaranteeing quality of service (QoS). In this paper, we have proposed a remaining utilization-aware (RUA) algorithm for virtual machine (VM) placement, and a power-aware algorithm (PA) is proposed to find proper hosts to shut down for energy saving. These two algorithms have been combined and applied to cloud data centers for completing the process of VM consolidation. Simulation results have shown that there exists a trade-off between the cloud data center's energy consumption and service-level agreement (SLA) violations. 
Besides, the RUA algorithm is able to deal with variable workload to prevent hosts from overloading after VM placement and to reduce the SLA violations dramatically.}, } @article {pmid26897747, year = {2016}, author = {Jaghoori, MM and Bleijlevens, B and Olabarriaga, SD}, title = {1001 Ways to run AutoDock Vina for virtual screening.}, journal = {Journal of computer-aided molecular design}, volume = {30}, number = {3}, pages = {237-249}, pmid = {26897747}, issn = {1573-4951}, mesh = {*Computer-Aided Design/economics ; Databases, Pharmaceutical ; *Drug Discovery/economics/methods ; Humans ; Ligands ; Molecular Docking Simulation ; Nuclear Receptor Subfamily 4, Group A, Member 1/metabolism ; Proteins/metabolism ; Reproducibility of Results ; *Software/economics ; User-Computer Interface ; }, abstract = {Large-scale computing technologies have enabled high-throughput virtual screening involving thousands to millions of drug candidates. It is not trivial, however, for biochemical scientists to evaluate the technical alternatives and their implications for running such large experiments. Besides experience with the molecular docking tool itself, the scientist needs to learn how to run it on high-performance computing (HPC) infrastructures, and understand the impact of the choices made. Here, we review such considerations for a specific tool, AutoDock Vina, and use experimental data to illustrate the following points: (1) an additional level of parallelization increases virtual screening throughput on a multi-core machine; (2) capturing of the random seed is not enough (though necessary) for reproducibility on heterogeneous distributed computing systems; (3) the overall time spent on the screening of a ligand library can be improved by analysis of factors affecting execution time per ligand, including number of active torsions, heavy atoms and exhaustiveness. 
We also illustrate differences among four common HPC infrastructures: grid, Hadoop, small cluster and multi-core (virtual machine on the cloud). Our analysis shows that these platforms are suitable for screening experiments of different sizes. These considerations can guide scientists when choosing the best computing platform and set-up for their future large virtual screening experiments.}, } @article {pmid26887022, year = {2017}, author = {Wang, L and Liu, M and Meng, MQ}, title = {A Hierarchical Auction-Based Mechanism for Real-Time Resource Allocation in Cloud Robotic Systems.}, journal = {IEEE transactions on cybernetics}, volume = {47}, number = {2}, pages = {473-484}, doi = {10.1109/TCYB.2016.2519525}, pmid = {26887022}, issn = {2168-2275}, abstract = {Cloud computing enables users to share computing resources on-demand. The cloud computing framework cannot be directly mapped to cloud robotic systems with ad hoc networks since cloud robotic systems have additional constraints such as limited bandwidth and dynamic structure. However, most multirobotic applications with cooperative control adopt this decentralized approach to avoid a single point of failure. Robots need to continuously update intensive data to execute tasks in a coordinated manner, which implies real-time requirements. Thus, a resource allocation strategy is required, especially in such resource-constrained environments. This paper proposes a hierarchical auction-based mechanism, namely link quality matrix (LQM) auction, which is suitable for ad hoc networks by introducing a link quality indicator. The proposed algorithm produces a fast and robust method that is accurate and scalable. It reduces both global communication and unnecessary repeated computation. The proposed method is designed for firm real-time resource retrieval for physical multirobot systems. A joint surveillance scenario empirically validates the proposed mechanism by assessing several practical metrics. 
The results show that the proposed LQM auction outperforms state-of-the-art algorithms for resource allocation.}, } @article {pmid26887003, year = {2017}, author = {Mahjani, B and Toor, S and Nettelblad, C and Holmgren, S}, title = {A Flexible Computational Framework Using R and Map-Reduce for Permutation Tests of Massive Genetic Analysis of Complex Traits.}, journal = {IEEE/ACM transactions on computational biology and bioinformatics}, volume = {14}, number = {2}, pages = {381-392}, doi = {10.1109/TCBB.2016.2527639}, pmid = {26887003}, issn = {1557-9964}, mesh = {Algorithms ; Computational Biology/*methods ; Epistasis, Genetic ; Models, Genetic ; Quantitative Trait Loci/*genetics ; *Software ; }, abstract = {In quantitative trait locus (QTL) mapping significance of putative QTL is often determined using permutation testing. The computational needs to calculate the significance level are immense, 10[4] up to 10[8] or even more permutations can be needed. We have previously introduced the PruneDIRECT algorithm for multiple QTL scan with epistatic interactions. This algorithm has specific strengths for permutation testing. Here, we present a flexible, parallel computing framework for identifying multiple interacting QTL using the PruneDIRECT algorithm which uses the map-reduce model as implemented in Hadoop. The framework is implemented in R, a widely used software tool among geneticists. This enables users to rearrange algorithmic steps to adapt genetic models, search algorithms, and parallelization steps to their needs in a flexible way. Our work underlines the maturity of accessing distributed parallel computing for computationally demanding bioinformatics applications through building workflows within existing scientific environments. We investigate the PruneDIRECT algorithm, comparing its performance to exhaustive search and DIRECT algorithm using our framework on a public cloud resource. 
We find that PruneDIRECT is vastly superior for permutation testing, and perform 2 ×10[5] permutations for a 2D QTL problem in 15 hours, using 100 cloud processes. We show that our framework scales out almost linearly for a 3D QTL search.}, } @article {pmid26886482, year = {2017}, author = {de Bruin, B and Floridi, L}, title = {The Ethics of Cloud Computing.}, journal = {Science and engineering ethics}, volume = {23}, number = {1}, pages = {21-39}, pmid = {26886482}, issn = {1471-5546}, mesh = {Cloud Computing/*ethics ; Humans ; Internet ; Medical Records Systems, Computerized/ethics ; Privacy ; }, abstract = {Cloud computing is rapidly gaining traction in business. It offers businesses online services on demand (such as Gmail, iCloud and Salesforce) and allows them to cut costs on hardware and IT support. This is the first paper in business ethics dealing with this new technology. It analyzes the informational duties of hosting companies that own and operate cloud computing datacentres (e.g., Amazon). It considers the cloud services providers leasing 'space in the cloud' from hosting companies (e.g., Dropbox, Salesforce). And it examines the business and private 'clouders' using these services. The first part of the paper argues that hosting companies, services providers and clouders have mutual informational (epistemic) obligations to provide and seek information about relevant issues such as consumer privacy, reliability of services, data mining and data ownership. The concept of interlucency is developed as an epistemic virtue governing ethically effective communication. The second part considers potential forms of government restrictions on or proscriptions against the development and use of cloud computing technology. Referring to the concept of technology neutrality, it argues that interference with hosting companies and cloud services providers is hardly ever necessary or justified. 
It is argued, too, however, that businesses using cloud services (e.g., banks, law firms, hospitals etc. storing client data in the cloud) will have to follow rather more stringent regulations.}, } @article {pmid26876764, year = {2016}, author = {Leuenberger, H and Leuenberger, MN}, title = {Impact of the digital revolution on the future of pharmaceutical formulation science.}, journal = {European journal of pharmaceutical sciences : official journal of the European Federation for Pharmaceutical Sciences}, volume = {87}, number = {}, pages = {100-111}, doi = {10.1016/j.ejps.2016.02.005}, pmid = {26876764}, issn = {1879-0720}, mesh = {Animals ; *Chemistry, Pharmaceutical ; Clinical Trials as Topic ; *Computers ; Drug Combinations ; Drug Discovery ; Drugs, Generic ; Humans ; Models, Theoretical ; Pharmacokinetics ; *Technology, Pharmaceutical/economics/methods ; Therapeutic Equivalency ; }, abstract = {The ongoing digital revolution is no longer limited to the application of apps on the smart phone for daily needs but starts to affect also our professional life in formulation science. The software platform F-CAD (Formulation-Computer Aided Design) of CINCAP can be used to develop and test in silico capsule and tablet formulations. Such an approach allows the pharmaceutical industry to adopt the workflow of the automotive and aircraft industry. Thus, the first prototype of the drug delivery vehicle is prepared virtually by mimicking the composition (particle size distribution of the active drug substance and of the excipients within the tablet) and the process such as direct compression to obtain a defined porosity. The software is based on a cellular automaton (CA) process mimicking the dissolution profile of the capsule or tablet formulation. 
To take account of the type of dissolution equipment and all SOPs (Standard Operation Procedures) such as a single punch press to manufacture the tablet, a calibration of the F-CAD dissolution profile of the virtual tablet is needed. Thus, the virtual tablet becomes a copy of the real tablet. This statement is valid for all tablets manufactured within the same formulation design space. For this reason, it is important to define already for Clinical Phase I the formulation design space and to work only within this formulation design space consisting of the composition and the processes during all the Clinical Phases. Thus, it is not recommended to start with a simple capsule formulation as service dosage form and to change later to a market ready tablet formulation. The availability of F-CAD is a necessary, but not a sufficient condition to implement the workflow of the automotive and aircraft industry for developing and testing drug delivery vehicles. For a successful implementation of the new workflow, a harmonization of the equipment and the processes between the development and manufacturing departments is a must. In this context, the clinical samples for Clinical Phases I and II should be prepared with a mechanical simulator of the high-speed rotary press used for large batches for Clinical Phases III & IV. If not, the problem of working practically and virtually in different formulation design spaces will remain causing worldwide annually billion of $ losses according to the study of Benson and MacCabe. The harmonization of equipment and processes needs a close cooperation between the industrial pharmacist and the pharmaceutical engineer. In addition, Virtual Equipment Simulators (VESs) of small and large scale equipment for training and computer assisted scale-up would be desirable. A lean and intelligent management information and documentation system will improve the connectivity between the different work stations. 
Thus, in future, it may be possible to rent at low costs F-CAD as an IT (Information Technology) platform based on a cloud computing solution. By the adoption of the workflow of the automotive and aircraft industry significant savings, a reduced time to market, a lower attrition rate, and a much higher quality of the final marketed dosage form can be achieved.}, } @article {pmid26846750, year = {2016}, author = {Weng, SJ and Lai, LS and Gotcher, D and Wu, HH and Xu, YY and Yang, CW}, title = {Cloud Image Data Center for Healthcare Network in Taiwan.}, journal = {Journal of medical systems}, volume = {40}, number = {4}, pages = {89}, pmid = {26846750}, issn = {1573-689X}, mesh = {*Cloud Computing ; Communication ; Delivery of Health Care, Integrated/*organization & administration ; *Diagnostic Imaging ; Efficiency, Organizational ; Electronic Health Records/*organization & administration ; *Health Information Exchange ; Humans ; Quality of Health Care/organization & administration ; Taiwan ; Time Factors ; Waiting Lists ; }, abstract = {This paper investigates how a healthcare network in Taiwan uses a practical cloud image data center (CIDC) to communicate with its constituent hospital branches. A case study approach was used. The study was carried out in the central region of Taiwan, with four hospitals belonging to the Veterans Hospital healthcare network. The CIDC provides synchronous and asynchronous consultation among these branches. It provides storage, platforms, and services on demand to the hospitals. Any branch-client can pull up the patient's medical images from any hospital off this cloud. Patients can be examined at the branches, and the images and reports can be further evaluated by physicians in the main Taichung Veterans General Hospital (TVGH) to enhance the usage and efficiency of equipment in the various branches, thereby shortening the waiting time of patients. 
The performance of the CIDC over 5 years shows: (1) the total number of cross-hospital images accessed with CDC in the branches was 132,712; and (2) TVGH assisted the branches in keying in image reports using the CIDC 4,424 times; and (3) Implementation of the system has improved management, efficiency, speed and quality of care. Therefore, the results lead to the recommendation of continuing and expanding the cloud computing architecture to improve information sharing among branches in the healthcare network.}, } @article {pmid26843812, year = {2016}, author = {Luo, J and Wu, M and Gopukumar, D and Zhao, Y}, title = {Big Data Application in Biomedical Research and Health Care: A Literature Review.}, journal = {Biomedical informatics insights}, volume = {8}, number = {}, pages = {1-10}, pmid = {26843812}, issn = {1178-2226}, abstract = {Big data technologies are increasingly used for biomedical and health-care informatics research. Large amounts of biological and clinical data have been generated and collected at an unprecedented speed and scale. For example, the new generation of sequencing technologies enables the processing of billions of DNA sequence data per day, and the application of electronic health records (EHRs) is documenting large amounts of patient data. The cost of acquiring and analyzing biomedical data is expected to decrease dramatically with the help of technology upgrades, such as the emergence of new sequencing machines, the development of novel hardware and software for parallel computing, and the extensive expansion of EHRs. Big data applications present new opportunities to discover new knowledge and create novel methods to improve the quality of health care. The application of big data in health care is a fast-growing field, with many new discoveries and methodologies published in the last five years. 
In this paper, we review and discuss big data application in four major biomedical subdisciplines: (1) bioinformatics, (2) clinical informatics, (3) imaging informatics, and (4) public health informatics. Specifically, in bioinformatics, high-throughput experiments facilitate the research of new genome-wide association studies of diseases, and with clinical informatics, the clinical field benefits from the vast amount of collected patient data for making intelligent decisions. Imaging informatics is now more rapidly integrated with cloud platforms to share medical image data and workflows, and public health informatics leverages big data techniques for predicting and monitoring infectious disease outbreaks, such as Ebola. In this paper, we review the recent progress and breakthroughs of big data applications in these health-care domains and summarize the challenges, gaps, and opportunities to improve and advance big data applications in health care.}, } @article {pmid26840319, year = {2016}, author = {Zhu, H and Gao, L and Li, H}, title = {Secure and Privacy-Preserving Body Sensor Data Collection and Query Scheme.}, journal = {Sensors (Basel, Switzerland)}, volume = {16}, number = {2}, pages = {179}, pmid = {26840319}, issn = {1424-8220}, mesh = {Algorithms ; *Biosensing Techniques ; *Computer Security ; *Confidentiality ; *Data Collection ; Humans ; Privacy ; Smartphone ; Telemetry ; }, abstract = {With the development of body sensor networks and the pervasiveness of smart phones, different types of personal data can be collected in real time by body sensors, and the potential value of massive personal data has attracted considerable interest recently. However, the privacy issues of sensitive personal data are still challenging today. Aiming at these challenges, in this paper, we focus on the threats from telemetry interface and present a secure and privacy-preserving body sensor data collection and query scheme, named SPCQ, for outsourced computing. 
In the proposed SPCQ scheme, users' personal information is collected by body sensors in different types and converted into multi-dimension data, and each dimension is converted into the form of a number and uploaded to the cloud server, which provides a secure, efficient and accurate data query service, while the privacy of sensitive personal information and users' query data is guaranteed. Specifically, based on an improved homomorphic encryption technology over composite order group, we propose a special weighted Euclidean distance contrast algorithm (WEDC) for multi-dimension vectors over encrypted data. With the SPCQ scheme, the confidentiality of sensitive personal data, the privacy of data users' queries and accurate query service can be achieved in the cloud server. Detailed analysis shows that SPCQ can resist various security threats from telemetry interface. In addition, we also implement SPCQ on an embedded device, smart phone and laptop with a real medical database, and extensive simulation results demonstrate that our proposed SPCQ scheme is highly efficient in terms of computation and communication costs.}, } @article {pmid26835220, year = {2016}, author = {Fylaktopoulos, G and Goumas, G and Skolarikis, M and Sotiropoulos, A and Maglogiannis, I}, title = {An overview of platforms for cloud based development.}, journal = {SpringerPlus}, volume = {5}, number = {}, pages = {38}, pmid = {26835220}, issn = {2193-1801}, abstract = {This paper provides an overview of the state of the art technologies for software development in cloud environments. The surveyed systems cover the whole spectrum of cloud-based development including integrated programming environments, code repositories, software modeling, composition and documentation tools, and application management and orchestration. In this work we evaluate the existing cloud development ecosystem based on a wide number of characteristics like applicability (e.g. 
programming and database technologies supported), productivity enhancement (e.g. editor capabilities, debugging tools), support for collaboration (e.g. repository functionality, version control) and post-development application hosting and we compare the surveyed systems. The conducted survey proves that software engineering in the cloud era has made its initial steps showing potential to provide concrete implementation and execution environments for cloud-based applications. However, a number of important challenges need to be addressed for this approach to be viable. These challenges are discussed in the article, while a conclusion is drawn that although several steps have been made, a compact and reliable solution does not yet exist.}, } @article {pmid28293580, year = {2016}, author = {Weiler, A}, title = {mHealth and big data will bring meaning and value to patient-reported outcomes.}, journal = {mHealth}, volume = {2}, number = {}, pages = {2}, pmid = {28293580}, issn = {2306-9740}, abstract = {The intersection of widespread mobile adoption, cloud computing and healthcare will enable patient-reported outcomes to be used to personalize care, draw insights and shorten the cycle from research to clinical implementation. Today, patient-reported outcomes are largely collected as part of a regulatory shift to value-based or bundled care. When patients are able to record their experiences in real-time and combine them with passive data collection from sensors and mobile devices, this information can inform better care for each patient and contribute to the growing body of health data that can be used to draw insights for all patients. 
This paper explores the current limitations of patient reported outcomes and how mobile health and big data analysis unlocks their potential as a valuable tool to deliver care.}, } @article {pmid26819493, year = {2015}, author = {Weimer, M and Chen, Y and Chun, BG and Condie, T and Curino, C and Douglas, C and Lee, Y and Majestro, T and Malkhi, D and Matusevych, S and Myers, B and Narayanamurthy, S and Ramakrishnan, R and Rao, S and Sears, R and Sezgin, B and Wang, J}, title = {REEF: Retainable Evaluator Execution Framework.}, journal = {Proceedings. ACM-SIGMOD International Conference on Management of Data}, volume = {2015}, number = {}, pages = {1343-1355}, doi = {10.1145/2723372.2742793}, pmid = {26819493}, issn = {0730-8078}, support = {U54 EB020404/EB/NIBIB NIH HHS/United States ; }, abstract = {Resource Managers like Apache YARN have emerged as a critical layer in the cloud computing system stack, but the developer abstractions for leasing cluster resources and instantiating application logic are very low-level. This flexibility comes at a high cost in terms of developer effort, as each application must repeatedly tackle the same challenges (e.g., fault-tolerance, task scheduling and coordination) and re-implement common mechanisms (e.g., caching, bulk-data transfers). This paper presents REEF, a development framework that provides a control-plane for scheduling and coordinating task-level (data-plane) work on cluster resources obtained from a Resource Manager. REEF provides mechanisms that facilitate resource re-use for data caching, and state management abstractions that greatly ease the development of elastic data processing work-flows on cloud platforms that support a Resource Manager service. REEF is being used to develop several commercial offerings such as the Azure Stream Analytics service. Furthermore, we demonstrate REEF development of a distributed shell application, a machine learning algorithm, and a port of the CORFU [4] system. 
REEF is also currently an Apache Incubator project that has attracted contributors from several institutions.}, } @article {pmid26816664, year = {2016}, author = {Sun, J and Guo, Y and Wang, X and Zeng, Q}, title = {mHealth For Aging China: Opportunities and Challenges.}, journal = {Aging and disease}, volume = {7}, number = {1}, pages = {53-67}, pmid = {26816664}, issn = {2152-5250}, abstract = {The aging population with chronic and age-related diseases has become a global issue and exerted heavy burdens on the healthcare system and society. Neurological diseases are the leading chronic diseases in the geriatric population, and stroke is the leading cause of death in China. However, the uneven distribution of caregivers and critical healthcare workforce shortages are major obstacles to improving disease outcome. With the advancement of wearable health devices, cloud computing, mobile technologies and Internet of Things, mobile health (mHealth) is rapidly developing and shows a promising future in the management of chronic diseases. Its advantages include its ability to improve the quality of care, reduce the costs of care, and improve treatment outcomes by transferring in-hospital treatment to patient-centered medical treatment at home. mHealth could also enhance the international cooperation of medical providers in different time zones and the sharing of high-quality medical service resources between developed and developing countries. In this review, we focus on trends in mHealth and its clinical applications for the prevention and treatment of diseases, especially aging-related neurological diseases, and on the opportunities and challenges of mHealth in China. 
Operating models of mHealth in disease management are proposed; these models may benefit those who work within the mHealth system in developing countries and developed countries.}, } @article {pmid26799886, year = {2015}, author = {Gilbert, RL and Dionisio, JD and Forney, A and Dorin, P}, title = {The Identity Mapping Project: Demographic differences in patterns of distributed identity.}, journal = {Studies in health technology and informatics}, volume = {219}, number = {}, pages = {91-96}, pmid = {26799886}, issn = {1879-8365}, mesh = {Adult ; Age Factors ; *Character ; Cloud Computing/*statistics & numerical data ; Computer Communication Networks/*statistics & numerical data ; Computer Graphics/statistics & numerical data ; Databases, Factual/statistics & numerical data ; *Ego ; Female ; Humans ; Individuality ; *Interdisciplinary Communication ; *Intersectoral Collaboration ; Male ; Middle Aged ; Online Systems/statistics & numerical data ; Sex Factors ; Social Networking ; User-Computer Interface ; Utilization Review ; Young Adult ; }, abstract = {The advent of cloud computing and a multi-platform digital environment is giving rise to a new phase of human identity called "The Distributed Self." In this conception, aspects of the self are distributed into a variety of 2D and 3D digital personas with the capacity to reflect any number of combinations of now malleable personality traits. In this way, the source of human identity remains internal and embodied, but the expression or enactment of the self becomes increasingly external, disembodied, and distributed on demand. The Identity Mapping Project (IMP) is an interdisciplinary collaboration between psychology and computer Science designed to empirically investigate the development of distributed forms of identity. 
Methodologically, it collects a large database of "identity maps" - computerized graphical representations of how active someone is online and how their identity is expressed and distributed across 7 core digital domains: email, blogs/personal websites, social networks, online forums, online dating sites, character based digital games, and virtual worlds. The current paper reports on gender and age differences in online identity based on an initial database of distributed identity profiles.}, } @article {pmid26780711, year = {2016}, author = {Lopez-Barbosa, N and Gamarra, JD and Osma, JF}, title = {The future point-of-care detection of disease and its data capture and handling.}, journal = {Analytical and bioanalytical chemistry}, volume = {408}, number = {11}, pages = {2827-2837}, doi = {10.1007/s00216-015-9249-2}, pmid = {26780711}, issn = {1618-2650}, mesh = {Blood Gas Analysis ; *Diagnosis ; Humans ; Papillomaviridae/isolation & purification ; *Point-of-Care Systems ; }, abstract = {Point-of-care detection is a widely studied area that attracts effort and interest from a large number of fields and companies. However, there is also increased interest from the general public in this type of device, which has driven enormous changes in the design and conception of these developments and the way data is handled. Therefore, future point-of-care detection has to include communication with front-end technology, such as smartphones and networks, automation of manufacture, and the incorporation of concepts like the Internet of Things (IoT) and cloud computing. 
Three key examples, based on different sensing technology, are analyzed in detail on the basis of these items to highlight a route for the future design and development of point-of-care detection devices and their data capture and handling.}, } @article {pmid26761013, year = {2016}, author = {Wan, J and Liu, J and Shao, Z and Vasilakos, AV and Imran, M and Zhou, K}, title = {Mobile Crowd Sensing for Traffic Prediction in Internet of Vehicles.}, journal = {Sensors (Basel, Switzerland)}, volume = {16}, number = {1}, pages = {}, pmid = {26761013}, issn = {1424-8220}, abstract = {The advances in wireless communication techniques, mobile cloud computing, automotive and intelligent terminal technology are driving the evolution of vehicle ad hoc networks into the Internet of Vehicles (IoV) paradigm. This leads to a change in the vehicle routing problem from a calculation based on static data towards real-time traffic prediction. In this paper, we first address the taxonomy of cloud-assisted IoV from the viewpoint of the service relationship between cloud computing and IoV. Then, we review the traditional traffic prediction approached used by both Vehicle to Infrastructure (V2I) and Vehicle to Vehicle (V2V) communications. On this basis, we propose a mobile crowd sensing technology to support the creation of dynamic route choices for drivers wishing to avoid congestion. Experiments were carried out to verify the proposed approaches. 
Finally, we discuss the outlook of reliable traffic prediction.}, } @article {pmid26752627, year = {2016}, author = {Merchant, N and Lyons, E and Goff, S and Vaughn, M and Ware, D and Micklos, D and Antin, P}, title = {The iPlant Collaborative: Cyberinfrastructure for Enabling Data to Discovery for the Life Sciences.}, journal = {PLoS biology}, volume = {14}, number = {1}, pages = {e1002342}, pmid = {26752627}, issn = {1545-7885}, mesh = {Computational Biology/*organization & administration ; Internet ; Software ; }, abstract = {The iPlant Collaborative provides life science research communities access to comprehensive, scalable, and cohesive computational infrastructure for data management; identity management; collaboration tools; and cloud, high-performance, high-throughput computing. iPlant provides training, learning material, and best practice resources to help all researchers make the best use of their data, expand their computational skill set, and effectively manage their data and computation when working as distributed teams. iPlant's platform permits researchers to easily deposit and share their data and deploy new computational tools and analysis workflows, allowing the broader community to easily use and reuse those data and computational analyses.}, } @article {pmid26742807, year = {2016}, author = {Berggren, M and Simon, DT and Nilsson, D and Dyreklev, P and Norberg, P and Nordlinder, S and Ersman, PA and Gustafsson, G and Wikner, JJ and Hederén, J and Hentzell, H}, title = {Browsing the Real World using Organic Electronics, Si-Chips, and a Human Touch.}, journal = {Advanced materials (Deerfield Beach, Fla.)}, volume = {28}, number = {10}, pages = {1911-1916}, doi = {10.1002/adma.201504301}, pmid = {26742807}, issn = {1521-4095}, abstract = {Organic electronics have been developed according to an orthodox doctrine advocating "all-printed'', "all-organic'' and "ultra-low-cost'' primarily targeting various e-paper applications. 
In order to harvest from the great opportunities afforded with organic electronics potentially operating as communication and sensor outposts within existing and future complex communication infrastructures, high-quality computing and communication protocols must be integrated with the organic electronics. Here, we debate and scrutinize the twinning of the signal-processing capability of traditional integrated silicon chips with organic electronics and sensors, and to use our body as a natural local network with our bare hand as the browser of the physical world. The resulting platform provides a body network, i.e., a personalized web, composed of e-label sensors, bioelectronics, and mobile devices that together make it possible to monitor and record both our ambience and health-status parameters, supported by the ubiquitous mobile network and the resources of the "cloud".}, } @article {pmid26737411, year = {2015}, author = {Pinto Silva, PM and Silva Cunha, JP}, title = {SenseMyHeart: A cloud service and API for wearable heart monitors.}, journal = {Annual International Conference of the IEEE Engineering in Medicine and Biology Society. IEEE Engineering in Medicine and Biology Society. Annual International Conference}, volume = {2015}, number = {}, pages = {4986-4989}, doi = {10.1109/EMBC.2015.7319511}, pmid = {26737411}, issn = {2694-0604}, mesh = {*Cloud Computing ; Electrocardiography, Ambulatory/instrumentation/*methods ; Heart Rate/physiology ; Humans ; Male ; Mobile Applications ; Neurosurgeons ; Software ; }, abstract = {In the era of ubiquitous computing, the growing adoption of wearable systems and body sensor networks is trailing the path for new research and software for cardiovascular intensity, energy expenditure and stress and fatigue detection through cardiovascular monitoring. Several systems have received clinical-certification and provide huge amounts of reliable heart-related data in a continuous basis. 
PhysioNet provides equally reliable open-source software tools for ECG processing and analysis that can be combined with these devices. However, this software remains difficult to use in a mobile environment and for researchers unfamiliar with Linux-based systems. In the present paper we present an approach that aims at tackling these limitations by developing a cloud service that provides an API for a PhysioNet-based pipeline for ECG processing and Heart Rate Variability measurement. We describe the proposed solution, along with its advantages and tradeoffs. We also present some client tools (windows and Android) and several projects where the developed cloud service has been used successfully as a standard for Heart Rate and Heart Rate Variability studies in different scenarios.}, } @article {pmid26736909, year = {2015}, author = {Bellafqira, R and Coatrieux, G and Bouslimi, D and Quellec, G}, title = {Content-based image retrieval in homomorphic encryption domain.}, journal = {Annual International Conference of the IEEE Engineering in Medicine and Biology Society. IEEE Engineering in Medicine and Biology Society. Annual International Conference}, volume = {2015}, number = {}, pages = {2944-2947}, doi = {10.1109/EMBC.2015.7319009}, pmid = {26736909}, issn = {2694-0604}, mesh = {Cloud Computing ; Confidentiality ; *Databases, Factual ; Information Storage and Retrieval ; }, abstract = {In this paper, we propose a secure implementation of a content-based image retrieval (CBIR) method that makes possible diagnosis aid systems to work in externalized environment and with outsourced data as in cloud computing. This one works with homomorphic encrypted images from which it extracts wavelet based image features next used for subsequent image comparison. By doing so, our system allows a physician to retrieve the most similar images to a query image in an outsourced database while preserving data confidentiality. 
Our Secure CBIR is the first one that proposes to work with global image features extracted from encrypted images and does not induce extra communications in-between the client and the server. Experimental results show it achieves retrieval performance as good as if images were processed non-encrypted.}, } @article {pmid26733391, year = {2015}, author = {Zhang, Y and Dai, W and Jiang, X and Xiong, H and Wang, S}, title = {FORESEE: Fully Outsourced secuRe gEnome Study basEd on homomorphic Encryption.}, journal = {BMC medical informatics and decision making}, volume = {15 Suppl 5}, number = {Suppl 5}, pages = {S5}, pmid = {26733391}, issn = {1472-6947}, support = {R21LM012060/LM/NLM NIH HHS/United States ; R21 LM012060/LM/NLM NIH HHS/United States ; U54HL108460/HL/NHLBI NIH HHS/United States ; K99HG008175/HG/NHGRI NIH HHS/United States ; R00LM011392/LM/NLM NIH HHS/United States ; R00 LM011392/LM/NLM NIH HHS/United States ; }, mesh = {Cloud Computing/*standards ; Computer Security/*standards ; Genetic Privacy/*standards ; Genome-Wide Association Study/*standards ; Humans ; Outsourced Services/standards ; }, abstract = {BACKGROUND: The increasing availability of genome data motivates massive research studies in personalized treatment and precision medicine. Public cloud services provide a flexible way to mitigate the storage and computation burden in conducting genome-wide association studies (GWAS). However, data privacy has been widely concerned when sharing the sensitive information in a cloud environment.

METHODS: We presented a novel framework (FORESEE: Fully Outsourced secuRe gEnome Study basEd on homomorphic Encryption) to fully outsource GWAS (i.e., chi-square statistic computation) using homomorphic encryption. The proposed framework enables secure divisions over encrypted data. We introduced two division protocols (i.e., secure errorless division and secure approximation division) with a trade-off between complexity and accuracy in computing chi-square statistics.

RESULTS: The proposed framework was evaluated for the task of chi-square statistic computation with two case-control datasets from the 2015 iDASH genome privacy protection challenge. Experimental results show that the performance of FORESEE can be significantly improved through algorithmic optimization and parallel computation. Remarkably, the secure approximation division provides significant performance gain, but without missing any significant SNPs in the chi-square association test using the aforementioned datasets.

CONCLUSIONS: Unlike many existing HME based studies, in which final results need to be computed by the data owner due to the lack of the secure division operation, the proposed FORESEE framework supports complete outsourcing to the cloud and outputs the final encrypted chi-square statistics.}, } @article {pmid26721184, year = {2016}, author = {Dalman, T and Wiechert, W and Nöh, K}, title = {A scientific workflow framework for (13)C metabolic flux analysis.}, journal = {Journal of biotechnology}, volume = {232}, number = {}, pages = {12-24}, doi = {10.1016/j.jbiotec.2015.12.032}, pmid = {26721184}, issn = {1873-4863}, mesh = {*Carbon Isotopes/analysis/metabolism ; *Cloud Computing ; Metabolic Flux Analysis/*methods ; Models, Biological ; *Software ; }, abstract = {Metabolic flux analysis (MFA) with (13)C labeling data is a high-precision technique to quantify intracellular reaction rates (fluxes). One of the major challenges of (13)C MFA is the interactivity of the computational workflow according to which the fluxes are determined from the input data (metabolic network model, labeling data, and physiological rates). Here, the workflow assembly is inevitably determined by the scientist who has to consider interacting biological, experimental, and computational aspects. Decision-making is context dependent and requires expertise, rendering an automated evaluation process hardly possible. Here, we present a scientific workflow framework (SWF) for creating, executing, and controlling on demand (13)C MFA workflows. (13)C MFA-specific tools and libraries, such as the high-performance simulation toolbox 13CFLUX2, are wrapped as web services and thereby integrated into a service-oriented architecture. Besides workflow steering, the SWF features transparent provenance collection and enables full flexibility for ad hoc scripting solutions. To handle compute-intensive tasks, cloud computing is supported. 
We demonstrate how the challenges posed by (13)C MFA workflows can be solved with our approach on the basis of two proof-of-concept use cases.}, } @article {pmid32362959, year = {2016}, author = {Sandhu, R and Gill, HK and Sood, SK}, title = {Smart monitoring and controlling of Pandemic Influenza A (H1N1) using Social Network Analysis and cloud computing.}, journal = {Journal of computational science}, volume = {12}, number = {}, pages = {11-22}, pmid = {32362959}, issn = {1877-7503}, abstract = {H1N1 is an infectious virus which, when spread affects a large volume of the population. It is an airborne disease that spreads easily and has a high death rate. Development of healthcare support systems using cloud computing is emerging as an effective solution with the benefits of better quality of service, reduced costs and flexibility. In this paper, an effective cloud computing architecture is proposed which predicts H1N1 infected patients and provides preventions to control infection rate. It consists of four processing components along with secure cloud storage medical database. The random decision tree is used to initially assess the infection in any patient depending on his/her symptoms. Social Network Analysis (SNA) is used to present the state of the outbreak. The proposed architecture is tested on synthetic data generated for two million users. The system provided 94% accuracy for the classification and around 81% of the resource utilization on Amazon EC2 cloud. The key point of the paper is the use of SNA graphs to calculate role of an infected user in spreading the outbreak known as Outbreak Role Index (ORI). 
It will help government agencies and healthcare departments to present, analyze and prevent outbreak effectively.}, } @article {pmid32214655, year = {2016}, author = {Sandhu, R and Sood, SK and Kaur, G}, title = {An intelligent system for predicting and preventing MERS-CoV infection outbreak.}, journal = {The Journal of supercomputing}, volume = {72}, number = {8}, pages = {3033-3056}, pmid = {32214655}, issn = {0920-8542}, abstract = {MERS-CoV is an airborne disease which spreads easily and has high death rate. To predict and prevent MERS-CoV, real-time analysis of user's health data and his/her geographic location are fundamental. Development of healthcare systems using cloud computing is emerging as an effective solution having benefits of better quality of service, reduced cost, scalability, and flexibility. In this paper, an effective cloud computing system is proposed which predicts MERS-CoV-infected patients using Bayesian belief network and provides geographic-based risk assessment to control its outbreak. The proposed system is tested on synthetic data generated for 0.2 million users. System provided high accuracy for classification and appropriate geographic-based risk assessment. The key point of this paper is the use of geographic positioning system to represent each MERS-CoV users on Google maps so that possibly infected users can be quarantined as early as possible. 
It will help uninfected citizens to avoid regional exposure and the government agencies to manage the problem more effectively.}, } @article {pmid30137706, year = {2016}, author = {Abbott, E and Faunce, T}, title = {Seeding Australian Regulation of Genomics in the Cloud – Elizabeth Abbott and Thomas Faunce.}, journal = {Journal of law and medicine}, volume = {24}, number = {2}, pages = {314-336}, pmid = {30137706}, issn = {1320-159X}, mesh = {Australia ; *Computer Communication Networks ; Genetic Privacy/*legislation & jurisprudence ; Genetic Research/*ethics/legislation & jurisprudence ; *Genomics ; *Government Regulation ; Humans ; }, abstract = {Cloud computing has facilitated a revolution in genome sequencing. As big data and personalised medicine increase in popularity in Australia, are the legal and regulatory regimes surrounding this nascent area of scientific research and clinical practice able to protect this private information? An examination of the current regulatory regime in Australia, including the Privacy Act 1988 (Cth) and medical research laws that govern cloud-based genomics research highlights that the key challenge of such research is to protect the interests of participants while also promoting collaborative research processes. This examination also highlights the potential effect that the Trans-Pacific Partnership Agreement’s Electronic Commerce Chapter may have had on using the cloud for genomics and what the consequences may have been for researchers, clinicians and individuals. 
Lessons learnt here will be relevant to studying similar impacts from other trade and investment agreements such as the Trade in Services Agreement (TiSA).}, } @article {pmid29431094, year = {2016}, author = {Patel, V and Armstrong, D and Ganguli, M and Roopra, S and Kantipudi, N and Albashir, S and Kamath, MV}, title = {Deep Learning in Gastrointestinal Endoscopy.}, journal = {Critical reviews in biomedical engineering}, volume = {44}, number = {6}, pages = {493-504}, doi = {10.1615/CritRevBiomedEng.2017025035}, pmid = {29431094}, issn = {1943-619X}, abstract = {Gastrointestinal (GI) endoscopy is used to inspect the lumen or interior of the GI tract for several purposes, including, (1) making a clinical diagnosis, in real time, based on the visual appearances; (2) taking targeted tissue samples for subsequent histopathological examination; and (3) in some cases, performing therapeutic interventions targeted at specific lesions. GI endoscopy is therefore predicated on the assumption that the operator-the endoscopist-is able to identify and characterize abnormalities or lesions accurately and reproducibly. However, as in other areas of clinical medicine, such as histopathology and radiology, many studies have documented marked interobserver and intraobserver variability in lesion recognition. Thus, there is a clear need and opportunity for techniques or methodologies that will enhance the quality of lesion recognition and diagnosis and improve the outcomes of GI endoscopy. Deep learning models provide a basis to make better clinical decisions in medical image analysis. Biomedical image segmentation, classification, and registration can be improved with deep learning. Recent evidence suggests that the application of deep learning methods to medical image analysis can contribute significantly to computer-aided diagnosis. 
Deep learning models are usually considered to be more flexible and provide reliable solutions for image analysis problems compared to conventional computer vision models. The use of fast computers offers the possibility of real-time support that is important for endoscopic diagnosis, which has to be made in real time. Advanced graphics processing units and cloud computing have also favored the use of machine learning, and more particularly, deep learning for patient care. This paper reviews the rapidly evolving literature on the feasibility of applying deep learning algorithms to endoscopic imaging.}, } @article {pmid26710255, year = {2015}, author = {Wu, L and Xue, L and Li, C and Lv, X and Chen, Z and Guo, M and Xie, Z}, title = {A Geospatial Information Grid Framework for Geological Survey.}, journal = {PloS one}, volume = {10}, number = {12}, pages = {e0145312}, pmid = {26710255}, issn = {1932-6203}, mesh = {Electronic Data Processing ; *Geographic Information Systems ; *Geographic Mapping ; Geography/methods ; Geology/*methods ; Information Dissemination/*methods ; Information Storage and Retrieval/*methods ; Internet ; Models, Theoretical ; }, abstract = {The use of digital information in geological fields is becoming very important. Thus, informatization in geological surveys should not stagnate as a result of the level of data accumulation. The integration and sharing of distributed, multi-source, heterogeneous geological information is an open problem in geological domains. Applications and services use geological spatial data with many features, including being cross-region and cross-domain and requiring real-time updating. As a result of these features, desktop and web-based geographic information systems (GISs) experience difficulties in meeting the demand for geological spatial information. 
To facilitate the real-time sharing of data and services in distributed environments, a GIS platform that is open, integrative, reconfigurable, reusable and elastic would represent an indispensable tool. The purpose of this paper is to develop a geological cloud-computing platform for integrating and sharing geological information based on a cloud architecture. Thus, the geological cloud-computing platform defines geological ontology semantics; designs a standard geological information framework and a standard resource integration model; builds a peer-to-peer node management mechanism; achieves the description, organization, discovery, computing and integration of the distributed resources; and provides the distributed spatial meta service, the spatial information catalog service, the multi-mode geological data service and the spatial data interoperation service. The geological survey information cloud-computing platform has been implemented, and based on the platform, some geological data services and geological processing services were developed. Furthermore, an iron mine resource forecast and an evaluation service is introduced in this paper.}, } @article {pmid26682133, year = {2015}, author = {Wang, D and Ma, D and Wong, ML and Wáng, YX}, title = {Recent advances in surgical planning & navigation for tumor biopsy and resection.}, journal = {Quantitative imaging in medicine and surgery}, volume = {5}, number = {5}, pages = {640-648}, pmid = {26682133}, issn = {2223-4292}, abstract = {This paper highlights recent advancements in imaging technologies for surgical planning and navigation in tumor biopsy and resection which need high-precision in detection and characterization of lesion margin in preoperative planning and intraoperative navigation. 
Multimodality image-guided surgery platforms brought great benefits in surgical planning and operation accuracy via registration of various data sets with information on morphology [X-ray, magnetic resonance (MR), computed tomography (CT)], function connectivity [functional magnetic resonance imaging (fMRI), diffusion tensor imaging (DTI), rest-status fMRI], or molecular activity [positron emission tomography (PET)]. These image-guided platforms provide a correspondence between the pre-operative surgical planning and intra-operative procedure. We envisage that the combination of advanced multimodal imaging, three-dimensional (3D) printing, and cloud computing will play increasingly important roles in planning and navigation of surgery for tumor biopsy and resection in the coming years.}, } @article {pmid26667464, year = {2016}, author = {Arsenijevic, V and Davis-Dusenbery, BN}, title = {Reproducible, Scalable Fusion Gene Detection from RNA-Seq.}, journal = {Methods in molecular biology (Clifton, N.J.)}, volume = {1381}, number = {}, pages = {223-237}, doi = {10.1007/978-1-4939-3204-7_13}, pmid = {26667464}, issn = {1940-6029}, mesh = {*Cloud Computing ; *Gene Fusion ; Genomics/*methods ; High-Throughput Nucleotide Sequencing/*methods ; Humans ; Neoplasms/genetics ; RNA/genetics ; Reproducibility of Results ; Sequence Analysis, RNA/*methods ; }, abstract = {Chromosomal rearrangements resulting in the creation of novel gene products, termed fusion genes, have been identified as driving events in the development of multiple types of cancer. As these gene products typically do not exist in normal cells, they represent valuable prognostic and therapeutic targets. Advances in next-generation sequencing and computational approaches have greatly improved our ability to detect and identify fusion genes. Nevertheless, these approaches require significant computational resources. 
Here we describe an approach which leverages cloud computing technologies to perform fusion gene detection from RNA sequencing data at any scale. We additionally highlight methods to enhance reproducibility of bioinformatics analyses which may be applied to any next-generation sequencing experiment.}, } @article {pmid26667455, year = {2016}, author = {Pilarsky, C and Nanduri, LK and Roy, J}, title = {Gene Expression Analysis in the Age of Mass Sequencing: An Introduction.}, journal = {Methods in molecular biology (Clifton, N.J.)}, volume = {1381}, number = {}, pages = {67-73}, doi = {10.1007/978-1-4939-3204-7_4}, pmid = {26667455}, issn = {1940-6029}, mesh = {Gene Expression Profiling/methods ; High-Throughput Nucleotide Sequencing/*methods ; Humans ; RNA/genetics ; Sequence Analysis, RNA/*methods ; Software ; Workflow ; }, abstract = {During the last years the technology used for gene expression analysis has changed dramatically. The old mainstay, DNA microarray, has served its due course and will soon be replaced by next-generation sequencing (NGS), the Swiss army knife of modern high-throughput nucleic acid-based analysis. Therefore preparation technologies have to adapt to suit the emerging NGS technology platform. Moreover, interpretation of the results is still time consuming and employs the use of high-end computers usually not found in molecular biology laboratories. Alternatively, cloud computing might solve this problem. Nevertheless, these new challenges have to be embraced for gene expression analysis in general.}, } @article {pmid26664434, year = {2015}, author = {Yates, EJ and Dixon, LC}, title = {PageRank as a method to rank biomedical literature by importance.}, journal = {Source code for biology and medicine}, volume = {10}, number = {}, pages = {16}, pmid = {26664434}, issn = {1751-0473}, abstract = {BACKGROUND: Optimal ranking of literature importance is vital in overcoming article overload. 
Existing ranking methods are typically based on raw citation counts, giving a sum of 'inbound' links with no consideration of citation importance. PageRank, an algorithm originally developed for ranking webpages at the search engine, Google, could potentially be adapted to bibliometrics to quantify the relative importance weightings of a citation network. This article seeks to validate such an approach on the freely available, PubMed Central open access subset (PMC-OAS) of biomedical literature.

RESULTS: On-demand cloud computing infrastructure was used to extract a citation network from over 600,000 full-text PMC-OAS articles. PageRanks and citation counts were calculated for each node in this network. PageRank is highly correlated with citation count (R = 0.905, P < 0.01) and we thus validate the former as a surrogate of literature importance. Furthermore, the algorithm can be run in trivial time on cheap, commodity cluster hardware, lowering the barrier of entry for resource-limited open access organisations.

CONCLUSIONS: PageRank can be trivially computed on commodity cluster hardware and is linearly correlated with citation count. Given its putative benefits in quantifying relative importance, we suggest it may enrich the citation network, thereby overcoming the existing inadequacy of citation counts alone. We thus suggest PageRank as a feasible supplement to, or replacement of, existing bibliometric ranking methods.}, } @article {pmid26646249, year = {2016}, author = {Sobeslav, V and Maresova, P and Krejcar, O and Franca, TC and Kuca, K}, title = {Use of cloud computing in biomedicine.}, journal = {Journal of biomolecular structure & dynamics}, volume = {34}, number = {12}, pages = {2688-2697}, doi = {10.1080/07391102.2015.1127182}, pmid = {26646249}, issn = {1538-0254}, mesh = {*Biomedical Research/economics/instrumentation/methods ; *Cloud Computing ; Drug Discovery/methods ; *Medical Informatics/economics/instrumentation/methods ; *Medicine/methods ; }, abstract = {Nowadays, biomedicine is characterised by a growing need for processing of large amounts of data in real time. This leads to new requirements for information and communication technologies (ICT). Cloud computing offers a solution to these requirements and provides many advantages, such as cost savings, elasticity and scalability of using ICT. The aim of this paper is to explore the concept of cloud computing and the related use of this concept in the area of biomedicine. Authors offer a comprehensive analysis of the implementation of the cloud computing approach in biomedical research, decomposed into infrastructure, platform and service layer, and a recommendation for processing large amounts of data in biomedicine. Firstly, the paper describes the appropriate forms and technological solutions of cloud computing. Secondly, the high-end computing paradigm of cloud computing aspects is analysed. 
Finally, the potential and current use of applications in scientific research of this technology in biomedicine is discussed.}, } @article {pmid26644398, year = {2016}, author = {Kass-Hout, TA and Xu, Z and Mohebbi, M and Nelsen, H and Baker, A and Levine, J and Johanson, E and Bright, RA}, title = {OpenFDA: an innovative platform providing access to a wealth of FDA's publicly available data.}, journal = {Journal of the American Medical Informatics Association : JAMIA}, volume = {23}, number = {3}, pages = {596-600}, pmid = {26644398}, issn = {1527-974X}, mesh = {*Adverse Drug Reaction Reporting Systems ; *Datasets as Topic ; Drug Labeling ; Government Regulation ; Ownership ; Product Recalls and Withdrawals ; *Software ; United States ; *United States Food and Drug Administration ; }, abstract = {OBJECTIVE: The objective of openFDA is to facilitate access and use of big important Food and Drug Administration public datasets by developers, researchers, and the public through harmonization of data across disparate FDA datasets provided via application programming interfaces (APIs).

MATERIALS AND METHODS: Using cutting-edge technologies deployed on FDA's new public cloud computing infrastructure, openFDA provides open data for easier, faster (over 300 requests per second per process), and better access to FDA datasets; open source code and documentation shared on GitHub for open community contributions of examples, apps and ideas; and infrastructure that can be adopted for other public health big data challenges.

RESULTS: Since its launch on June 2, 2014, openFDA has developed four APIs for drug and device adverse events, recall information for all FDA-regulated products, and drug labeling. There have been more than 20 million API calls (more than half from outside the United States), 6000 registered users, 20,000 connected Internet Protocol addresses, and dozens of new software (mobile or web) apps developed. A case study demonstrates a use of openFDA data to understand an apparent association of a drug with an adverse event.

CONCLUSION: With easier and faster access to these datasets, consumers worldwide can learn more about FDA-regulated products.}, } @article {pmid26623386, year = {2015}, author = {Temkar, P}, title = {Clinical operations generation next… The age of technology and outsourcing.}, journal = {Perspectives in clinical research}, volume = {6}, number = {4}, pages = {175-178}, doi = {10.4103/2229-3485.167098}, pmid = {26623386}, issn = {2229-3485}, abstract = {Huge cost pressures and the need to drive faster approvals has driven a technology transformation in the clinical trial (CT) industry. The CT industry is thus leveraging mobile data, cloud computing, social media, robotic automation, and electronic source to drive efficiencies in a big way. Outsourcing of clinical operations support services to technology companies with a clinical edge is gaining tremendous importance. This paper provides an overview of current technology trends, applicable Food and Drug Administration (FDA) guidelines, basic challenges that the pharma industry is facing in trying to implement such changes and its shift towards outsourcing these services to enable it to focus on site operations.}, } @article {pmid26606388, year = {2015}, author = {Pan, Y and Ding, S and Fan, W and Li, J and Yang, S}, title = {Trust-Enhanced Cloud Service Selection Model Based on QoS Analysis.}, journal = {PloS one}, volume = {10}, number = {11}, pages = {e0143448}, pmid = {26606388}, issn = {1932-6203}, mesh = {*Cloud Computing/standards ; Humans ; *Models, Theoretical ; *Trust ; }, abstract = {Cloud computing technology plays a very important role in many areas, such as in the construction and development of the smart city. Meanwhile, numerous cloud services appear on the cloud-based platform. Therefore how to select trustworthy cloud services remains a significant problem in such platforms, and has been extensively investigated owing to the ever-growing needs of users. 
However, trust relationship in social network has not been taken into account in existing methods of cloud service selection and recommendation. In this paper, we propose a cloud service selection model based on the trust-enhanced similarity. Firstly, the direct, indirect, and hybrid trust degrees are measured based on the interaction frequencies among users. Secondly, we estimate the overall similarity by combining the experience usability measured based on Jaccard's Coefficient and the numerical distance computed by Pearson Correlation Coefficient. Then through using the trust degree to modify the basic similarity, we obtain a trust-enhanced similarity. Finally, we utilize the trust-enhanced similarity to find similar trusted neighbors and predict the missing QoS values as the basis of cloud service selection and recommendation. The experimental results show that our approach is able to obtain optimal results via adjusting parameters and exhibits high effectiveness. The cloud services ranking by our model also have better QoS properties than other methods in the comparison experiments.}, } @article {pmid26604801, year = {2015}, author = {Ocaña, K and de Oliveira, D}, title = {Parallel computing in genomic research: advances and applications.}, journal = {Advances and applications in bioinformatics and chemistry : AABC}, volume = {8}, number = {}, pages = {23-35}, pmid = {26604801}, issn = {1178-6949}, abstract = {Today's genomic experiments have to process the so-called "biological big data" that is now reaching the size of Terabytes and Petabytes. To process this huge amount of data, scientists may require weeks or months if they use their own workstations. Parallelism techniques and high-performance computing (HPC) environments can be applied for reducing the total processing time and to ease the management, treatment, and analyses of this data. 
However, running bioinformatics experiments in HPC environments such as clouds, grids, clusters, and graphics processing unit requires the expertise from scientists to integrate computational, biological, and mathematical techniques and technologies. Several solutions have already been proposed to allow scientists for processing their genomic experiments using HPC capabilities and parallelism techniques. This article brings a systematic review of literature that surveys the most recently published research involving genomics and parallel computing. Our objective is to gather the main characteristics, benefits, and challenges that can be considered by scientists when running their genomic experiments to benefit from parallelism techniques and HPC capabilities.}, } @article {pmid26582268, year = {2015}, author = {Khazaei, H and McGregor, C and Eklund, JM and El-Khatib, K}, title = {Real-Time and Retrospective Health-Analytics-as-a-Service: A Novel Framework.}, journal = {JMIR medical informatics}, volume = {3}, number = {4}, pages = {e36}, pmid = {26582268}, issn = {2291-9694}, abstract = {BACKGROUND: Analytics-as-a-service (AaaS) is one of the latest provisions emerging from the cloud services family. Utilizing this paradigm of computing in health informatics will benefit patients, care providers, and governments significantly. This work is a novel approach to realize health analytics as services in critical care units in particular.

OBJECTIVE: To design, implement, evaluate, and deploy an extendable big-data compatible framework for health-analytics-as-a-service that offers both real-time and retrospective analysis.

METHODS: We present a novel framework that can realize health data analytics-as-a-service. The framework is flexible and configurable for different scenarios by utilizing the latest technologies and best practices for data acquisition, transformation, storage, analytics, knowledge extraction, and visualization. We have instantiated the proposed method, through the Artemis project, that is, a customization of the framework for live monitoring and retrospective research on premature babies and ill term infants in neonatal intensive care units (NICUs).

RESULTS: We demonstrated the proposed framework in this paper for monitoring NICUs and refer to it as the Artemis-In-Cloud (Artemis-IC) project. A pilot of Artemis has been deployed in the SickKids hospital NICU. By infusing the output of this pilot set up to an analytical model, we predict important performance measures for the final deployment of Artemis-IC. This process can be carried out for other hospitals following the same steps with minimal effort. SickKids' NICU has 36 beds and can classify the patients generally into 5 different types including surgical and premature babies. The arrival rate is estimated as 4.5 patients per day, and the average length of stay was calculated as 16 days. Mean number of medical monitoring algorithms per patient is 9, which renders 311 live algorithms for the whole NICU running on the framework. The memory and computation power required for Artemis-IC to handle the SickKids NICU will be 32 GB and 16 CPU cores, respectively. The required amount of storage was estimated as 8.6 TB per year. There will always be 34.9 patients in SickKids NICU on average. Currently, 46% of patients cannot get admitted to SickKids NICU due to lack of resources. By increasing the capacity to 90 beds, all patients can be accommodated. For such a provisioning, Artemis-IC will need 16 TB of storage per year, 55 GB of memory, and 28 CPU cores.

CONCLUSIONS: Our contributions in this work relate to a cloud architecture for the analysis of physiological data for clinical decisions support for tertiary care use. We demonstrate how to size the equipment needed in the cloud for that architecture based on a very realistic assessment of the patient characteristics and the associated clinical decision support algorithms that would be required to run for those patients. We show the principle of how this could be performed and furthermore that it can be replicated for any critical care setting within a tertiary institution.}, } @article {pmid26578764, year = {2015}, author = {Herbst, T and Scheidl, T and Fink, M and Handsteiner, J and Wittmann, B and Ursin, R and Zeilinger, A}, title = {Teleportation of entanglement over 143 km.}, journal = {Proceedings of the National Academy of Sciences of the United States of America}, volume = {112}, number = {46}, pages = {14202-14205}, pmid = {26578764}, issn = {1091-6490}, abstract = {As a direct consequence of the no-cloning theorem, the deterministic amplification as in classical communication is impossible for unknown quantum states. This calls for more advanced techniques in a future global quantum network, e.g., for cloud quantum computing. A unique solution is the teleportation of an entangled state, i.e., entanglement swapping, representing the central resource to relay entanglement between distant nodes. Together with entanglement purification and a quantum memory it constitutes a so-called quantum repeater. Since the aforementioned building blocks have been individually demonstrated in laboratory setups only, the applicability of the required technology in real-world scenarios remained to be proven. Here we present a free-space entanglement-swapping experiment between the Canary Islands of La Palma and Tenerife, verifying the presence of quantum entanglement between two previously independent photons separated by 143 km. 
We obtained an expectation value for the entanglement-witness operator, more than 6 SDs beyond the classical limit. By consecutive generation of the two required photon pairs and space-like separation of the relevant measurement events, we also showed the feasibility of the swapping protocol in a long-distance scenario, where the independence of the nodes is highly demanded. Because our results already allow for efficient implementation of entanglement purification, we anticipate our research to lay the ground for a fully fledged quantum repeater over a realistic high-loss and even turbulent quantum channel.}, } @article {pmid26576208, year = {2015}, author = {Luan, A and Momeni, A and Lee, GK and Galvez, MG}, title = {Cloud-Based Applications for Organizing and Reviewing Plastic Surgery Content.}, journal = {Eplasty}, volume = {15}, number = {}, pages = {e48}, pmid = {26576208}, issn = {1937-5719}, abstract = {Cloud-based applications including Box, Dropbox, Google Drive, Evernote, Notability, and Zotero are available for smartphones, tablets, and laptops and have revolutionized the manner in which medical students and surgeons read and utilize plastic surgery literature. Here we provide an overview of the use of Cloud computing in practice and propose an algorithm for organizing the vast amount of plastic surgery literature. 
Given the incredible amount of data being produced in plastic surgery and other surgical subspecialties, it is prudent for plastic surgeons to lead the process of providing solutions for the efficient organization and effective integration of the ever-increasing data into clinical practice.}, } @article {pmid26571636, year = {2015}, author = {Morrow, T}, title = {Hospitals will send an integrated nurse home with each discharge.}, journal = {World hospitals and health services : the official journal of the International Hospital Federation}, volume = {51}, number = {3}, pages = {11-14}, pmid = {26571636}, issn = {1029-0540}, mesh = {*Home Care Services ; *Systems Integration ; *User-Computer Interface ; *Wireless Technology ; }, abstract = {Hospitals must adapt to the rapidly changing environment of risk by changing the health behavior of their population. There is only one way to do this efficiently and at scale; send a nurse home with every patient at the time of discharge. That nurse can ensure adherence to medication and slowly, over time, transform personal behavior to evidence based levels ... basically taking their medication as prescribed, changing eating habits, increasing exercise, getting people to throw away their cigarettes, teaching them how to cope, improving their sleep and reducing their stress. But, this approach will require a nurse to basically "live" with the patient for prolonged periods of time, as bad health behaviors are quick to start but slow to change or end. The rapid developments in artificial intelligence and natural language understanding paired with cloud based computing and integrated with a variety of data sources has led to a new marketplace comprised of cognitive technologies that can emulate even the most creative, knowledgeable and effective nurse. Termed the Virtual Health Assistant, your patients can literally talk to these agents using normal conversational language. 
The possibility to send a nurse home with each patient to maintain adherence and prevent readmissions has arrived. The technology is available. Who will step forward to reap the rewards first?}, } @article {pmid26555730, year = {2015}, author = {Batista, BG and Estrella, JC and Ferreira, CH and Filho, DM and Nakamura, LH and Reiff-Marganiec, S and Santana, MJ and Santana, RH}, title = {Performance Evaluation of Resource Management in Cloud Computing Environments.}, journal = {PloS one}, volume = {10}, number = {11}, pages = {e0141914}, pmid = {26555730}, issn = {1932-6203}, mesh = {*Cloud Computing ; Humans ; *Software ; *Workload ; }, abstract = {Cloud computing is a computational model in which resource providers can offer on-demand services to clients in a transparent way. However, to be able to guarantee quality of service without limiting the number of accepted requests, providers must be able to dynamically manage the available resources so that they can be optimized. This dynamic resource management is not a trivial task, since it involves meeting several challenges related to workload modeling, virtualization, performance modeling, deployment and monitoring of applications on virtualized resources. This paper carries out a performance evaluation of a module for resource management in a cloud environment that includes handling available resources during execution time and ensuring the quality of service defined in the service level agreement. An analysis was conducted of different resource configurations to define which dimension of resource scaling has a real influence on client requests. The results were used to model and implement a simulated cloud system, in which the allocated resource can be changed on-the-fly, with a corresponding change in price. 
In this way, the proposed module seeks to satisfy both the client by ensuring quality of service, and the provider by ensuring the best use of resources at a fair price.}, } @article {pmid26552098, year = {2017}, author = {Harvey, BS and Ji, SY}, title = {Cloud-Scale Genomic Signals Processing for Robust Large-Scale Cancer Genomic Microarray Data Analysis.}, journal = {IEEE journal of biomedical and health informatics}, volume = {21}, number = {1}, pages = {238-245}, doi = {10.1109/JBHI.2015.2496323}, pmid = {26552098}, issn = {2168-2208}, mesh = {Cell Line, Tumor ; Cloud Computing ; Databases, Genetic ; Genomics/*methods ; Humans ; Neoplasms/*genetics/metabolism ; Oligonucleotide Array Sequence Analysis/*methods ; *Signal Processing, Computer-Assisted ; }, abstract = {As microarray data available to scientists continues to increase in size and complexity, it has become overwhelmingly important to find multiple ways to bring forth oncological inference to the bioinformatics community through the analysis of large-scale cancer genomic (LSCG) DNA and mRNA microarray data that is useful to scientists. Though there have been many attempts to elucidate the issue of bringing forth biological interpretation by means of wavelet preprocessing and classification, there has not been a research effort that focuses on a cloud-scale distributed parallel (CSDP) separable 1-D wavelet decomposition technique for denoising through differential expression thresholding and classification of LSCG microarray data. This research presents a novel methodology that utilizes a CSDP separable 1-D method for wavelet-based transformation in order to initialize a threshold which will retain significantly expressed genes through the denoising process for robust classification of cancer patients. Additionally, the overall study was implemented and encompassed within CSDP environment. 
The utilization of cloud computing and wavelet-based thresholding for denoising was used for the classification of samples within the Global Cancer Map, Cancer Cell Line Encyclopedia, and The Cancer Genome Atlas. The results proved that separable 1-D parallel distributed wavelet denoising in the cloud and differential expression thresholding increased the computational performance and enabled the generation of higher quality LSCG microarray datasets, which led to more accurate classification results.}, } @article {pmid26543718, year = {2015}, author = {Wang, X and Yang, Y and Zeng, Y}, title = {Accurate mobile malware detection and classification in the cloud.}, journal = {SpringerPlus}, volume = {4}, number = {}, pages = {583}, pmid = {26543718}, issn = {2193-1801}, abstract = {As the dominator of the Smartphone operating system market, Android has consequently attracted the attention of malware authors and researchers alike. The number of types of Android malware is increasing rapidly regardless of the considerable number of proposed malware analysis systems. In this paper, by taking advantage of the low false-positive rate of misuse detection and the ability of anomaly detection to detect zero-day malware, we propose a novel hybrid detection system based on a new open-source framework CuckooDroid, which enables the use of Cuckoo Sandbox's features to analyze Android malware through dynamic and static analysis. Our proposed system mainly consists of two parts: anomaly detection engine performing abnormal apps detection through dynamic analysis; signature detection engine performing known malware detection and classification with the combination of static and dynamic analysis. We evaluate our system using 5560 malware samples and 6000 benign samples. 
Experiments show that our anomaly detection engine with dynamic analysis is capable of detecting zero-day malware with a low false negative rate (1.16 %) and acceptable false positive rate (1.30 %); it is worth noting that our signature detection engine with hybrid analysis can accurately classify malware samples with an average positive rate 98.94 %. Considering the intensive computing resources required by the static and dynamic analysis, our proposed detection system should be deployed off-device, such as in the Cloud. The app store markets and the ordinary users can access our detection system for malware detection through cloud service.}, } @article {pmid26540716, year = {2016}, author = {Napoli, C and Pappalardo, G and Tina, GM and Tramontana, E}, title = {Cooperative Strategy for Optimal Management of Smart Grids by Wavelet RNNs and Cloud Computing.}, journal = {IEEE transactions on neural networks and learning systems}, volume = {27}, number = {8}, pages = {1672-1685}, doi = {10.1109/TNNLS.2015.2480709}, pmid = {26540716}, issn = {2162-2388}, abstract = {Advanced smart grids have several power sources that contribute with their own irregular dynamic to the power production, while load nodes have another dynamic. Several factors have to be considered when using the owned power sources for satisfying the demand, i.e., production rate, battery charge and status, variable cost of externally bought energy, and so on. The objective of this paper is to develop appropriate neural network architectures that automatically and continuously govern power production and dispatch, in order to maximize the overall benefit over a long time. Such a control will improve the fundamental work of a smart grid. For this, status data of several components have to be gathered, and then an estimate of future power production and demand is needed. Hence, the neural network-driven forecasts are apt in this paper for renewable nonprogrammable energy sources. 
Then, the produced energy as well as the stored one can be supplied to consumers inside a smart grid, by means of digital technology. Among the sought benefits, reduced costs and increasing reliability and transparency are paramount.}, } @article {pmid26508343, year = {2015}, author = {Chapron, G}, title = {Wildlife in the cloud: a new approach for engaging stakeholders in wildlife management.}, journal = {Ambio}, volume = {44 Suppl 4}, number = {Suppl 4}, pages = {550-556}, pmid = {26508343}, issn = {1654-7209}, mesh = {Animals ; Animals, Wild/*physiology ; *Cloud Computing ; Conservation of Natural Resources/*methods ; Deer/*physiology ; Sweden ; }, abstract = {Research in wildlife management increasingly relies on quantitative population models. However, a remaining challenge is to have end-users, who are often alienated by mathematics, benefiting from this research. I propose a new approach, 'wildlife in the cloud,' to enable active learning by practitioners from cloud-based ecological models whose complexity remains invisible to the user. I argue that this concept carries the potential to overcome limitations of desktop-based software and allows new understandings of human-wildlife systems. This concept is illustrated by presenting an online decision-support tool for moose management in areas with predators in Sweden. The tool takes the form of a user-friendly cloud-app through which users can compare the effects of alternative management decisions, and may feed into adjustment of their hunting strategy. 
I explain how the dynamic nature of cloud-apps opens the door to different ways of learning, informed by ecological models that can benefit both users and researchers.}, } @article {pmid26504488, year = {2015}, author = {Hu, R and Liu, G and Jiang, J and Wang, L}, title = {G2LC: Resources Autoscaling for Real Time Bioinformatics Applications in IaaS.}, journal = {Computational and mathematical methods in medicine}, volume = {2015}, number = {}, pages = {549026}, pmid = {26504488}, issn = {1748-6718}, mesh = {Algorithms ; Cloud Computing ; Computational Biology/*methods/statistics & numerical data ; Computer Systems ; High-Throughput Nucleotide Sequencing/statistics & numerical data ; Humans ; Sequence Alignment/methods/statistics & numerical data ; Software ; User-Computer Interface ; }, abstract = {Cloud computing has started to change the way how bioinformatics research is being carried out. Researchers who have taken advantage of this technology can process larger amounts of data and speed up scientific discovery. The variability in data volume results in variable computing requirements. Therefore, bioinformatics researchers are pursuing more reliable and efficient methods for conducting sequencing analyses. This paper proposes an automated resource provisioning method, G2LC, for bioinformatics applications in IaaS. It enables application to output the results in a real time manner. Its main purpose is to guarantee applications performance, while improving resource utilization. Real sequence searching data of BLAST is used to evaluate the effectiveness of G2LC. 
Experimental results show that G2LC guarantees the application performance, while resource is saved up to 20.14%.}, } @article {pmid26501966, year = {2015}, author = {Afgan, E and Sloggett, C and Goonasekera, N and Makunin, I and Benson, D and Crowe, M and Gladman, S and Kowsar, Y and Pheasant, M and Horst, R and Lonie, A}, title = {Genomics Virtual Laboratory: A Practical Bioinformatics Workbench for the Cloud.}, journal = {PloS one}, volume = {10}, number = {10}, pages = {e0140829}, pmid = {26501966}, issn = {1932-6203}, mesh = {Animals ; *Cloud Computing ; Computational Biology/*methods ; Databases, Genetic ; Genomics/*methods ; Humans ; Software ; *User-Computer Interface ; }, abstract = {BACKGROUND: Analyzing high throughput genomics data is a complex and compute intensive task, generally requiring numerous software tools and large reference data sets, tied together in successive stages of data transformation and visualisation. A computational platform enabling best practice genomics analysis ideally meets a number of requirements, including: a wide range of analysis and visualisation tools, closely linked to large user and reference data sets; workflow platform(s) enabling accessible, reproducible, portable analyses, through a flexible set of interfaces; highly available, scalable computational resources; and flexibility and versatility in the use of these resources to meet demands and expertise of a variety of users. Access to an appropriate computational platform can be a significant barrier to researchers, as establishing such a platform requires a large upfront investment in hardware, experience, and expertise.

RESULTS: We designed and implemented the Genomics Virtual Laboratory (GVL) as a middleware layer of machine images, cloud management tools, and online services that enable researchers to build arbitrarily sized compute clusters on demand, pre-populated with fully configured bioinformatics tools, reference datasets and workflow and visualisation options. The platform is flexible in that users can conduct analyses through web-based (Galaxy, RStudio, IPython Notebook) or command-line interfaces, and add/remove compute nodes and data resources as required. Best-practice tutorials and protocols provide a path from introductory training to practice. The GVL is available on the OpenStack-based Australian Research Cloud (http://nectar.org.au) and the Amazon Web Services cloud. The principles, implementation and build process are designed to be cloud-agnostic.

CONCLUSIONS: This paper provides a blueprint for the design and implementation of a cloud-based Genomics Virtual Laboratory. We discuss scope, design considerations and technical and logistical constraints, and explore the value added to the research community through the suite of services and resources provided by our implementation.}, } @article {pmid26495424, year = {2015}, author = {Ramachandramurthy, S and Subramaniam, S and Ramasamy, C}, title = {Distilling Big Data: Refining Quality Information in the Era of Yottabytes.}, journal = {TheScientificWorldJournal}, volume = {2015}, number = {}, pages = {453597}, pmid = {26495424}, issn = {1537-744X}, abstract = {Big Data is the buzzword of the modern century. With the invasion of pervasive computing, we live in a data centric environment, where we always leave a track of data related to our day to day activities. Be it a visit to a shopping mall or hospital or surfing Internet, we create voluminous data related to credit card transactions, user details, location information, and so on. These trails of data simply define an individual and form the backbone for user-profiling. With the mobile phones and their easy access to online social networks on the go, sensor data such as geo-taggings and events and sentiments around them contribute to the already overwhelming data containers. With reductions in the cost of storage and computational devices and with increasing proliferation of Cloud, we never felt any constraints in storing or processing such data. Eventually we end up having several exabytes of data and analysing them for their usefulness has introduced new frontiers of research. Effective distillation of these data is the need of the hour to improve the veracity of the Big Data. 
This research targets the utilization of the Fuzzy Bayesian process model to improve the quality of information in Big Data.}, } @article {pmid26490152, year = {2015}, author = {Chen, SY and Lai, CF and Hwang, RH and Lai, YH and Wang, MS}, title = {An Adaptive Sensor Data Segments Selection Method for Wearable Health Care Services.}, journal = {Journal of medical systems}, volume = {39}, number = {12}, pages = {194}, pmid = {26490152}, issn = {1573-689X}, mesh = {Algorithms ; *Cloud Computing ; Computer Communication Networks/*instrumentation ; Humans ; Remote Sensing Technology/*instrumentation ; Telemedicine/*instrumentation ; Wireless Technology/*instrumentation ; }, abstract = {As cloud computing and wearable devices technologies mature, relevant services have grown more and more popular in recent years. The healthcare field is one of the popular services for this technology that adopts wearable devices to sense signals of negative physiological events, and to notify users. The development and implementation of long-term healthcare monitoring that can prevent or quickly respond to the occurrence of disease and accidents present an interesting challenge for computing power and energy limits. This study proposed an adaptive sensor data segments selection method for wearable health care services, and considered the sensing frequency of the various signals from human body, as well as the data transmission among the devices. The healthcare service regulates the sensing frequency of devices by considering the overall cloud computing environment and the sensing variations of wearable health care services. 
The experimental results show that the proposed service can effectively transmit the sensing data and prolong the overall lifetime of health care services.}, } @article {pmid26479684, year = {2016}, author = {Fu, J and Jones, M and Liu, T and Hao, W and Yan, Y and Qian, G and Jan, YK}, title = {A novel mobile-cloud system for capturing and analyzing wheelchair maneuvering data: A pilot study.}, journal = {Assistive technology : the official journal of RESNA}, volume = {28}, number = {2}, pages = {105-114}, pmid = {26479684}, issn = {1949-3614}, support = {P20 GM103447/GM/NIGMS NIH HHS/United States ; }, mesh = {Algorithms ; *Cloud Computing ; Equipment Design ; Humans ; Machine Learning ; *Mobile Applications ; Pilot Projects ; *Signal Processing, Computer-Assisted ; Smartphone ; *Wheelchairs ; }, abstract = {The purpose of this pilot study was to provide a new approach for capturing and analyzing wheelchair maneuvering data, which are critical for evaluating wheelchair users' activity levels. We proposed a mobile-cloud (MC) system, which incorporated the emerging mobile and cloud computing technologies. The MC system employed smartphone sensors to collect wheelchair maneuvering data and transmit them to the cloud for storage and analysis. A k-nearest neighbor (KNN) machine-learning algorithm was developed to mitigate the impact of sensor noise and recognize wheelchair maneuvering patterns. We conducted 30 trials in an indoor setting, where each trial contained 10 bouts (i.e., periods of continuous wheelchair movement). We also verified our approach in a different building. Different from existing approaches that require sensors to be attached to wheelchairs' wheels, we placed the smartphone into a smartphone holder attached to the wheelchair. Experimental results illustrate that our approach correctly identified all 300 bouts. 
Compared to existing approaches, our approach was easier to use while achieving similar accuracy in analyzing the accumulated movement time and maximum period of continuous movement (p > 0.8). Overall, the MC system provided a feasible way to ease the data collection process and generated accurate analysis results for evaluating activity levels.}, } @article {pmid26473166, year = {2015}, author = {Loganathan, S and Mukherjee, S}, title = {Job Scheduling with Efficient Resource Monitoring in Cloud Datacenter.}, journal = {TheScientificWorldJournal}, volume = {2015}, number = {}, pages = {983018}, pmid = {26473166}, issn = {1537-744X}, abstract = {Cloud computing is an on-demand computing model, which uses virtualization technology to provide cloud resources to users in the form of virtual machines through internet. Being an adaptable technology, cloud computing is an excellent alternative for organizations for forming their own private cloud. Since the resources are limited in these private clouds maximizing the utilization of resources and giving the guaranteed service for the user are the ultimate goal. For that, efficient scheduling is needed. This research reports on an efficient data structure for resource management and resource scheduling technique in a private cloud environment and discusses a cloud model. The proposed scheduling algorithm considers the types of jobs and the resource availability in its scheduling decision. 
Finally, we conducted simulations using CloudSim and compared our algorithm with other existing methods, like V-MCT and priority scheduling algorithms.}, } @article {pmid26458392, year = {2015}, author = {Jepson, P and Ladle, RJ}, title = {Nature apps: Waiting for the revolution.}, journal = {Ambio}, volume = {44}, number = {8}, pages = {827-832}, pmid = {26458392}, issn = {1654-7209}, mesh = {*Biodiversity ; *Community Participation ; Conservation of Natural Resources/*methods ; *Mobile Applications ; *Smartphone/instrumentation ; }, abstract = {Apps are small task-orientated programs with the potential to integrate the computational and sensing capacities of smartphones with the power of cloud computing, social networking, and crowdsourcing. They have the potential to transform how humans interact with nature, cause a step change in the quantity and resolution of biodiversity data, democratize access to environmental knowledge, and reinvigorate ways of enjoying nature. To assess the extent to which this potential is being exploited in relation to nature, we conducted an automated search of the Google Play Store using 96 nature-related terms. This returned data on ~36 304 apps, of which ~6301 were nature-themed. We found that few of these fully exploit the full range of capabilities inherent in the technology and/or have successfully captured the public imagination. 
Such breakthroughs will only be achieved by increasing the frequency and quality of collaboration between environmental scientists, information engineers, computer scientists, and interested publics.}, } @article {pmid26451333, year = {2015}, author = {Sanduja, S and Jewell, P and Aron, E and Pharai, N}, title = {Cloud Computing for Pharmacometrics: Using AWS, NONMEM, PsN, Grid Engine, and Sonic.}, journal = {CPT: pharmacometrics & systems pharmacology}, volume = {4}, number = {9}, pages = {537-546}, pmid = {26451333}, issn = {2163-8306}, abstract = {Cloud computing allows pharmacometricians to access advanced hardware, network, and security resources available to expedite analysis and reporting. Cloud-based computing environments are available at a fraction of the time and effort when compared to traditional local datacenter-based solutions. This tutorial explains how to get started with building your own personal cloud computer cluster using Amazon Web Services (AWS), NONMEM, PsN, Grid Engine, and Sonic.}, } @article {pmid26441462, year = {2016}, author = {Li, Z and Kolmanovsky, I and Atkins, E and Lu, J and Filev, DP and Michelini, J}, title = {Road Risk Modeling and Cloud-Aided Safety-Based Route Planning.}, journal = {IEEE transactions on cybernetics}, volume = {46}, number = {11}, pages = {2473-2483}, doi = {10.1109/TCYB.2015.2478698}, pmid = {26441462}, issn = {2168-2275}, abstract = {This paper presents a safety-based route planner that exploits vehicle-to-cloud-to-vehicle (V2C2V) connectivity. Time and road risk index (RRI) are considered as metrics to be balanced based on user preference. To evaluate road segment risk, a road and accident database from the highway safety information system is mined with a hybrid neural network model to predict RRI. Real-time factors such as time of day, day of the week, and weather are included as correction factors to the static RRI prediction. 
With real-time RRI and expected travel time, route planning is formulated as a multiobjective network flow problem and further reduced to a mixed-integer programming problem. A V2C2V implementation of our safety-based route planning approach is proposed to facilitate access to real-time information and computing resources. A real-world case study, route planning through the city of Columbus, Ohio, is presented. Several scenarios illustrate how the ``best'' route can be adjusted to favor time versus safety metrics.}, } @article {pmid26431035, year = {2015}, author = {Nezarat, A and Dastghaibifard, GH}, title = {Efficient Nash Equilibrium Resource Allocation Based on Game Theory Mechanism in Cloud Computing by Using Auction.}, journal = {PloS one}, volume = {10}, number = {10}, pages = {e0138424}, pmid = {26431035}, issn = {1932-6203}, mesh = {*Cloud Computing ; *Game Theory ; Models, Theoretical ; *Resource Allocation ; }, abstract = {One of the most complex issues in the cloud computing environment is the problem of resource allocation so that, on one hand, the cloud provider expects the most profitability and, on the other hand, users also expect to have the best resources at their disposal considering the budget constraints and time. In most previous work conducted, heuristic and evolutionary approaches have been used to solve this problem. Nevertheless, since the nature of this environment is based on economic methods, using such methods can decrease response time and reduce the complexity of the problem. In this paper, an auction-based method is proposed which determines the auction winner by applying a game theory mechanism and holding a repetitive game with incomplete information in a non-cooperative environment. In this method, users calculate a suitable price bid with their objective function during several rounds and repetitions and send it to the auctioneer; and the auctioneer chooses the winning player based on the suggested utility function. 
In the proposed method, the end point of the game is the Nash equilibrium point where players are no longer inclined to alter their bid for that resource and the final bid also satisfies the auctioneer's utility function. To prove the response space convexity, the Lagrange method is used and the proposed model is simulated in the cloudsim and the results are compared with previous work. At the end, it is concluded that this method converges to a response in a shorter time, provides the lowest service level agreement violations and the most utility to the provider.}, } @article {pmid26428290, year = {2016}, author = {Hodor, P and Chawla, A and Clark, A and Neal, L}, title = {cl-dash: rapid configuration and deployment of Hadoop clusters for bioinformatics research in the cloud.}, journal = {Bioinformatics (Oxford, England)}, volume = {32}, number = {2}, pages = {301-303}, pmid = {26428290}, issn = {1367-4811}, mesh = {*Algorithms ; *Biomedical Research ; Computational Biology/*methods ; Genomics/*methods ; High-Throughput Nucleotide Sequencing ; Humans ; *Information Storage and Retrieval ; Programming Languages ; *Search Engine ; Sequence Analysis, DNA ; *Software ; }, abstract = {UNLABELLED: : One of the solutions proposed for addressing the challenge of the overwhelming abundance of genomic sequence and other biological data is the use of the Hadoop computing framework. Appropriate tools are needed to set up computational environments that facilitate research of novel bioinformatics methodology using Hadoop. Here, we present cl-dash, a complete starter kit for setting up such an environment. Configuring and deploying new Hadoop clusters can be done in minutes. Use of Amazon Web Services ensures no initial investment and minimal operation costs. Two sample bioinformatics applications help the researcher understand and learn the principles of implementing an algorithm using the MapReduce programming pattern.

Source code is available at https://bitbucket.org/booz-allen-sci-comp-team/cl-dash.git.

CONTACT: hodor_paul@bah.com.}, } @article {pmid26415329, year = {2015}, author = {Chesley, R}, title = {Cloud Computing: Implications For Providers.}, journal = {Provider (Washington, D.C.)}, volume = {41}, number = {8}, pages = {35-36}, pmid = {26415329}, issn = {0888-0352}, mesh = {*Computer Security ; *Hospice Care ; Information Storage and Retrieval/*methods ; United States ; }, } @article {pmid26410316, year = {2015}, author = {Shi, Y and Fan, H and Xiong, G}, title = {Obfuscatable multi-recipient re-encryption for secure privacy-preserving personal health record services.}, journal = {Technology and health care : official journal of the European Society for Engineering and Medicine}, volume = {23 Suppl 1}, number = {}, pages = {S139-45}, doi = {10.3233/thc-150946}, pmid = {26410316}, issn = {1878-7401}, mesh = {*Algorithms ; *Cloud Computing ; *Computer Security ; *Confidentiality ; *Electronic Health Records ; Humans ; }, abstract = {BACKGROUND: With the rapid development of cloud computing techniques, it is attractive for personal health record (PHR) service providers to deploy their PHR applications and store the personal health data in the cloud. However, there could be a serious privacy leakage if the cloud-based system is intruded by attackers, which makes it necessary for the PHR service provider to encrypt all patients' health data on cloud servers.

OBJECTIVE: Existing techniques are insufficiently secure under circumstances where advanced threats are considered, or inefficient when many recipients are involved. Therefore, the objectives of our solution are (1) providing a secure implementation of re-encryption in white-box attack contexts and (2) assuring the efficiency of the implementation even in multi-recipient cases.

METHODS: We designed the multi-recipient re-encryption functionality by randomness-reusing and protecting the implementation by obfuscation.

RESULTS: The proposed solution is secure even in white-box attack contexts. Furthermore, a comparison with other related work shows that the computational cost of the proposed solution is lower.

CONCLUSIONS: The proposed technique can serve as a building block for supporting secure, efficient and privacy-preserving personal health record service systems.}, } @article {pmid26410315, year = {2015}, author = {Shao, Z and Yang, B and Zhang, W and Zhao, Y and Wu, Z and Miao, M}, title = {Secure medical information sharing in cloud computing.}, journal = {Technology and health care : official journal of the European Society for Engineering and Medicine}, volume = {23 Suppl 1}, number = {}, pages = {S133-7}, doi = {10.3233/thc-150945}, pmid = {26410315}, issn = {1878-7401}, mesh = {*Algorithms ; *Cloud Computing ; *Computer Security ; *Confidentiality ; *Health Information Exchange ; Humans ; Information Storage and Retrieval ; }, abstract = {Medical information sharing is one of the most attractive applications of cloud computing, where searchable encryption is a fascinating solution for securely and conveniently sharing medical data among different medical organizers. However, almost all previous works are designed in symmetric key encryption environment. The only works in public key encryption do not support keyword trapdoor security, have long ciphertext related to the number of receivers, do not support receiver revocation without re-encrypting, and do not preserve the membership of receivers. In this paper, we propose a searchable encryption supporting multiple receivers for medical information sharing based on bilinear maps in public key encryption environment. In the proposed protocol, data owner stores only one copy of his encrypted file and its corresponding encrypted keywords on cloud for multiple designated receivers. The keyword ciphertext is significantly shorter and its length is constant without relation to the number of designated receivers, i.e., for n receivers the ciphertext length is only twice the element length in the group. 
Only the owner knows that with whom his data is shared, and the access to his data is still under control after having been put on the cloud. We formally prove the security of keyword ciphertext based on the intractability of Bilinear Diffie-Hellman problem and the keyword trapdoor based on Decisional Diffie-Hellman problem.}, } @article {pmid26401099, year = {2015}, author = {Dahlö, M and Haziza, F and Kallio, A and Korpelainen, E and Bongcam-Rudloff, E and Spjuth, O}, title = {BioImg.org: A Catalog of Virtual Machine Images for the Life Sciences.}, journal = {Bioinformatics and biology insights}, volume = {9}, number = {}, pages = {125-128}, pmid = {26401099}, issn = {1177-9322}, abstract = {Virtualization is becoming increasingly important in bioscience, enabling assembly and provisioning of complete computer setups, including operating system, data, software, and services packaged as virtual machine images (VMIs). We present an open catalog of VMIs for the life sciences, where scientists can share information about images and optionally upload them to a server equipped with a large file system and fast Internet connection. Other scientists can then search for and download images that can be run on the local computer or in a cloud computing environment, providing easy access to bioinformatics environments. We also describe applications where VMIs aid life science research, including distributing tools and data, supporting reproducible analysis, and facilitating education. BioImg.org is freely available at: https://bioimg.org.}, } @article {pmid26380365, year = {2015}, author = {Gokulnath, K and Uthariaraj, R}, title = {Game Theory Based Trust Model for Cloud Environment.}, journal = {TheScientificWorldJournal}, volume = {2015}, number = {}, pages = {709827}, pmid = {26380365}, issn = {1537-744X}, abstract = {The aim of this work is to propose a method to establish trust at bootload level in cloud computing environment. 
This work proposes a game theoretic based approach for achieving trust at bootload level of both resources and users perception. Nash equilibrium (NE) enhances the trust evaluation of the first-time users and providers. It also restricts the service providers and the users to violate service level agreement (SLA). Significantly, the problem of cold start and whitewashing issues are addressed by the proposed method. In addition appropriate mapping of cloud user's application to cloud service provider for segregating trust level is achieved as a part of mapping. Thus, time complexity and space complexity are handled efficiently. Experiments were carried out to compare and contrast the performance of the conventional methods and the proposed method. Several metrics like execution time, accuracy, error identification, and undecidability of the resources were considered.}, } @article {pmid26380364, year = {2015}, author = {Munisamy, SD and Chokkalingam, A}, title = {Universal Keyword Classifier on Public Key Based Encrypted Multikeyword Fuzzy Search in Public Cloud.}, journal = {TheScientificWorldJournal}, volume = {2015}, number = {}, pages = {706102}, pmid = {26380364}, issn = {1537-744X}, abstract = {Cloud computing has pioneered the emerging world by manifesting itself as a service through internet and facilitates third party infrastructure and applications. While customers have no visibility on how their data is stored on service provider's premises, it offers greater benefits in lowering infrastructure costs and delivering more flexibility and simplicity in managing private data. The opportunity to use cloud services on pay-per-use basis provides comfort for private data owners in managing costs and data. With the pervasive usage of internet, the focus has now shifted towards effective data utilization on the cloud without compromising security concerns. 
In the pursuit of increasing data utilization on public cloud storage, the key is to make effective data access through several fuzzy searching techniques. In this paper, we have discussed the existing fuzzy searching techniques and focused on reducing the searching time on the cloud storage server for effective data utilization. Our proposed Asymmetric Classifier Multikeyword Fuzzy Search method provides classifier search server that creates universal keyword classifier for the multiple keyword request which greatly reduces the searching time by learning the search path pattern for all the keywords in the fuzzy keyword set. The objective of using BTree fuzzy searchable index is to resolve typos and representation inconsistencies and also to facilitate effective data utilization.}, } @article {pmid26364202, year = {2015}, author = {Abdali-Mohammadi, F and Bajalan, V and Fathi, A}, title = {Toward a Fault Tolerant Architecture for Vital Medical-Based Wearable Computing.}, journal = {Journal of medical systems}, volume = {39}, number = {12}, pages = {149}, pmid = {26364202}, issn = {1573-689X}, mesh = {*Algorithms ; Cloud Computing ; Computer Communication Networks/instrumentation ; Equipment Design ; Equipment Failure ; Humans ; Remote Sensing Technology/*instrumentation ; Reproducibility of Results ; Telemedicine/*instrumentation ; }, abstract = {Advancements in computers and electronic technologies have led to the emergence of a new generation of efficient small intelligent systems. The products of such technologies might include Smartphones and wearable devices, which have attracted the attention of medical applications. These products are used less in critical medical applications because of their resource constraint and failure sensitivity. This is due to the fact that without safety considerations, small-integrated hardware will endanger patients' lives. 
Therefore, proposing some principals is required to construct wearable systems in healthcare so that the existing concerns are dealt with. Accordingly, this paper proposes an architecture for constructing wearable systems in critical medical applications. The proposed architecture is a three-tier one, supporting data flow from body sensors to cloud. The tiers of this architecture include wearable computers, mobile computing, and mobile cloud computing. One of the features of this architecture is its high possible fault tolerance due to the nature of its components. Moreover, the required protocols are presented to coordinate the components of this architecture. Finally, the reliability of this architecture is assessed by simulating the architecture and its components, and other aspects of the proposed architecture are discussed.}, } @article {pmid26357510, year = {2015}, author = {Li, X and Xu, J and Yang, Y}, title = {A Chaotic Particle Swarm Optimization-Based Heuristic for Market-Oriented Task-Level Scheduling in Cloud Workflow Systems.}, journal = {Computational intelligence and neuroscience}, volume = {2015}, number = {}, pages = {718689}, pmid = {26357510}, issn = {1687-5273}, mesh = {*Algorithms ; Computer Simulation ; *Heuristics ; *Models, Theoretical ; *Planning Techniques ; *Workflow ; }, abstract = {Cloud workflow system is a kind of platform service based on cloud computing. It facilitates the automation of workflow applications. Between cloud workflow system and its counterparts, market-oriented business model is one of the most prominent factors. The optimization of task-level scheduling in cloud workflow system is a hot topic. As the scheduling is a NP problem, Ant Colony Optimization (ACO) and Particle Swarm Optimization (PSO) have been proposed to optimize the cost. However, they have the characteristic of premature convergence in optimization process and therefore cannot effectively reduce the cost. 
To solve these problems, Chaotic Particle Swarm Optimization (CPSO) algorithm with chaotic sequence and adaptive inertia weight factor is applied to present the task-level scheduling. Chaotic sequence with high randomness improves the diversity of solutions, and its regularity assures a good global convergence. Adaptive inertia weight factor depends on the estimate value of cost. It makes the scheduling avoid premature convergence by properly balancing between global and local exploration. The experimental simulation shows that the cost obtained by our scheduling is always lower than the other two representative counterparts.}, } @article {pmid26357414, year = {2016}, author = {Pandey, P and Lee, EK and Pompili, D}, title = {A Distributed Computing Framework for Real-Time Detection of Stress and of Its Propagation in a Team.}, journal = {IEEE journal of biomedical and health informatics}, volume = {20}, number = {6}, pages = {1502-1512}, doi = {10.1109/JBHI.2015.2477342}, pmid = {26357414}, issn = {2168-2208}, mesh = {Computer Simulation ; Electrocardiography ; Humans ; *Models, Statistical ; Monitoring, Physiologic/*methods ; *Signal Processing, Computer-Assisted ; Stress, Physiological/*physiology ; Stress, Psychological/*diagnosis ; }, abstract = {Stress is one of the key factor that impacts the quality of our daily life: From the productivity and efficiency in the production processes to the ability of (civilian and military) individuals in making rational decisions. Also, stress can propagate from one individual to other working in a close proximity or toward a common goal, e.g., in a military operation or workforce. Real-time assessment of the stress of individuals alone is, however, not sufficient, as understanding its source and direction in which it propagates in a group of people is equally-if not more-important. 
A continuous near real-time in situ personal stress monitoring system to quantify level of stress of individuals and its direction of propagation in a team is envisioned. However, stress monitoring of an individual via his/her mobile device may not always be possible for extended periods of time due to limited battery capacity of these devices. To overcome this challenge a novel distributed mobile computing framework is proposed to organize the resources in the vicinity and form a mobile device cloud that enables offloading of computation tasks in stress detection algorithm from resource constrained devices (low residual battery, limited CPU cycles) to resource rich devices. Our framework also supports computing parallelization and workflows, defining how the data and tasks divided/assigned among the entities of the framework are designed. The direction of propagation and magnitude of influence of stress in a group of individuals are studied by applying real-time, in situ analysis of Granger Causality. Tangible benefits (in terms of energy expenditure and execution time) of the proposed framework in comparison to a centralized framework are presented via thorough simulations and real experiments.}, } @article {pmid26357328, year = {2015}, author = {Varghese, B and Patel, I and Barker, A}, title = {RBioCloud: A Light-Weight Framework for Bioconductor and R-based Jobs on the Cloud.}, journal = {IEEE/ACM transactions on computational biology and bioinformatics}, volume = {12}, number = {4}, pages = {871-878}, doi = {10.1109/TCBB.2014.2361327}, pmid = {26357328}, issn = {1557-9964}, mesh = {Cloud Computing ; Computational Biology/*methods ; Gene Expression Profiling ; Humans ; *Internet ; *Programming Languages ; *Software ; }, abstract = {Large-scale ad hoc analytics of genomic data is popular using the R-programming language supported by over 700 software packages provided by Bioconductor. 
More recently, analytical jobs are benefitting from on-demand computing and storage, their scalability and their low maintenance cost, all of which are offered by the cloud. While biologists and bioinformaticists can take an analytical job and execute it on their personal workstations, it remains challenging to seamlessly execute the job on the cloud infrastructure without extensive knowledge of the cloud dashboard. How analytical jobs can not only with minimum effort be executed on the cloud, but also how both the resources and data required by the job can be managed is explored in this paper. An open-source light-weight framework for executing R-scripts using Bioconductor packages, referred to as `RBioCloud', is designed and developed. RBioCloud offers a set of simple command-line tools for managing the cloud resources, the data and the execution of the job. Three biological test cases validate the feasibility of RBioCloud. The framework is available from http://www.rbiocloud.com.}, } @article {pmid26345453, year = {2015}, author = {Suciu, G and Suciu, V and Martian, A and Craciunescu, R and Vulpe, A and Marcu, I and Halunga, S and Fratu, O}, title = {Big Data, Internet of Things and Cloud Convergence--An Architecture for Secure E-Health Applications.}, journal = {Journal of medical systems}, volume = {39}, number = {11}, pages = {141}, pmid = {26345453}, issn = {1573-689X}, mesh = {*Cloud Computing ; Computer Communication Networks/*instrumentation ; Computer Security ; Humans ; Information Storage and Retrieval ; Internet ; Remote Sensing Technology/*instrumentation ; Telemedicine/*instrumentation ; }, abstract = {Big data storage and processing are considered as one of the main applications for cloud computing systems. Furthermore, the development of the Internet of Things (IoT) paradigm has advanced the research on Machine to Machine (M2M) communications and enabled novel tele-monitoring architectures for E-Health applications. 
However, there is a need for converging current decentralized cloud systems, general software for processing big data and IoT systems. The purpose of this paper is to analyze existing components and methods of securely integrating big data processing with cloud M2M systems based on Remote Telemetry Units (RTUs) and to propose a converged E-Health architecture built on Exalead CloudView, a search based application. Finally, we discuss the main findings of the proposed implementation and future directions.}, } @article {pmid26338218, year = {2016}, author = {Neinstein, A and Wong, J and Look, H and Arbiter, B and Quirk, K and McCanne, S and Sun, Y and Blum, M and Adi, S}, title = {A case study in open source innovation: developing the Tidepool Platform for interoperability in type 1 diabetes management.}, journal = {Journal of the American Medical Informatics Association : JAMIA}, volume = {23}, number = {2}, pages = {324-332}, pmid = {26338218}, issn = {1527-974X}, support = {K23 DK107894/DK/NIDDK NIH HHS/United States ; DK094726/DK/NIDDK NIH HHS/United States ; }, mesh = {Diabetes Mellitus, Type 1/*therapy ; Humans ; Ownership ; *Software ; Systems Integration ; }, abstract = {OBJECTIVE: Develop a device-agnostic cloud platform to host diabetes device data and catalyze an ecosystem of software innovation for type 1 diabetes (T1D) management.

MATERIALS AND METHODS: An interdisciplinary team decided to establish a nonprofit company, Tidepool, and build open-source software.

RESULTS: Through a user-centered design process, the authors created a software platform, the Tidepool Platform, to upload and host T1D device data in an integrated, device-agnostic fashion, as well as an application ("app"), Blip, to visualize the data. Tidepool's software utilizes the principles of modular components, modern web design including REST APIs and JavaScript, cloud computing, agile development methodology, and robust privacy and security.

DISCUSSION: By consolidating the currently scattered and siloed T1D device data ecosystem into one open platform, Tidepool can improve access to the data and enable new possibilities and efficiencies in T1D clinical care and research. The Tidepool Platform decouples diabetes apps from diabetes devices, allowing software developers to build innovative apps without requiring them to design a unique back-end (e.g., database and security) or unique ways of ingesting device data. It allows people with T1D to choose to use any preferred app regardless of which device(s) they use.

CONCLUSION: The authors believe that the Tidepool Platform can solve two current problems in the T1D device landscape: 1) limited access to T1D device data and 2) poor interoperability of data from different devices. If proven effective, Tidepool's open source, cloud model for health data interoperability is applicable to other healthcare use cases.}, } @article {pmid26327939, year = {2015}, author = {Spruijt-Metz, D and Hekler, E and Saranummi, N and Intille, S and Korhonen, I and Nilsen, W and Rivera, DE and Spring, B and Michie, S and Asch, DA and Sanna, A and Salcedo, VT and Kukakfa, R and Pavel, M}, title = {Building new computational models to support health behavior change and maintenance: new opportunities in behavioral research.}, journal = {Translational behavioral medicine}, volume = {5}, number = {3}, pages = {335-346}, pmid = {26327939}, issn = {1869-6716}, support = {MR/K023195/1/MRC_/Medical Research Council/United Kingdom ; P30 AG034546/AG/NIA NIH HHS/United States ; P30 CA060553/CA/NCI NIH HHS/United States ; R25 OD011113/OD/NIH HHS/United States ; }, abstract = {Adverse and suboptimal health behaviors and habits are responsible for approximately 40 % of preventable deaths, in addition to their unfavorable effects on quality of life and economics. Our current understanding of human behavior is largely based on static "snapshots" of human behavior, rather than ongoing, dynamic feedback loops of behavior in response to ever-changing biological, social, personal, and environmental states. This paper first discusses how new technologies (i.e., mobile sensors, smartphones, ubiquitous computing, and cloud-enabled processing/computing) and emerging systems modeling techniques enable the development of new, dynamic, and empirical models of human behavior that could facilitate just-in-time adaptive, scalable interventions. 
The paper then describes concrete steps to the creation of robust dynamic mathematical models of behavior including: (1) establishing "gold standard" measures, (2) the creation of a behavioral ontology for shared language and understanding tools that both enable dynamic theorizing across disciplines, (3) the development of data sharing resources, and (4) facilitating improved sharing of mathematical models and tools to support rapid aggregation of the models. We conclude with the discussion of what might be incorporated into a "knowledge commons," which could help to bring together these disparate activities into a unified system and structure for organizing knowledge about behavior.}, } @article {pmid26306232, year = {2015}, author = {Tao, S and Cui, L and Zhu, W and Sun, M and Bodenreider, O and Zhang, GQ}, title = {Mining Relation Reversals in the Evolution of SNOMED CT Using MapReduce.}, journal = {AMIA Joint Summits on Translational Science proceedings. AMIA Joint Summits on Translational Science}, volume = {2015}, number = {}, pages = {46-50}, pmid = {26306232}, issn = {2153-4063}, support = {UL1 TR000439/TR/NCATS NIH HHS/United States ; }, abstract = {Relation reversals in ontological systems refer to such patterns as a path from concept A to concept B in one version becoming a path with the position of A and B switched in another version. We present a scalable approach, using cloud computing, to systematically extract all hierarchical relation reversals among 8 SNOMED CT versions from 2009 to 2014. Taking advantage of our MapReduce algorithms for computing transitive closure and large-scale set operations, 48 reversals were found through 28 pairwise comparison of the 8 versions in 18 minutes using a 30-node local cloud, to completely cover all possible scenarios. Except for one, all such reversals occurred in three sub-hierarchies: Body Structure, Clinical Finding, and Procedure. 
Two (2) reversal pairs involved an uncoupling of the pair before the is-a coupling is reversed. Twelve (12) reversal pairs involved paths of length-two, and none (0) involved paths beyond length-two. Such reversals not only represent areas of potential need for additional modeling work, but also are important for identifying and handling cycles for comparative visualization of ontological evolution.}, } @article {pmid26293863, year = {2015}, author = {Regan, K and Payne, PR}, title = {From Molecules to Patients: The Clinical Applications of Translational Bioinformatics.}, journal = {Yearbook of medical informatics}, volume = {10}, number = {1}, pages = {164-169}, pmid = {26293863}, issn = {2364-0502}, mesh = {*Computational Biology ; Genomics ; Humans ; Precision Medicine ; *Translational Research, Biomedical ; }, abstract = {OBJECTIVE: In order to realize the promise of personalized medicine, Translational Bioinformatics (TBI) research will need to continue to address implementation issues across the clinical spectrum. In this review, we aim to evaluate the expanding field of TBI towards clinical applications, and define common themes and current gaps in order to motivate future research.

METHODS: Here we present the state-of-the-art of clinical implementation of TBI-based tools and resources. Our thematic analyses of a targeted literature search of recent TBI-related articles ranged across topics in genomics, data management, hypothesis generation, molecular epidemiology, diagnostics, therapeutics and personalized medicine.

RESULTS: Open areas of clinically-relevant TBI research identified in this review include developing data standards and best practices, publicly available resources, integrative systems-level approaches, user-friendly tools for clinical support, cloud computing solutions, emerging technologies and means to address pressing legal, ethical and social issues.

CONCLUSIONS: There is a need for further research bridging the gap from foundational TBI-based theories and methodologies to clinical implementation. We have organized the topic themes presented in this review into four conceptual foci - domain analyses, knowledge engineering, computational architectures and computation methods alongside three stages of knowledge development in order to orient future TBI efforts to accelerate the goals of personalized medicine.}, } @article {pmid26291949, year = {2015}, author = {Kohlhoff, KJ and Shukla, D and Lawrenz, M and Bowman, GR and Konerding, DE and Belov, D and Altman, RB and Pande, VS}, title = {Corrigendum: Cloud-based simulations on Google Exacycle reveal ligand modulation of GPCR activation pathways.}, journal = {Nature chemistry}, volume = {7}, number = {9}, pages = {759}, pmid = {26291949}, issn = {1755-4349}, } @article {pmid26286716, year = {2015}, author = {Kanterakis, A and Deelen, P and van Dijk, F and Byelas, H and Dijkstra, M and Swertz, MA}, title = {Molgenis-impute: imputation pipeline in a box.}, journal = {BMC research notes}, volume = {8}, number = {}, pages = {359}, pmid = {26286716}, issn = {1756-0500}, mesh = {*Algorithms ; Chromosome Mapping ; Genetic Markers ; *Genome ; *Genome-Wide Association Study ; Genomics/methods/*statistics & numerical data ; *Genotype ; Humans ; Polymorphism, Single Nucleotide ; *Software ; }, abstract = {BACKGROUND: Genotype imputation is an important procedure in current genomic analysis such as genome-wide association studies, meta-analyses and fine mapping. Although high quality tools are available that perform the steps of this process, considerable effort and expertise is required to set up and run a best practice imputation pipeline, particularly for larger genotype datasets, where imputation has to scale out in parallel on computer clusters.

RESULTS: Here we present MOLGENIS-impute, an 'imputation in a box' solution that seamlessly and transparently automates the set up and running of all the steps of the imputation process. These steps include genome build liftover (liftovering), genotype phasing with SHAPEIT2, quality control, sample and chromosomal chunking/merging, and imputation with IMPUTE2. MOLGENIS-impute builds on MOLGENIS-compute, a simple pipeline management platform for submission and monitoring of bioinformatics tasks in High Performance Computing (HPC) environments like local/cloud servers, clusters and grids. All the required tools, data and scripts are downloaded and installed in a single step. Researchers with diverse backgrounds and expertise have tested MOLGENIS-impute on different locations and imputed over 30,000 samples so far using the 1,000 Genomes Project and new Genome of the Netherlands data as the imputation reference. The tests have been performed on PBS/SGE clusters, cloud VMs and in a grid HPC environment.

CONCLUSIONS: MOLGENIS-impute gives priority to the ease of setting up, configuring and running an imputation. It has minimal dependencies and wraps the pipeline in a simple command line interface, without sacrificing flexibility to adapt or limiting the options of underlying imputation tools. It does not require knowledge of a workflow system or programming, and is targeted at researchers who just want to apply best practices in imputation via simple commands. It is built on the MOLGENIS compute workflow framework to enable customization with additional computational steps or it can be included in other bioinformatics pipelines. It is available as open source from: https://github.com/molgenis/molgenis-imputation.}, } @article {pmid26276015, year = {2015}, author = {Melillo, P and Orrico, A and Scala, P and Crispino, F and Pecchia, L}, title = {Cloud-Based Smart Health Monitoring System for Automatic Cardiovascular and Fall Risk Assessment in Hypertensive Patients.}, journal = {Journal of medical systems}, volume = {39}, number = {10}, pages = {109}, pmid = {26276015}, issn = {1573-689X}, mesh = {*Accidental Falls ; Aged ; Aged, 80 and over ; Artificial Intelligence ; Cardiovascular Diseases/physiopathology ; *Cloud Computing ; Computer Security ; Electrocardiography ; Female ; Humans ; Hypertension/*physiopathology ; Male ; Monitoring, Ambulatory/*instrumentation ; Movement ; Prospective Studies ; Retrospective Studies ; Risk Assessment ; Risk Factors ; Telemetry/*instrumentation ; User-Computer Interface ; }, abstract = {The aim of this paper is to describe the design and the preliminary validation of a platform developed to collect and automatically analyze biomedical signals for risk assessment of vascular events and falls in hypertensive patients. This m-health platform, based on cloud computing, was designed to be flexible, extensible, and transparent, and to provide proactive remote monitoring via data-mining functionalities. 
A retrospective study was conducted to train and test the platform. The developed system was able to predict a future vascular event within the next 12 months with an accuracy rate of 84 % and to identify fallers with an accuracy rate of 72 %. In an ongoing prospective trial, almost all the recruited patients accepted favorably the system with a limited rate of inadherences causing data losses (<20 %). The developed platform supported clinical decision by processing tele-monitored data and providing quick and accurate risk assessment of vascular events and falls.}, } @article {pmid26271043, year = {2015}, author = {Bao, R and Hernandez, K and Huang, L and Kang, W and Bartom, E and Onel, K and Volchenboum, S and Andrade, J}, title = {ExScalibur: A High-Performance Cloud-Enabled Suite for Whole Exome Germline and Somatic Mutation Identification.}, journal = {PloS one}, volume = {10}, number = {8}, pages = {e0135800}, pmid = {26271043}, issn = {1932-6203}, support = {UL1 RR024999/RR/NCRR NIH HHS/United States ; UL1 TR000430/TR/NCATS NIH HHS/United States ; }, mesh = {Algorithms ; Computational Biology ; Exome/*genetics ; Germ-Line Mutation/*genetics ; Humans ; Internet ; Mutation/*genetics ; Reproducibility of Results ; *Software ; }, abstract = {Whole exome sequencing has facilitated the discovery of causal genetic variants associated with human diseases at deep coverage and low cost. In particular, the detection of somatic mutations from tumor/normal pairs has provided insights into the cancer genome. Although there is an abundance of publicly-available software for the detection of germline and somatic variants, concordance is generally limited among variant callers and alignment algorithms. Successful integration of variants detected by multiple methods requires in-depth knowledge of the software, access to high-performance computing resources, and advanced programming techniques. 
We present ExScalibur, a set of fully automated, highly scalable and modulated pipelines for whole exome data analysis. The suite integrates multiple alignment and variant calling algorithms for the accurate detection of germline and somatic mutations with close to 99% sensitivity and specificity. ExScalibur implements streamlined execution of analytical modules, real-time monitoring of pipeline progress, robust handling of errors and intuitive documentation that allows for increased reproducibility and sharing of results and workflows. It runs on local computers, high-performance computing clusters and cloud environments. In addition, we provide a data analysis report utility to facilitate visualization of the results that offers interactive exploration of quality control files, read alignment and variant calls, assisting downstream customization of potential disease-causing mutations. ExScalibur is open-source and is also available as a public image on Amazon cloud.}, } @article {pmid26262231, year = {2015}, author = {Santos Simões de Almeida, LH and Costa Oliveira, M}, title = {A Medical Image Backup Architecture Based on a NoSQL Database and Cloud Computing Services.}, journal = {Studies in health technology and informatics}, volume = {216}, number = {}, pages = {929}, pmid = {26262231}, issn = {1879-8365}, mesh = {*Cloud Computing ; Databases, Factual ; Humans ; *Radiology Information Systems/organization & administration ; Software ; }, abstract = {The use of digital systems for storing medical images generates a huge volume of data. Digital images are commonly stored and managed on a Picture Archiving and Communication System (PACS), under the DICOM standard. However, PACS is limited because it is strongly dependent on the server's physical space. Alternatively, Cloud Computing arises as an extensive, low cost, and reconfigurable resource. However, medical images contain patient information that can not be made available in a public cloud. 
Therefore, a mechanism to anonymize these images is needed. This poster presents a solution for this issue by taking digital images from PACS, converting the information contained in each image file to a NoSQL database, and using cloud computing to store digital images.}, } @article {pmid26262140, year = {2015}, author = {Abedini, M and von Cavallar, S and Chakravorty, R and Davis, M and Garnavi, R}, title = {A Cloud-Based Infrastructure for Feedback-Driven Training and Image Recognition.}, journal = {Studies in health technology and informatics}, volume = {216}, number = {}, pages = {691-695}, pmid = {26262140}, issn = {1879-8365}, mesh = {Algorithms ; *Cloud Computing ; Dermoscopy/methods ; *Expert Systems ; Feedback ; Humans ; Image Interpretation, Computer-Assisted/*methods ; *Machine Learning ; Melanoma/*pathology ; Pattern Recognition, Automated/methods ; Skin Neoplasms/*pathology ; User-Computer Interface ; }, abstract = {Advanced techniques in machine learning combined with scalable "cloud" computing infrastructure are driving the creation of new and innovative health diagnostic applications. We describe a service and application for performing image training and recognition, tailored to dermatology and melanoma identification. The system implements new machine learning approaches to provide a feedback-driven training loop. This training sequence enhances classification performance by incrementally retraining the classifier model from expert responses. 
To easily provide this application and associated web service to clinical practices, we also describe a scalable cloud infrastructure, deployable in public cloud infrastructure and private, on-premise systems.}, } @article {pmid26260162, year = {2015}, author = {Torreno, O and Trelles, O}, title = {Breaking the computational barriers of pairwise genome comparison.}, journal = {BMC bioinformatics}, volume = {16}, number = {1}, pages = {250}, pmid = {26260162}, issn = {1471-2105}, mesh = {*Algorithms ; Animals ; Bacteria/genetics ; Computational Biology/*methods ; Datasets as Topic ; Drosophila/genetics ; *Genome ; Humans ; Mammals/genetics ; *Software ; Synteny ; Viruses/genetics ; }, abstract = {BACKGROUND: Conventional pairwise sequence comparison software algorithms are being used to process much larger datasets than they were originally designed for. This can result in processing bottlenecks that limit software capabilities or prevent full use of the available hardware resources. Overcoming the barriers that limit the efficient computational analysis of large biological sequence datasets by retrofitting existing algorithms or by creating new applications represents a major challenge for the bioinformatics community.

RESULTS: We have developed C libraries for pairwise sequence comparison within diverse architectures, ranging from commodity systems to high performance and cloud computing environments. Exhaustive tests were performed using different datasets of closely- and distantly-related sequences that span from small viral genomes to large mammalian chromosomes. The tests demonstrated that our solution is capable of generating high quality results with a linear-time response and controlled memory consumption, being comparable or faster than the current state-of-the-art methods.

CONCLUSIONS: We have addressed the problem of pairwise and all-versus-all comparison of large sequences in general, greatly increasing the limits on input data size. The approach described here is based on a modular out-of-core strategy that uses secondary storage to avoid reaching memory limits during the identification of High-scoring Segment Pairs (HSPs) between the sequences under comparison. Software engineering concepts were applied to avoid intermediate result re-calculation, to minimise the performance impact of input/output (I/O) operations and to modularise the process, thus enhancing application flexibility and extendibility. Our computationally-efficient approach allows tasks such as the massive comparison of complete genomes, evolutionary event detection, the identification of conserved synteny blocks and inter-genome distance calculations to be performed more effectively.}, } @article {pmid26258165, year = {2015}, author = {Dorairaj, SD and Kaliannan, T}, title = {An Adaptive Multilevel Security Framework for the Data Stored in Cloud Environment.}, journal = {TheScientificWorldJournal}, volume = {2015}, number = {}, pages = {601017}, pmid = {26258165}, issn = {1537-744X}, abstract = {Cloud computing is renowned for delivering information technology services based on internet. Nowadays, organizations are interested in moving their massive data and computations into cloud to reap their significant benefits of on demand service, resource pooling, and rapid elasticity that helps to satisfy the dynamically changing infrastructure demand without the burden of owning, managing, and maintaining it. Since the data needs to be secured throughout its life cycle, security of the data in cloud is a major challenge to be concentrated on because the data is in third party's premises. Any uniform simple or high level security method for all the data either compromises the sensitive data or proves to be too costly with increased overhead. 
Any common multiple method for all data becomes vulnerable when the common security pattern is identified at the event of successful attack on any information and also encourages more attacks on all other data. This paper suggests an adaptive multilevel security framework based on cryptography techniques that provide adequate security for the classified data stored in cloud. The proposed security system acclimates well for cloud environment and is also customizable and more reliant to meet the required level of security of data with different sensitivity that changes with business needs and commercial conditions.}, } @article {pmid26254488, year = {2015}, author = {De Witte, D and Van de Velde, J and Decap, D and Van Bel, M and Audenaert, P and Demeester, P and Dhoedt, B and Vandepoele, K and Fostier, J}, title = {BLSSpeller: exhaustive comparative discovery of conserved cis-regulatory elements.}, journal = {Bioinformatics (Oxford, England)}, volume = {31}, number = {23}, pages = {3758-3766}, pmid = {26254488}, issn = {1367-4811}, mesh = {*Algorithms ; Base Sequence ; Binding Sites ; Conserved Sequence ; DNA, Plant/chemistry ; *Genome, Plant ; Nucleotide Motifs ; *Promoter Regions, Genetic ; Sequence Alignment ; Sequence Analysis, DNA/*methods ; Software ; Transcription Factors/metabolism ; }, abstract = {MOTIVATION: The accurate discovery and annotation of regulatory elements remains a challenging problem. The growing number of sequenced genomes creates new opportunities for comparative approaches to motif discovery. Putative binding sites are then considered to be functional if they are conserved in orthologous promoter sequences of multiple related species. Existing methods for comparative motif discovery usually rely on pregenerated multiple sequence alignments, which are difficult to obtain for more diverged species such as plants. As a consequence, misaligned regulatory elements often remain undetected.

RESULTS: We present a novel algorithm that supports both alignment-free and alignment-based motif discovery in the promoter sequences of related species. Putative motifs are exhaustively enumerated as words over the IUPAC alphabet and screened for conservation using the branch length score. Additionally, a confidence score is established in a genome-wide fashion. In order to take advantage of a cloud computing infrastructure, the MapReduce programming model is adopted. The method is applied to four monocotyledon plant species and it is shown that high-scoring motifs are significantly enriched for open chromatin regions in Oryza sativa and for transcription factor binding sites inferred through protein-binding microarrays in O.sativa and Zea mays. Furthermore, the method is shown to recover experimentally profiled ga2ox1-like KN1 binding sites in Z.mays.

BLSSpeller was written in Java. Source code and manual are available at http://bioinformatics.intec.ugent.be/blsspeller

CONTACT: Klaas.Vandepoele@psb.vib-ugent.be or jan.fostier@intec.ugent.be.

SUPPLEMENTARY INFORMATION: Supplementary data are available at Bioinformatics online.}, } @article {pmid26248053, year = {2015}, author = {Griffith, M and Walker, JR and Spies, NC and Ainscough, BJ and Griffith, OL}, title = {Informatics for RNA Sequencing: A Web Resource for Analysis on the Cloud.}, journal = {PLoS computational biology}, volume = {11}, number = {8}, pages = {e1004393}, pmid = {26248053}, issn = {1553-7358}, support = {T32 CA113275/CA/NCI NIH HHS/United States ; K22 CA188163/CA/NCI NIH HHS/United States ; K99 HG007940/HG/NHGRI NIH HHS/United States ; K99HG007940/HG/NHGRI NIH HHS/United States ; K22CA188163/CA/NCI NIH HHS/United States ; R00 HG007940/HG/NHGRI NIH HHS/United States ; }, mesh = {Computational Biology/*methods ; Gene Expression Profiling ; Humans ; *Internet ; *RNA/analysis/chemistry/genetics/isolation & purification ; Sequence Analysis, RNA/*methods ; *Software ; }, abstract = {Massively parallel RNA sequencing (RNA-seq) has rapidly become the assay of choice for interrogating RNA transcript abundance and diversity. This article provides a detailed introduction to fundamental RNA-seq molecular biology and informatics concepts. We make available open-access RNA-seq tutorials that cover cloud computing, tool installation, relevant file formats, reference genomes, transcriptome annotations, quality-control strategies, expression, differential expression, and alternative splicing analysis methods. These tutorials and additional training resources are accompanied by complete analysis pipelines and test datasets made available without encumbrance at www.rnaseq.wiki.}, } @article {pmid26246857, year = {2015}, author = {Siwo, GH and Williams, SM and Moore, JH}, title = {The future of genomic medicine education in Africa.}, journal = {Genome medicine}, volume = {7}, number = {1}, pages = {47}, pmid = {26246857}, issn = {1756-994X}, abstract = {There are many challenges and opportunities for Africans in the emerging area of genome medicine. 
In particular, there is a need for investment in local education using real-world African genetic data sets. Cloud-based computing platforms offer one solution for engaging the next generation of biomedical scientists in tackling disease in Africa, and by extension, the world.}, } @article {pmid26232829, year = {2015}, author = {Baumrind, S and Curry, S}, title = {American Association of Orthodontists Foundation Craniofacial Growth Legacy Collection: Overview of a powerful tool for orthodontic research and teaching.}, journal = {American journal of orthodontics and dentofacial orthopedics : official publication of the American Association of Orthodontists, its constituent societies, and the American Board of Orthodontics}, volume = {148}, number = {2}, pages = {217-225}, doi = {10.1016/j.ajodo.2015.06.002}, pmid = {26232829}, issn = {1097-6752}, mesh = {Adolescent ; Age Determination by Skeleton ; Canada ; Cephalometry ; Child ; Child, Preschool ; Data Display ; *Databases, Factual ; Dental Records ; Dental Research ; Education, Dental ; Ethnicity ; Female ; *Foundations ; Humans ; Information Storage and Retrieval ; Internet ; Longitudinal Studies ; Male ; Malocclusion/physiopathology/therapy ; Maxillofacial Development/*physiology ; Models, Dental ; Online Systems ; *Orthodontics/education ; Radiography, Dental ; United States ; User-Computer Interface ; }, abstract = {This article reports on the current status of the American Association of Orthodontists Foundation (AAOF) Craniofacial Growth Legacy Collection--an AAOF-supported multi-institutional project that uses the Internet and cloud computing to collect and share craniofacial images and data for orthodontic research and education. The project gives investigators and clinicians all over the world online access to longitudinal information on craniofacial development in untreated children with malocclusions of various types. 
It also is a unique source of control samples for testing the validity of consensually accepted beliefs about the effects of orthodontic treatment or of failure to treat.}, } @article {pmid26230400, year = {2015}, author = {Bildosola, I and Río-Belver, R and Cilleruelo, E and Garechana, G}, title = {Design and Implementation of a Cloud Computing Adoption Decision Tool: Generating a Cloud Road.}, journal = {PloS one}, volume = {10}, number = {7}, pages = {e0134563}, doi = {10.1371/journal.pone.0134563}, pmid = {26230400}, issn = {1932-6203}, mesh = {*Cloud Computing ; *Decision Making ; *Diffusion of Innovation ; Pilot Projects ; }, abstract = {Migrating to cloud computing is one of the current enterprise challenges. This technology provides a new paradigm based on "on-demand payment" for information and communication technologies. In this sense, the small and medium enterprise is supposed to be the most interested, since initial investments are avoided and the technology allows gradual implementation. However, even if the characteristics and capacities have been widely discussed, entry into the cloud is still lacking in terms of practical, real frameworks. This paper aims at filling this gap, presenting a real tool already implemented and tested, which can be used as a cloud computing adoption decision tool. This tool uses diagnosis based on specific questions to gather the required information and subsequently provide the user with valuable information to deploy the business within the cloud, specifically in the form of Software as a Service (SaaS) solutions. This information allows the decision makers to generate their particular Cloud Road. A pilot study has been carried out with enterprises at a local level with a two-fold objective: to ascertain the degree of knowledge on cloud computing and to identify the most interesting business areas and their related tools for this technology. 
As expected, the results show high interest and low knowledge on this subject and the tool presented aims to readdress this mismatch, insofar as possible.}, } @article {pmid26220186, year = {2015}, author = {Ohmann, C and Canham, S and Danielyan, E and Robertshaw, S and Legré, Y and Clivio, L and Demotes, J}, title = {'Cloud computing' and clinical trials: report from an ECRIN workshop.}, journal = {Trials}, volume = {16}, number = {}, pages = {318}, doi = {10.1186/s13063-015-0835-6}, pmid = {26220186}, issn = {1745-6215}, mesh = {Clinical Trials as Topic/*methods/standards ; *Cloud Computing/standards ; Computer Security ; Guidelines as Topic ; Humans ; *Research Design/standards ; Risk Assessment ; }, abstract = {Growing use of cloud computing in clinical trials prompted the European Clinical Research Infrastructures Network, a European non-profit organisation established to support multinational clinical research, to organise a one-day workshop on the topic to clarify potential benefits and risks. The issues that arose in that workshop are summarised and include the following: the nature of cloud computing and the cloud computing industry; the risks in using cloud computing services now; the lack of explicit guidance on this subject, both generally and with reference to clinical trials; and some possible ways of reducing risks. There was particular interest in developing and using a European 'community cloud' specifically for academic clinical trial data. It was recognised that the day-long workshop was only the start of an ongoing process. 
Future discussion needs to include clarification of trial-specific regulatory requirements for cloud computing and involve representatives from the relevant regulatory bodies.}, } @article {pmid26204738, year = {2015}, author = {Xi, H and Gan, G and Zhang, H and Chen, C}, title = {[Design of Smart Care Tele-Monitoring System for Mother and Fetus].}, journal = {Zhongguo yi liao qi xie za zhi = Chinese journal of medical instrumentation}, volume = {39}, number = {2}, pages = {102-104}, pmid = {26204738}, issn = {1671-7104}, mesh = {Cell Phone ; Female ; Fetus ; Humans ; Internet ; Monitoring, Physiologic/*instrumentation ; Pregnancy ; *Wireless Technology ; }, abstract = {OBJECTIVE: To study and design a maternal and fetal monitoring system based on the cloud computing and internet of things, which can monitor and take smart care of the mother and fetus in 24 h.

METHODS: Using a new kind of wireless fetal monitoring detector and a mobile phone, the doctor can keep in touch with the hospital through the internet. The mobile terminal was developed on the Android system, which accepted the data of fetal heart rate and uterine contraction transmitted from the wireless detector, exchanged information with the server and displayed the monitoring data and the doctor's advice in real-time.

RESULTS: The mobile phone displayed the fetal heart rate line and uterine contraction line in real-time and recorded the fetus's growth process. It implemented real-time communication between the doctor and the user through wireless communication technology.

CONCLUSIONS: The system removes the constraint of traditional telephone cable for users, while the users can get remote monitoring from the medical institutions at home or in the nearest community at any time, providing health and safety guarantee for mother and fetus.}, } @article {pmid28293572, year = {2015}, author = {Patrick, JR}, title = {How mHealth will spur consumer-led healthcare.}, journal = {mHealth}, volume = {1}, number = {}, pages = {14}, pmid = {28293572}, issn = {2306-9740}, abstract = {Consumer attitudes about their healthcare are beginning to shift. They are taking more responsibility for their health and seeking to collaborate with their doctors. In some cases consumers will engage in self-diagnosis. Mobile health apps and devices, in combination with cloud computing, will play a major role to empower consumers. Consumer expectations for healthcare are rising by the day. mHealth has provided empowerment to patients through the power of the PSC, which I describe as personal supercomputers. The number of devices and apps are exploding onto the healthcare scene. Although some providers are not comfortable with consumer technology for self-diagnosis, the new technologies will lead to a new model for collaboration between patient and physician.}, } @article {pmid26157454, year = {2015}, author = {Duarte, AM and Psomopoulos, FE and Blanchet, C and Bonvin, AM and Corpas, M and Franc, A and Jimenez, RC and de Lucas, JM and Nyrönen, T and Sipos, G and Suhr, SB}, title = {Future opportunities and trends for e-infrastructures and life sciences: going beyond the grid to enable life science data analysis.}, journal = {Frontiers in genetics}, volume = {6}, number = {}, pages = {197}, pmid = {26157454}, issn = {1664-8021}, abstract = {With the increasingly rapid growth of data in life sciences we are witnessing a major transition in the way research is conducted, from hypothesis-driven studies to data-driven simulations of whole systems. 
Such approaches necessitate the use of large-scale computational resources and e-infrastructures, such as the European Grid Infrastructure (EGI). EGI, one of the key enablers of the digital European Research Area, is a federation of resource providers set up to deliver sustainable, integrated and secure computing services to European researchers and their international partners. Here we aim to provide the state of the art of Grid/Cloud computing in EU research as viewed from within the field of life sciences, focusing on key infrastructures and projects within the life sciences community. Rather than focusing purely on the technical aspects underlying the currently provided solutions, we outline the design aspects and key characteristics that can be identified across major research approaches. Overall, we aim to provide significant insights into the road ahead by establishing ever-strengthening connections between EGI as a whole and the life sciences community.}, } @article {pmid26156351, year = {2015}, author = {Gibney, E}, title = {European labs set sights on continent-wide computing cloud.}, journal = {Nature}, volume = {523}, number = {7559}, pages = {136-137}, doi = {10.1038/523136a}, pmid = {26156351}, issn = {1476-4687}, mesh = {*Computing Methodologies ; Europe ; Information Storage and Retrieval/economics/standards/*trends ; Science/standards/*trends ; }, } @article {pmid26146645, year = {2014}, author = {Chen, F and Wang, S and Mohammed, N and Cheng, S and Jiang, X}, title = {PRECISE:PRivacy-prEserving Cloud-assisted quality Improvement Service in hEalthcare.}, journal = {IEEE International Conference on Systems Biology : [proceedings]. 
IEEE International Conference on Systems Biology}, volume = {2014}, number = {}, pages = {176-183}, pmid = {26146645}, issn = {2325-0704}, support = {K99 HG008175/HG/NHGRI NIH HHS/United States ; R00 LM011392/LM/NLM NIH HHS/United States ; R21 LM012060/LM/NLM NIH HHS/United States ; U54 HL108460/HL/NHLBI NIH HHS/United States ; }, abstract = {Quality improvement (QI) requires systematic and continuous efforts to enhance healthcare services. A healthcare provider might wish to compare local statistics with those from other institutions in order to identify problems and develop intervention to improve the quality of care. However, the sharing of institution information may be deterred by institutional privacy as publicizing such statistics could lead to embarrassment and even financial damage. In this article, we propose a PRivacy-prEserving Cloud-assisted quality Improvement Service in hEalthcare (PRECISE), which aims at enabling cross-institution comparison of healthcare statistics while protecting privacy. The proposed framework relies on a set of state-of-the-art cryptographic protocols including homomorphic encryption and Yao's garbled circuit schemes. By securely pooling data from different institutions, PRECISE can rank the encrypted statistics to facilitate QI among participating institutes. 
We conducted experiments using MIMIC II database and demonstrated the feasibility of the proposed PRECISE framework.}, } @article {pmid26143084, year = {2015}, author = {Wilkinson, ME and Mackay, E and Quinn, PF and Stutter, M and Beven, KJ and MacLeod, CJA and Macklin, MG and Elkhatib, Y and Percy, B and Vitolo, C and Haygarth, PM}, title = {A cloud based tool for knowledge exchange on local scale flood risk.}, journal = {Journal of environmental management}, volume = {161}, number = {}, pages = {38-50}, doi = {10.1016/j.jenvman.2015.06.009}, pmid = {26143084}, issn = {1095-8630}, mesh = {*Floods ; *Internet ; Models, Theoretical ; Risk ; *Software ; United Kingdom ; }, abstract = {There is an emerging and urgent need for new approaches for the management of environmental challenges such as flood hazard in the broad context of sustainability. This requires a new way of working which bridges disciplines and organisations, and that breaks down science-culture boundaries. With this, there is growing recognition that the appropriate involvement of local communities in catchment management decisions can result in multiple benefits. However, new tools are required to connect organisations and communities. The growth of cloud based technologies offers a novel way to facilitate this process of exchange of information in environmental science and management; however, stakeholders need to be engaged with as part of the development process from the beginning rather than being presented with a final product at the end. Here we present the development of a pilot Local Environmental Virtual Observatory Flooding Tool. The aim was to develop a cloud based learning platform for stakeholders, bringing together fragmented data, models and visualisation tools that will enable these stakeholders to make scientifically informed environmental management decisions at the local scale. 
It has been developed by engaging with different stakeholder groups in three catchment case studies in the UK and a panel of national experts in relevant topic areas. However, these case study catchments are typical of many northern latitude catchments. The tool was designed to communicate flood risk in locally impacted communities whilst engaging with landowners/farmers about the risk of runoff from the farmed landscape. It has been developed iteratively to reflect the needs, interests and capabilities of a wide range of stakeholders. The pilot tool combines cloud based services, local catchment datasets, a hydrological model and bespoke visualisation tools to explore real time hydrometric data and the impact of flood risk caused by future land use changes. The novel aspects of the pilot tool are; the co-evolution of tools on a cloud based platform with stakeholders, policy and scientists; encouraging different science disciplines to work together; a wealth of information that is accessible and understandable to a range of stakeholders; and provides a framework for how to approach the development of such a cloud based tool in the future. Above all, stakeholders saw the tool and the potential of cloud technologies as an effective means to taking a whole systems approach to solving environmental issues. This sense of community ownership is essential in order to facilitate future appropriate and acceptable land use management decisions to be co-developed by local catchment communities. 
The development processes and the resulting pilot tool could be applied to local catchments globally to facilitate bottom up catchment management approaches.}, } @article {pmid26138567, year = {2015}, author = {Karthikeyan, M and Pandit, Y and Pandit, D and Vyas, R}, title = {MegaMiner: A Tool for Lead Identification Through Text Mining Using Chemoinformatics Tools and Cloud Computing Environment.}, journal = {Combinatorial chemistry & high throughput screening}, volume = {18}, number = {6}, pages = {591-603}, doi = {10.2174/1386207318666150703113525}, pmid = {26138567}, issn = {1875-5402}, mesh = {Antimalarials/*chemistry ; *Cloud Computing ; Data Mining ; Humans ; Molecular Structure ; PubMed ; *User-Computer Interface ; }, abstract = {Virtual screening is an indispensable tool to cope with the massive amount of data being tossed by the high throughput omics technologies. With the objective of enhancing the automation capability of virtual screening process a robust portal termed MegaMiner has been built using the cloud computing platform wherein the user submits a text query and directly accesses the proposed lead molecules along with their drug-like, lead-like and docking scores. Textual chemical structural data representation is fraught with ambiguity in the absence of a global identifier. We have used a combination of statistical models, chemical dictionary and regular expression for building a disease specific dictionary. To demonstrate the effectiveness of this approach, a case study on malaria has been carried out in the present work. MegaMiner offered superior results compared to other text mining search engines, as established by F score analysis. A single query term 'malaria' in the portlet led to retrieval of related PubMed records, protein classes, drug classes and 8000 scaffolds which were internally processed and filtered to suggest new molecules as potential anti-malarials. 
The results obtained were validated by docking the virtual molecules into relevant protein targets. It is hoped that MegaMiner will serve as an indispensable tool not only for identifying hidden relationships between various biological and chemical entities but also for building better corpus and ontologies.}, } @article {pmid26138566, year = {2015}, author = {Karthikeyan, M and Pandit, D and Bhavasar, A and Vyas, R}, title = {Design and Development of ChemInfoCloud: An Integrated Cloud Enabled Platform for Virtual Screening.}, journal = {Combinatorial chemistry & high throughput screening}, volume = {18}, number = {6}, pages = {604-619}, doi = {10.2174/1386207318666150703113656}, pmid = {26138566}, issn = {1875-5402}, mesh = {Anti-Allergic Agents/chemistry ; Antihypertensive Agents/chemistry ; *Cloud Computing ; Computational Biology ; *Data Mining ; *Drug Design ; Ligands ; Sequence Alignment ; }, abstract = {The power of cloud computing and distributed computing has been harnessed to handle vast and heterogeneous data required to be processed in any virtual screening protocol. A cloud computing platform ChemInfoCloud was built and integrated with several chemoinformatics and bioinformatics tools. The robust engine performs the core chemoinformatics tasks of lead generation, lead optimisation and property prediction in a fast and efficient manner. It has also been provided with some of the bioinformatics functionalities including sequence alignment, active site pose prediction and protein ligand docking. Text mining, NMR chemical shift (1H, 13C) prediction and reaction fingerprint generation modules for efficient lead discovery are also implemented in this platform. 
We have developed an integrated problem solving cloud environment for virtual screening studies that also provides workflow management, better usability and interaction with end users using container based virtualization, OpenVz.}, } @article {pmid26130132, year = {2015}, author = {Fosso, B and Santamaria, M and Marzano, M and Alonso-Alemany, D and Valiente, G and Donvito, G and Monaco, A and Notarangelo, P and Pesole, G}, title = {BioMaS: a modular pipeline for Bioinformatic analysis of Metagenomic AmpliconS.}, journal = {BMC bioinformatics}, volume = {16}, number = {}, pages = {203}, pmid = {26130132}, issn = {1471-2105}, mesh = {Bacteria/*genetics ; Biodiversity ; Computational Biology/*methods ; Fungi/*genetics ; High-Throughput Nucleotide Sequencing/*methods ; *Metagenomics ; *Software ; }, abstract = {BACKGROUND: Substantial advances in microbiology, molecular evolution and biodiversity have been carried out in recent years thanks to Metagenomics, which allows to unveil the composition and functions of mixed microbial communities in any environmental niche. If the investigation is aimed only at the microbiome taxonomic structure, a target-based metagenomic approach, here also referred as Meta-barcoding, is generally applied. This approach commonly involves the selective amplification of a species-specific genetic marker (DNA meta-barcode) in the whole taxonomic range of interest and the exploration of its taxon-related variants through High-Throughput Sequencing (HTS) technologies. The accessibility to proper computational systems for the large-scale bioinformatic analysis of HTS data represents, currently, one of the major challenges in advanced Meta-barcoding projects.

RESULTS: BioMaS (Bioinformatic analysis of Metagenomic AmpliconS) is a new bioinformatic pipeline designed to support biomolecular researchers involved in taxonomic studies of environmental microbial communities by a completely automated workflow, comprehensive of all the fundamental steps, from raw sequence data upload and cleaning to final taxonomic identification, that are absolutely required in an appropriately designed Meta-barcoding HTS-based experiment. In its current version, BioMaS allows the analysis of both bacterial and fungal environments starting directly from the raw sequencing data from either Roche 454 or Illumina HTS platforms, following two alternative paths, respectively. BioMaS is implemented into a public web service available at https://recasgateway.ba.infn.it/ and is also available in Galaxy at http://galaxy.cloud.ba.infn.it:8080 (only for Illumina data).

CONCLUSION: BioMaS is a friendly pipeline for Meta-barcoding HTS data analysis specifically designed for users without particular computing skills. A comparative benchmark, carried out by using a simulated dataset suitably designed to broadly represent the currently known bacterial and fungal world, showed that BioMaS outperforms QIIME and MOTHUR in terms of extent and accuracy of deep taxonomic sequence assignments.}, } @article {pmid26132225, year = {2015}, author = {Mulfari, D and Celesti, A and Villari, M and Puliafito, A}, title = {Providing Assistive Technology Applications as a Service Through Cloud Computing.}, journal = {Assistive technology : the official journal of RESNA}, volume = {27}, number = {1}, pages = {44-51}, doi = {10.1080/10400435.2014.963258}, pmid = {26132225}, issn = {1040-0435}, mesh = {Information Dissemination/*methods ; Information Storage and Retrieval/*methods ; *Internet ; *Self-Help Devices ; *Software ; Therapy, Computer-Assisted ; *User-Computer Interface ; }, abstract = {Users with disabilities interact with Personal Computers (PCs) using Assistive Technology (AT) software solutions. Such applications run on a PC that a person with a disability commonly uses. However the configuration of AT applications is not trivial at all, especially whenever the user needs to work on a PC that does not allow him/her to rely on his / her AT tools (e.g., at work, at university, in an Internet point). In this paper, we discuss how cloud computing provides a valid technological solution to enhance such a scenario.With the emergence of cloud computing, many applications are executed on top of virtual machines (VMs). Virtualization allows us to achieve a software implementation of a real computer able to execute a standard operating system and any kind of application. In this paper we propose to build personalized VMs running AT programs and settings. 
By using the remote desktop technology, our solution enables users to control their customized virtual desktop environment by means of an HTML5-based web interface running on any computer equipped with a browser, whenever they are.}, } @article {pmid26125049, year = {2015}, author = {Jeon, H and Seo, KK}, title = {A Framework and Improvements of the Korea Cloud Services Certification System.}, journal = {TheScientificWorldJournal}, volume = {2015}, number = {}, pages = {918075}, pmid = {26125049}, issn = {1537-744X}, abstract = {Cloud computing service is an evolving paradigm that affects a large part of the ICT industry and provides new opportunities for ICT service providers such as the deployment of new business models and the realization of economies of scale by increasing efficiency of resource utilization. However, despite benefits of cloud services, there are some obstacles to adopt such as lack of assessing and comparing the service quality of cloud services regarding availability, security, and reliability. In order to adopt the successful cloud service and activate it, it is necessary to establish the cloud service certification system to ensure service quality and performance of cloud services. This paper proposes a framework and improvements of the Korea certification system of cloud service. In order to develop it, the critical issues related to service quality, performance, and certification of cloud service are identified and the systematic framework for the certification system of cloud services and service provider domains are developed. 
Improvements of the developed Korea certification system of cloud services are also proposed.}, } @article {pmid26110529, year = {2015}, author = {Shringarpure, SS and Carroll, A and De La Vega, FM and Bustamante, CD}, title = {Inexpensive and Highly Reproducible Cloud-Based Variant Calling of 2,535 Human Genomes.}, journal = {PloS one}, volume = {10}, number = {6}, pages = {e0129277}, pmid = {26110529}, issn = {1932-6203}, support = {U01 HG005715/HG/NHGRI NIH HHS/United States ; U01HG005715/HG/NHGRI NIH HHS/United States ; }, mesh = {Cloud Computing/economics ; Databases, Genetic ; *Genetic Variation ; *Genome, Human ; High-Throughput Nucleotide Sequencing/economics/*methods ; Humans ; Software ; }, abstract = {Population scale sequencing of whole human genomes is becoming economically feasible; however, data management and analysis remains a formidable challenge for many research groups. Large sequencing studies, like the 1000 Genomes Project, have improved our understanding of human demography and the effect of rare genetic variation in disease. Variant calling on datasets of hundreds or thousands of genomes is time-consuming, expensive, and not easily reproducible given the myriad components of a variant calling pipeline. Here, we describe a cloud-based pipeline for joint variant calling in large samples using the Real Time Genomics population caller. We deployed the population caller on the Amazon cloud with the DNAnexus platform in order to achieve low-cost variant calling. Using our pipeline, we were able to identify 68.3 million variants in 2,535 samples from Phase 3 of the 1000 Genomes Project. By performing the variant calling in a parallel manner, the data was processed within 5 days at a compute cost of $7.33 per sample (a total cost of $18,590 for completed jobs and $21,805 for all jobs). 
Analysis of cost dependence and running time on the data size suggests that, given near linear scalability, cloud computing can be a cheap and efficient platform for analyzing even larger sequencing studies in the future.}, } @article {pmid26110092, year = {2015}, author = {Mateevitsi, V and Patel, T and Leigh, J and Levy, B}, title = {Reimagining the microscope in the 21(st) century using the scalable adaptive graphics environment.}, journal = {Journal of pathology informatics}, volume = {6}, number = {}, pages = {25}, pmid = {26110092}, issn = {2229-5089}, abstract = {BACKGROUND: Whole-slide imaging (WSI), while technologically mature, remains in the early adopter phase of the technology adoption lifecycle. One reason for this current situation is that current methods of visualizing and using WSI closely follow long-existing workflows for glass slides. We set out to "reimagine" the digital microscope in the era of cloud computing by combining WSI with the rich collaborative environment of the Scalable Adaptive Graphics Environment (SAGE). SAGE is a cross-platform, open-source visualization and collaboration tool that enables users to access, display and share a variety of data-intensive information, in a variety of resolutions and formats, from multiple sources, on display walls of arbitrary size.

METHODS: A prototype of a WSI viewer app in the SAGE environment was created. While not full featured, it enabled the testing of our hypothesis that these technologies could be blended together to change the essential nature of how microscopic images are utilized for patient care, medical education, and research.

RESULTS: Using the newly created WSI viewer app, demonstration scenarios were created in the patient care and medical education scenarios. This included a live demonstration of a pathology consultation at the International Academy of Digital Pathology meeting in Boston in November 2014.

CONCLUSIONS: SAGE is well suited to display, manipulate and collaborate using WSIs, along with other images and data, for a variety of purposes. It goes beyond how glass slides and current WSI viewers are being used today, changing the nature of digital pathology in the process. A fully developed WSI viewer app within SAGE has the potential to encourage the wider adoption of WSI throughout pathology.}, } @article {pmid26087372, year = {2015}, author = {Marie, P and Desprats, T and Chabridon, S and Sibilla, M and Taconet, C}, title = {From Ambient Sensing to IoT-based Context Computing: An Open Framework for End to End QoC Management.}, journal = {Sensors (Basel, Switzerland)}, volume = {15}, number = {6}, pages = {14180-14206}, pmid = {26087372}, issn = {1424-8220}, abstract = {Quality of Context (QoC) awareness is recognized as a key point for the success of context-aware computing. At the time where the combination of the Internet of Things, Cloud Computing, and Ambient Intelligence paradigms offer together new opportunities for managing richer context data, the next generation of Distributed Context Managers (DCM) is facing new challenges concerning QoC management. This paper presents our model-driven QoCIM framework. QoCIM is the acronym for Quality of Context Information Model. We show how it can help application developers to manage the whole QoC life-cycle by providing genericity, openness and uniformity. 
Its usages are illustrated, both at design time and at runtime, in the case of an urban pollution context- and QoC-aware scenario.}, } @article {pmid27279746, year = {2015}, author = {Shatil, AS and Younas, S and Pourreza, H and Figley, CR}, title = {Heads in the Cloud: A Primer on Neuroimaging Applications of High Performance Computing.}, journal = {Magnetic resonance insights}, volume = {8}, number = {Suppl 1}, pages = {69-80}, pmid = {27279746}, issn = {1178-623X}, abstract = {With larger data sets and more sophisticated analyses, it is becoming increasingly common for neuroimaging researchers to push (or exceed) the limitations of standalone computer workstations. Nonetheless, although high-performance computing platforms such as clusters, grids and clouds are already in routine use by a small handful of neuroimaging researchers to increase their storage and/or computational power, the adoption of such resources by the broader neuroimaging community remains relatively uncommon. Therefore, the goal of the current manuscript is to: 1) inform prospective users about the similarities and differences between computing clusters, grids and clouds; 2) highlight their main advantages; 3) discuss when it may (and may not) be advisable to use them; 4) review some of their potential problems and barriers to access; and finally 5) give a few practical suggestions for how interested new users can start analyzing their neuroimaging data using cloud resources. Although the aim of cloud computing is to hide most of the complexity of the infrastructure management from end-users, we recognize that this can still be an intimidating area for cognitive neuroscientists, psychologists, neurologists, radiologists, and other neuroimaging researchers lacking a strong computational background. 
Therefore, with this in mind, we have aimed to provide a basic introduction to cloud computing in general (including some of the basic terminology, computer architectures, infrastructure and service models, etc.), a practical overview of the benefits and drawbacks, and a specific focus on how cloud resources can be used for various neuroimaging applications.}, } @article {pmid27170907, year = {2015}, author = {Khazaei, H and Mench-Bressan, N and McGregor, C and Pugh, JE}, title = {Health Informatics for Neonatal Intensive Care Units: An Analytical Modeling Perspective.}, journal = {IEEE journal of translational engineering in health and medicine}, volume = {3}, number = {}, pages = {3000109}, pmid = {27170907}, issn = {2168-2372}, abstract = {The effective use of data within intensive care units (ICUs) has great potential to create new cloud-based health analytics solutions for disease prevention or earlier condition onset detection. The Artemis project aims to achieve the above goals in the area of neonatal ICUs (NICU). In this paper, we proposed an analytical model for the Artemis cloud project which will be deployed at McMaster Children's Hospital in Hamilton. We collect not only physiological data but also the infusion pumps data that are attached to NICU beds. Using the proposed analytical model, we predict the amount of storage, memory, and computation power required for the system. Capacity planning and tradeoff analysis would be more accurate and systematic by applying the proposed analytical model in this paper. 
Numerical results are obtained using real inputs acquired from McMaster Children's Hospital and a pilot deployment of the system at The Hospital for Sick Children (SickKids) in Toronto.}, } @article {pmid26958447, year = {2015}, author = {Gates, RS and McLean, MJ and Osborn, WA}, title = {Smart Electronic Laboratory Notebooks for the NIST Research Environment.}, journal = {Journal of research of the National Institute of Standards and Technology}, volume = {120}, number = {}, pages = {293-303}, doi = {10.6028/jres.120.018}, pmid = {26958447}, issn = {1044-677X}, abstract = {Laboratory notebooks have been a staple of scientific research for centuries for organizing and documenting ideas and experiments. Modern laboratories are increasingly reliant on electronic data collection and analysis, so it seems inevitable that the digital revolution should come to the ordinary laboratory notebook. The most important aspect of this transition is to make the shift as comfortable and intuitive as possible, so that the creative process that is the hallmark of scientific investigation and engineering achievement is maintained, and ideally enhanced. The smart electronic laboratory notebooks described in this paper represent a paradigm shift from the old pen and paper style notebooks and provide a host of powerful operational and documentation capabilities in an intuitive format that is available anywhere at any time.}, } @article {pmid26958178, year = {2015}, author = {Cui, L}, title = {COHeRE: Cross-Ontology Hierarchical Relation Examination for Ontology Quality Assurance.}, journal = {AMIA ... Annual Symposium proceedings. 
AMIA Symposium}, volume = {2015}, number = {}, pages = {456-465}, pmid = {26958178}, issn = {1942-597X}, mesh = {*Biological Ontologies/organization & administration ; Cloud Computing/*standards ; Health Information Management/organization & administration/*standards ; Information Storage and Retrieval/standards ; International Classification of Diseases/standards ; Semantics ; Systematized Nomenclature of Medicine ; *Unified Medical Language System/organization & administration/standards ; }, abstract = {Biomedical ontologies play a vital role in healthcare information management, data integration, and decision support. Ontology quality assurance (OQA) is an indispensable part of the ontology engineering cycle. Most existing OQA methods are based on the knowledge provided within the targeted ontology. This paper proposes a novel cross-ontology analysis method, Cross-Ontology Hierarchical Relation Examination (COHeRE), to detect inconsistencies and possible errors in hierarchical relations across multiple ontologies. COHeRE leverages the Unified Medical Language System (UMLS) knowledge source and the MapReduce cloud computing technique for systematic, large-scale ontology quality assurance work. COHeRE consists of three main steps with the UMLS concepts and relations as the input. First, the relations claimed in source vocabularies are filtered and aggregated for each pair of concepts. Second, inconsistent relations are detected if a concept pair is related by different types of relations in different source vocabularies. Finally, the uncovered inconsistent relations are voted according to their number of occurrences across different source vocabularies. The voting result together with the inconsistent relations serve as the output of COHeRE for possible ontological change. The highest votes provide initial suggestion on how such inconsistencies might be fixed. 
In UMLS, 138,987 concept pairs were found to have inconsistent relationships across multiple source vocabularies. 40 inconsistent concept pairs involving hierarchical relationships were randomly selected and manually reviewed by a human expert. 95.8% of the inconsistent relations involved in these concept pairs indeed exist in their source vocabularies rather than being introduced by mistake in the UMLS integration process. 73.7% of the concept pairs with suggested relationship were agreed by the human expert. The effectiveness of COHeRE indicates that UMLS provides a promising environment to enhance qualities of biomedical ontologies by performing cross-ontology examination.}, } @article {pmid26958172, year = {2015}, author = {Chen, R and Su, H and Khalilia, M and Lin, S and Peng, Y and Davis, T and Hirsh, DA and Searles, E and Tejedor-Sojo, J and Thompson, M and Sun, J}, title = {Cloud-based Predictive Modeling System and its Application to Asthma Readmission Prediction.}, journal = {AMIA ... Annual Symposium proceedings. AMIA Symposium}, volume = {2015}, number = {}, pages = {406-415}, pmid = {26958172}, issn = {1942-597X}, support = {T32 GM008169/GM/NIGMS NIH HHS/United States ; T32 GM105490/GM/NIGMS NIH HHS/United States ; }, mesh = {*Asthma/therapy ; *Cloud Computing ; Computational Biology ; Computer Simulation ; Electronic Health Records/*organization & administration ; Forecasting ; Humans ; Models, Biological ; *Patient Readmission ; Prognosis ; }, abstract = {The predictive modeling process is time consuming and requires clinical researchers to handle complex electronic health record (EHR) data in restricted computational environments. To address this problem, we implemented a cloud-based predictive modeling system via a hybrid setup combining a secure private server with the Amazon Web Services (AWS) Elastic MapReduce platform. EHR data is preprocessed on a private server and the resulting de-identified event sequences are hosted on AWS. 
Based on user-specified modeling configurations, an on-demand web service launches a cluster of Elastic Compute 2 (EC2) instances on AWS to perform feature selection and classification algorithms in a distributed fashion. Afterwards, the secure private server aggregates results and displays them via interactive visualization. We tested the system on a pediatric asthma readmission task on a de-identified EHR dataset of 2,967 patients. We conduct a larger scale experiment on the CMS Linkable 2008-2010 Medicare Data Entrepreneurs' Synthetic Public Use File dataset of 2 million patients, which achieves over 25-fold speedup compared to sequential execution.}, } @article {pmid27532062, year = {2014}, author = {Phan, JH and Kothari, S and Wang, MD}, title = {omniClassifier: a Desktop Grid Computing System for Big Data Prediction Modeling.}, journal = {ACM-BCB ... ... : the ... ACM Conference on Bioinformatics, Computational Biology and Biomedicine. ACM Conference on Bioinformatics, Computational Biology and Biomedicine}, volume = {2014}, number = {}, pages = {514-523}, pmid = {27532062}, support = {RC2 CA148265/CA/NCI NIH HHS/United States ; U01 HL080711/HL/NHLBI NIH HHS/United States ; U54 CA119338/CA/NCI NIH HHS/United States ; }, abstract = {Robust prediction models are important for numerous science, engineering, and biomedical applications. However, best-practice procedures for optimizing prediction models can be computationally complex, especially when choosing models from among hundreds or thousands of parameter choices. Computational complexity has further increased with the growth of data in these fields, concurrent with the era of "Big Data". Grid computing is a potential solution to the computational challenges of Big Data. Desktop grid computing, which uses idle CPU cycles of commodity desktop machines, coupled with commercial cloud computing resources can enable research labs to gain easier and more cost effective access to vast computing resources. 
We have developed omniClassifier, a multi-purpose prediction modeling application that provides researchers with a tool for conducting machine learning research within the guidelines of recommended best-practices. omniClassifier is implemented as a desktop grid computing system using the Berkeley Open Infrastructure for Network Computing (BOINC) middleware. In addition to describing implementation details, we use various gene expression datasets to demonstrate the potential scalability of omniClassifier for efficient and robust Big Data prediction modeling. A prototype of omniClassifier can be accessed at http://omniclassifier.bme.gatech.edu/.}, } @article {pmid27355051, year = {2014}, author = {Yassin, AA}, title = {Efficiency and Flexibility of Fingerprint Scheme Using Partial Encryption and Discrete Wavelet Transform to Verify User in Cloud Computing.}, journal = {International scholarly research notices}, volume = {2014}, number = {}, pages = {351696}, pmid = {27355051}, issn = {2356-7872}, abstract = {Now, the security of digital images is considered more and more essential and fingerprint plays the main role in the world of image. Furthermore, fingerprint recognition is a scheme of biometric verification that applies pattern recognition techniques depending on image of fingerprint individually. In the cloud environment, an adversary has the ability to intercept information and must be secured from eavesdroppers. Unluckily, encryption and decryption functions are slow and they are often hard. Fingerprint techniques required extra hardware and software; it is masqueraded by artificial gummy fingers (spoof attacks). Additionally, when a large number of users are being verified at the same time, the mechanism will become slow. In this paper, we employed each of the partial encryptions of user's fingerprint and discrete wavelet transform to obtain a new scheme of fingerprint verification. 
Moreover, our proposed scheme can overcome those problems; it does not require cost, reduces the computational supplies for huge volumes of fingerprint images, and resists well-known attacks. In addition, experimental results illustrate that our proposed scheme has a good performance of user's fingerprint verification.}, } @article {pmid26852680, year = {2014}, author = {Vashist, SK and Schneider, EM and Luong, JH}, title = {Commercial Smartphone-Based Devices and Smart Applications for Personalized Healthcare Monitoring and Management.}, journal = {Diagnostics (Basel, Switzerland)}, volume = {4}, number = {3}, pages = {104-128}, pmid = {26852680}, issn = {2075-4418}, abstract = {Smartphone-based devices and applications (SBDAs) with cost effectiveness and remote sensing are the most promising and effective means of delivering mobile healthcare (mHealthcare). Several SBDAs have been commercialized for the personalized monitoring and/or management of basic physiological parameters, such as blood pressure, weight, body analysis, pulse rate, electrocardiograph, blood glucose, blood glucose saturation, sleeping and physical activity. With advances in Bluetooth technology, software, cloud computing and remote sensing, SBDAs provide real-time on-site analysis and telemedicine opportunities in remote areas. This scenario is of utmost importance for developing countries, where the number of smartphone users is about 70% of 6.8 billion cell phone subscribers worldwide with limited access to basic healthcare service. The technology platform facilitates patient-doctor communication and the patients to effectively manage and keep track of their medical conditions. Besides tremendous healthcare cost savings, SBDAs are very critical for the monitoring and effective management of emerging epidemics and food contamination outbreaks. The next decade will witness pioneering advances and increasing applications of SBDAs in this exponentially growing field of mHealthcare. 
This article provides a critical review of commercial SBDAs that are being widely used for personalized healthcare monitoring and management.}, } @article {pmid27877589, year = {2013}, author = {Tsai, TT and Shen, SW and Cheng, CM and Chen, CF}, title = {Paper-based tuberculosis diagnostic devices with colorimetric gold nanoparticles.}, journal = {Science and technology of advanced materials}, volume = {14}, number = {4}, pages = {044404}, pmid = {27877589}, issn = {1468-6996}, abstract = {A colorimetric sensing strategy employing gold nanoparticles and a paper assay platform has been developed for tuberculosis diagnosis. Unmodified gold nanoparticles and single-stranded detection oligonucleotides are used to achieve rapid diagnosis without complicated and time-consuming thiolated or other surface-modified probe preparation processes. To eliminate the use of sophisticated equipment for data analysis, the color variance for multiple detection results was simultaneously collected and concentrated on cellulose paper with the data readout transmitted for cloud computing via a smartphone. 
The results show that the 2.6 nM tuberculosis mycobacterium target sequences extracted from patients can easily be detected, and the turnaround time after the human DNA is extracted from clinical samples was approximately 1 h.}, } @article {pmid26270172, year = {2013}, author = {McKay, SJ and Skidmore, EJ and LaRose, CJ and Mercer, AW and Noutsos, C}, title = {Cloud Computing with iPlant Atmosphere.}, journal = {Current protocols in bioinformatics}, volume = {43}, number = {}, pages = {9.15.1-9.15.20}, doi = {10.1002/0471250953.bi0915s43}, pmid = {26270172}, issn = {1934-340X}, mesh = {Arabidopsis/genetics ; *Cloud Computing ; Genome ; Internet ; Sequence Analysis, RNA ; *Software ; User-Computer Interface ; }, abstract = {Cloud Computing refers to distributed computing platforms that use virtualization software to provide easy access to physical computing infrastructure and data storage, typically administered through a Web interface. Cloud-based computing provides access to powerful servers, with specific software and virtual hardware configurations, while eliminating the initial capital cost of expensive computers and reducing the ongoing operating costs of system administration, maintenance contracts, power consumption, and cooling. This eliminates a significant barrier to entry into bioinformatics and high-performance computing for many researchers. This is especially true of free or modestly priced cloud computing services. The iPlant Collaborative offers a free cloud computing service, Atmosphere, which allows users to easily create and use instances on virtual servers preconfigured for their analytical needs. Atmosphere is a self-service, on-demand platform for scientific computing. 
This unit demonstrates how to set up, access and use cloud computing in Atmosphere.}, } @article {pmid28518280, year = {2012}, author = {Andriole, K}, title = {MO-C-BRCD-03: The Role of Informatics in Medical Physics and Vice Versa.}, journal = {Medical physics}, volume = {39}, number = {6Part21}, pages = {3864}, doi = {10.1118/1.4735777}, pmid = {28518280}, issn = {2473-4209}, abstract = {UNLABELLED: Like Medical Physics, Imaging Informatics encompasses concepts touching every aspect of the imaging chain from image creation, acquisition, management and archival, to image processing, analysis, display and interpretation. The two disciplines are in fact quite complementary, with similar goals to improve the quality of care provided to patients using an evidence-based approach, to assure safety in the clinical and research environments, to facilitate efficiency in the workplace, and to accelerate knowledge discovery. Use-cases describing several areas of informatics activity will be given to illustrate current limitations that would benefit from medical physicist participation, and conversely areas in which informaticists may contribute to the solution. Topics to be discussed include radiation dose monitoring, process management and quality control, display technologies, business analytics techniques, and quantitative imaging. Quantitative imaging is increasingly becoming an essential part of biomedicalresearch as well as being incorporated into clinical diagnostic activities. Referring clinicians are asking for more objective information to be gleaned from the imaging tests that they order so that they may make the best clinical management decisions for their patients. Medical Physicists may be called upon to identify existing issues as well as develop, validate and implement new approaches and technologies to help move the field further toward quantitative imaging methods for the future. 
Biomedical imaging informatics tools and techniques such as standards, integration, data mining, cloud computing and new systems architectures, ontologies and lexicons, data visualization and navigation tools, and business analytics applications can be used to overcome some of the existing limitations.

LEARNING OBJECTIVES: 1. Describe what is meant by Medical Imaging Informatics and understand why the medical physicist should care. 2. Identify existing limitations in information technologies with respect to Medical Physics, and conversely see how Informatics may assist the medical physicist in filling some of the current gaps in their activities. 3. Understand general informatics concepts and areas of investigation including imaging and workflow standards, systems integration, computing architectures, ontologies, data mining and business analytics, data visualization and human-computer interface tools, and the importance of quantitative imaging for the future of Medical Physics and Imaging Informatics. 4. Become familiar with on-going efforts to address current challenges facing future research into and clinical implementation of quantitative imaging applications.}, } @article {pmid28517521, year = {2012}, author = {Na, Y and Suh, T and Xing, L}, title = {SU-E-T-628: A Cloud Computing Based Multi-Objective Optimization Method for Inverse Treatment Planning.}, journal = {Medical physics}, volume = {39}, number = {6Part20}, pages = {3850}, doi = {10.1118/1.4735718}, pmid = {28517521}, issn = {2473-4209}, abstract = {PURPOSE: Multi-objective (MO) plan optimization entails generation of an enormous number of IMRT or VMAT plans constituting the Pareto surface, which presents a computationally challenging task. The purpose of this work is to overcome the hurdle by developing an efficient MO method using emerging cloud computing platform.

METHODS: As a backbone of cloud computing for optimizing inverse treatment planning, Amazon Elastic Compute Cloud with a master node (17.1 GB memory, 2 virtual cores, 420 GB instance storage, 64-bit platform) is used. The master node is able to scale seamlessly a number of working group instances, called workers, based on the user-defined setting account for MO functions in clinical setting. Each worker solved the objective function with an efficient sparse decomposition method. The workers are automatically terminated if there are finished tasks. The optimized plans are archived to the master node to generate the Pareto solution set. Three clinical cases have been planned using the developed MO IMRT and VMAT planning tools to demonstrate the advantages of the proposed method.

RESULTS: The target dose coverage and critical structure sparing of plans are comparable obtained using the cloud computing platform are identical to that obtained using desktop PC (Intel Xeon® CPU 2.33GHz, 8GB memory). It is found that the MO planning speeds up the processing of obtaining the Pareto set substantially for both types of plans. The speedup scales approximately linearly with the number of nodes used for computing. With the use of N nodes, the computational time is reduced by the fitting model, 0.2+2.3/N, with r̂2>0.99, on average of the cases making real-time MO planning possible.

CONCLUSIONS: A cloud computing infrastructure is developed for MO optimization. The algorithm substantially improves the speed of inverse plan optimization. The platform is valuable for both MO planning and future off- or on-line adaptive re-planning.}, } @article {pmid27493300, year = {2011}, author = {Usmani, AM and Meo, SA}, title = {Evaluation of Science.}, journal = {Sudanese journal of paediatrics}, volume = {11}, number = {1}, pages = {6-7}, pmid = {27493300}, issn = {0256-4408}, abstract = {Scientific achievement by publishing a scientific manuscript in a peer reviewed biomedical journal is an important ingredient of research along with a career-enhancing advantages and significant amount of personal satisfaction. The road to evaluate science (research, scientific publications) among scientists often seems complicated. Scientist's career is generally summarized by the number of publications / citations, teaching the undergraduate, graduate and post-doctoral students, writing or reviewing grants and papers, preparing for and organizing meetings, participating in collaborations and conferences, advising colleagues, and serving on editorial boards of scientific journals. Scientists have been sizing up their colleagues since science began. Scientometricians have invented a wide variety of algorithms called science metrics to evaluate science. Many of the science metrics are even unknown to the everyday scientist. Unfortunately, there is no all-in-one metric. Each of them has its own strength, limitation and scope. Some of them are mistakenly applied to evaluate individuals, and each is surrounded by a cloud of variants designed to help them apply across different scientific fields or different career stages [1]. A suitable indicator should be chosen by considering the purpose of the evaluation, and how the results will be used. 
Scientific Evaluation assists us in: computing the research performance, comparison with peers, forecasting the growth, identifying the excellence in research, citation ranking, finding the influence of research, measuring the productivity, making policy decisions, securing funds for research and spotting trends. Key concepts in science metrics are output and impact. Evaluation of science is traditionally expressed in terms of citation counts. Although most of the science metrics are based on citation counts, the two most commonly used are impact factor [2] and h-index [3].}, } @article {pmid27663000, year = {1993}, author = {}, title = {Health care research by degrees N Reid Health care research by degrees Blackwell Scientific 162pp £13.99 0632-03466-1 [Formula: see text].}, journal = {Nursing standard (Royal College of Nursing (Great Britain) : 1987)}, volume = {7}, number = {24}, pages = {41}, doi = {10.7748/ns.7.24.41.s48}, pmid = {27663000}, issn = {2047-9018}, abstract = {Inadequately understood statistics so often cloud both the argument of the researcher and the judgement of the reader. Norma Reid brings a refreshing clarity to a complex topic; she takes the mystification and mystique out of statistics. Her basic premiss that theory ought to be based on practical utility and relevance shines through her text and helps to make the subject accessible to clinicians who want to understand the underpinnings of their practice. Research methods, particularly qualitative approaches, are sketchily dealt with when compared with the wealth of detail on the mechanics of computing. Also, it is awkward to find methods and analysis not clearly separated in places (eg, Delphi studies), but ample references direct the reader to more expansive sources. 
Any attempt to steer the uninitiated through the minefields of computing is fraught with difficulties, and some will be disappointed to find one system used exclusively, but, perhaps, it serves as an illustration rather than a course to be slavishly followed.}, } @article {pmid26075333, year = {2015}, author = {Lin, BS and Hsiao, PC and Cheng, PH and Lee, IJ and Jan, GE}, title = {Design and Implementation of a Set-Top Box-Based Homecare System Using Hybrid Cloud.}, journal = {Telemedicine journal and e-health : the official journal of the American Telemedicine Association}, volume = {21}, number = {11}, pages = {916-922}, pmid = {26075333}, issn = {1556-3669}, mesh = {Blood Glucose ; Blood Pressure ; Body Weight ; *Cloud Computing ; Health Information Exchange ; Health Information Management/instrumentation/*methods ; Home Care Services ; Humans ; Medical Records Systems, Computerized ; Monitoring, Ambulatory/instrumentation/methods ; Radio Frequency Identification Device ; Telemedicine/instrumentation/*methods ; Television ; *User-Computer Interface ; Wireless Technology ; }, abstract = {INTRODUCTION: Telemedicine has become a prevalent topic in recent years, and several telemedicine systems have been proposed; however, such systems are an unsuitable fit for the daily requirements of users.

MATERIALS AND METHODS: The system proposed in this study was developed as a set-top box integrated with the Android™ (Google, Mountain View, CA) operating system to provide a convenient and user-friendly interface. The proposed system can assist with family healthcare management, telemedicine service delivery, and information exchange among hospitals. To manage the system, a novel type of hybrid cloud architecture was also developed.

RESULTS: Updated information is stored on a public cloud, enabling medical staff members to rapidly access information when diagnosing patients. In the long term, the stored data can be reduced to improve the efficiency of the database.

CONCLUSIONS: The proposed design offers a robust architecture for storing data in a homecare system and can thus resolve network overload and congestion resulting from accumulating data, which are inherent problems in centralized architectures, thereby improving system efficiency.}, } @article {pmid26063273, year = {2015}, author = {Nitzlnader, M and Falgenhauer, M and Gossy, C and Schreier, G}, title = {Architecture for an advanced biomedical collaboration domain for the European paediatric cancer research community (ABCD-4-E).}, journal = {Studies in health technology and informatics}, volume = {212}, number = {}, pages = {167-174}, pmid = {26063273}, issn = {1879-8365}, mesh = {Biomedical Research/*organization & administration ; Cooperative Behavior ; Electronic Health Records/*organization & administration ; Europe ; Humans ; Medical Oncology/*organization & administration ; Medical Record Linkage/methods ; Neoplasms/*classification ; Patient Care Team/*organization & administration ; Pediatrics/*organization & administration ; }, abstract = {Today, progress in biomedical research often depends on large, interdisciplinary research projects and tailored information and communication technology (ICT) support. In the context of the European Network for Cancer Research in Children and Adolescents (ENCCA) project the exchange of data between data source (Source Domain) and data consumer (Consumer Domain) systems in a distributed computing environment needs to be facilitated. This work presents the requirements and the corresponding solution architecture of the Advanced Biomedical Collaboration Domain for Europe (ABCD-4-E). The proposed concept utilises public as well as private cloud systems, the Integrating the Healthcare Enterprise (IHE) framework and web-based applications to provide the core capabilities in accordance with privacy and security needs. The utility of crucial parts of the concept was evaluated by prototypic implementation. 
A discussion of the design indicates that the requirements of ENCCA are fully met. A whole system demonstration is currently being prepared to verify that ABCD-4-E has the potential to evolve into a domain-bridging collaboration platform in the future.}, } @article {pmid26061287, year = {2015}, author = {Bovino, S and Grassi, T and Gianturco, FA}, title = {CH(+) Destruction by Reaction with H: Computing Quantum Rates To Model Different Molecular Regions in the Interstellar Medium.}, journal = {The journal of physical chemistry. A}, volume = {119}, number = {50}, pages = {11973-11982}, doi = {10.1021/acs.jpca.5b02785}, pmid = {26061287}, issn = {1520-5215}, abstract = {A detailed analysis of an ionic reaction that plays a crucial role in the carbon chemistry of the interstellar medium (ISM) is carried out by computing ab initio reactive cross sections with a quantum method and by further obtaining the corresponding CH(+) destruction rates over a range of temperatures that shows good overall agreement with existing experiments. The differences found between all existing calculations and the very-low-T experiments are discussed and explored via a simple numerical model that links these cross section reductions to collinear approaches where nonadiabatic crossing is expected to dominate. The new rates are further linked to a complex chemical network that models the evolution of the CH(+) abundance in the photodissociation region (PDR) and molecular cloud (MC) environments of the ISM. The abundances of CH(+) are given by numerical solutions of a large set of coupled, first-order kinetics equations that employs our new chemical package krome. 
The analysis that we carry out reveals that the important region for CH(+) destruction is that above 100 K, hence showing that, at least for this reaction, the differences with the existing laboratory low-T experiments are of essentially no importance within the astrochemical environments discussed here because, at those temperatures, other chemical processes involving the title molecule are taking over. A detailed analysis of the chemical network involving CH(+) also shows that a slight decrease in the initial oxygen abundance might lead to higher CH(+) abundances because the main chemical carbon ion destruction channel is reduced in efficiency. This might provide an alternative chemical route to understand the reason why general astrochemical models fail when the observed CH(+) abundances are matched with the outcomes of their calculations.}, } @article {pmid26046471, year = {2015}, author = {D'Antonio, M and D'Onorio De Meo, P and Pallocca, M and Picardi, E and D'Erchia, AM and Calogero, RA and Castrignanò, T and Pesole, G}, title = {RAP: RNA-Seq Analysis Pipeline, a new cloud-based NGS web application.}, journal = {BMC genomics}, volume = {16}, number = {Suppl 6}, pages = {S3}, pmid = {26046471}, issn = {1471-2164}, mesh = {High-Throughput Nucleotide Sequencing ; Internet ; Polyadenylation ; RNA/*analysis ; Sequence Analysis, RNA/*methods ; *User-Computer Interface ; }, abstract = {BACKGROUND: The study of RNA has been dramatically improved by the introduction of Next Generation Sequencing platforms allowing massive and cheap sequencing of selected RNA fractions, also providing information on strand orientation (RNA-Seq). The complexity of transcriptomes and of their regulative pathways make RNA-Seq one of the most complex fields of NGS applications, addressing several aspects of the expression process (e.g. 
identification and quantification of expressed genes and transcripts, alternative splicing and polyadenylation, fusion genes and trans-splicing, post-transcriptional events, etc.).

METHODS: In order to provide researchers with an effective and friendly resource for analyzing RNA-Seq data, we present here RAP (RNA-Seq Analysis Pipeline), a cloud computing web application implementing a complete but modular analysis workflow. This pipeline integrates both state-of-the-art bioinformatics tools for RNA-Seq analysis and in-house developed scripts to offer to the user a comprehensive strategy for data analysis. RAP is able to perform quality checks (adopting FastQC and NGS QC Toolkit), identify and quantify expressed genes and transcripts (with Tophat, Cufflinks and HTSeq), detect alternative splicing events (using SpliceTrap) and chimeric transcripts (with ChimeraScan). This pipeline is also able to identify splicing junctions and constitutive or alternative polyadenylation sites (implementing custom analysis modules) and call for statistically significant differences in genes and transcripts expression, splicing pattern and polyadenylation site usage (using Cuffdiff2 and DESeq).

RESULTS: Through a user friendly web interface, the RAP workflow can be suitably customized by the user and it is automatically executed on our cloud computing environment. This strategy allows to access to bioinformatics tools and computational resources without specific bioinformatics and IT skills. RAP provides a set of tabular and graphical results that can be helpful to browse, filter and export analyzed data, according to the user needs.}, } @article {pmid26045962, year = {2015}, author = {Siretskiy, A and Sundqvist, T and Voznesenskiy, M and Spjuth, O}, title = {A quantitative assessment of the Hadoop framework for analyzing massively parallel DNA sequencing data.}, journal = {GigaScience}, volume = {4}, number = {}, pages = {26}, pmid = {26045962}, issn = {2047-217X}, mesh = {Computational Biology ; Internet ; Sequence Analysis, DNA/*methods ; Software ; }, abstract = {BACKGROUND: New high-throughput technologies, such as massively parallel sequencing, have transformed the life sciences into a data-intensive field. The most common e-infrastructure for analyzing this data consists of batch systems that are based on high-performance computing resources; however, the bioinformatics software that is built on this platform does not scale well in the general case. Recently, the Hadoop platform has emerged as an interesting option to address the challenges of increasingly large datasets with distributed storage, distributed processing, built-in data locality, fault tolerance, and an appealing programming methodology.

RESULTS: In this work we introduce metrics and report on a quantitative comparison between Hadoop and a single node of conventional high-performance computing resources for the tasks of short read mapping and variant calling. We calculate efficiency as a function of data size and observe that the Hadoop platform is more efficient for biologically relevant data sizes in terms of computing hours for both split and un-split data files. We also quantify the advantages of the data locality provided by Hadoop for NGS problems, and show that a classical architecture with network-attached storage will not scale when computing resources increase in numbers. Measurements were performed using ten datasets of different sizes, up to 100 gigabases, using the pipeline implemented in Crossbow. To make a fair comparison, we implemented an improved preprocessor for Hadoop with better performance for splittable data files. For improved usability, we implemented a graphical user interface for Crossbow in a private cloud environment using the CloudGene platform. All of the code and data in this study are freely available as open source in public repositories.

CONCLUSIONS: From our experiments we can conclude that the improved Hadoop pipeline scales better than the same pipeline on high-performance computing resources, we also conclude that Hadoop is an economically viable option for the common data sizes that are currently used in massively parallel sequencing. Given that datasets are expected to increase over time, Hadoop is a framework that we envision will have an increasingly important role in future biological data analysis.}, } @article {pmid26044652, year = {2015}, author = {Wang, C and Guo, M and Liu, X and Liu, Y and Zou, Q}, title = {SeedsGraph: an efficient assembler for next-generation sequencing data.}, journal = {BMC medical genomics}, volume = {8 Suppl 2}, number = {Suppl 2}, pages = {S13}, pmid = {26044652}, issn = {1755-8794}, mesh = {*Algorithms ; Cluster Analysis ; Databases, Genetic ; High-Throughput Nucleotide Sequencing/*methods ; Rhodobacter sphaeroides/genetics ; Sequence Analysis, DNA/*methods ; Staphylococcus aureus/genetics ; }, abstract = {DNA sequencing technology has been rapidly evolving, and produces a large number of short reads with a fast rising tendency. This has led to a resurgence of research in whole genome shotgun assembly algorithms. We start the assembly algorithm by clustering the short reads in a cloud computing framework, and the clustering process groups fragments according to their original consensus long-sequence similarity. We condense each group of reads to a chain of seeds, which is a kind of substring with reads aligned, and then build a graph accordingly. Finally, we analyze the graph to find Euler paths, and assemble the reads related in the paths into contigs, and then lay out contigs with mate-pair information for scaffolds. 
The result shows that our algorithm is efficient and feasible for a large set of reads such as in next-generation sequencing technology.}, } @article {pmid26020786, year = {2015}, author = {Karr, JR and Williams, AH and Zucker, JD and Raue, A and Steiert, B and Timmer, J and Kreutz, C and , and Wilkinson, S and Allgood, BA and Bot, BM and Hoff, BR and Kellen, MR and Covert, MW and Stolovitzky, GA and Meyer, P}, title = {Summary of the DREAM8 Parameter Estimation Challenge: Toward Parameter Identification for Whole-Cell Models.}, journal = {PLoS computational biology}, volume = {11}, number = {5}, pages = {e1004096}, pmid = {26020786}, issn = {1553-7358}, support = {DP1 LM011510/LM/NLM NIH HHS/United States ; DP1 OD006413/OD/NIH HHS/United States ; P50 GM107615/GM/NIGMS NIH HHS/United States ; 5DP1LM01150-05/DP/NCCDPHP CDC HHS/United States ; }, mesh = {Algorithms ; Bacteria/genetics/metabolism ; Bioengineering ; Cells/*metabolism ; Cloud Computing ; Computational Biology ; Computer Simulation ; Genetic Association Studies/statistics & numerical data ; *Models, Biological ; Mutation ; Mycoplasma genitalium/genetics/metabolism ; }, abstract = {Whole-cell models that explicitly represent all cellular components at the molecular level have the potential to predict phenotype from genotype. However, even for simple bacteria, whole-cell models will contain thousands of parameters, many of which are poorly characterized or unknown. New algorithms are needed to estimate these parameters and enable researchers to build increasingly comprehensive models. We organized the Dialogue for Reverse Engineering Assessments and Methods (DREAM) 8 Whole-Cell Parameter Estimation Challenge to develop new parameter estimation algorithms for whole-cell models. We asked participants to identify a subset of parameters of a whole-cell model given the model's structure and in silico "experimental" data. 
Here we describe the challenge, the best performing methods, and new insights into the identifiability of whole-cell models. We also describe several valuable lessons we learned toward improving future challenges. Going forward, we believe that collaborative efforts supported by inexpensive cloud computing have the potential to solve whole-cell model parameter estimation.}, } @article {pmid25995962, year = {2015}, author = {Oh, S and Cha, J and Ji, M and Kang, H and Kim, S and Heo, E and Han, JS and Kang, H and Chae, H and Hwang, H and Yoo, S}, title = {Architecture Design of Healthcare Software-as-a-Service Platform for Cloud-Based Clinical Decision Support Service.}, journal = {Healthcare informatics research}, volume = {21}, number = {2}, pages = {102-110}, pmid = {25995962}, issn = {2093-3681}, abstract = {OBJECTIVES: To design a cloud computing-based Healthcare Software-as-a-Service (SaaS) Platform (HSP) for delivering healthcare information services with low cost, high clinical value, and high usability.

METHODS: We analyzed the architecture requirements of an HSP, including the interface, business services, cloud SaaS, quality attributes, privacy and security, and multi-lingual capacity. For cloud-based SaaS services, we focused on Clinical Decision Service (CDS) content services, basic functional services, and mobile services. Microsoft's Azure cloud computing for Infrastructure-as-a-Service (IaaS) and Platform-as-a-Service (PaaS) was used.

RESULTS: The functional and software views of an HSP were designed in a layered architecture. External systems can be interfaced with the HSP using SOAP and REST/JSON. The multi-tenancy model of the HSP was designed as a shared database, with a separate schema for each tenant through a single application, although healthcare data can be physically located on a cloud or in a hospital, depending on regulations. The CDS services were categorized into rule-based services for medications, alert registration services, and knowledge services.

CONCLUSIONS: We expect that cloud-based HSPs will allow small and mid-sized hospitals, in addition to large-sized hospitals, to adopt information infrastructures and health information technology with low system operation and maintenance costs.}, } @article {pmid25991282, year = {2015}, author = {Kuo, MH}, title = {Implementation of a Cloud-based Blood Pressure Data Management System.}, journal = {Studies in health technology and informatics}, volume = {210}, number = {}, pages = {882-886}, pmid = {25991282}, issn = {1879-8365}, mesh = {Blood Pressure Determination/*methods ; *Cloud Computing ; Electronic Health Records/*organization & administration ; Humans ; Hypertension/*diagnosis/*prevention & control ; Mobile Applications ; Remote Consultation/*organization & administration ; Smartphone ; User-Computer Interface ; }, abstract = {Regular monitoring of blood pressure of a patient can improve hypertension diagnosis and treatment. The objective of this study is to design and implement a cloud computing based blood pressure data management system that allows patients, nurses, physicians, and researchers to access data through the Internet anytime, anywhere and via any device.}, } @article {pmid25991144, year = {2015}, author = {Pan, W and Coatrieux, G and Bouslimi, D and Prigent, N}, title = {Secure public cloud platform for medical images sharing.}, journal = {Studies in health technology and informatics}, volume = {210}, number = {}, pages = {251-255}, pmid = {25991144}, issn = {1879-8365}, mesh = {Algorithms ; *Cloud Computing ; *Computer Security ; *Confidentiality ; Information Storage and Retrieval/*methods ; Medical Record Linkage/*methods ; Radiology Information Systems/*organization & administration ; Signal Processing, Computer-Assisted ; }, abstract = {Cloud computing promises medical imaging services offering large storage and computing capabilities for limited costs. 
In this data outsourcing framework, one of the greatest issues to deal with is data security. To do so, we propose to secure a public cloud platform devoted to medical image sharing by defining and deploying a security policy so as to control various security mechanisms. This policy stands on a risk assessment we conducted so as to identify security objectives with a special interest for digital content protection. These objectives are addressed by means of different security mechanisms like access and usage control policy, partial-encryption and watermarking.}, } @article {pmid25977891, year = {2015}, author = {Rai, R and Sahoo, G and Mehfuz, S}, title = {Exploring the factors influencing the cloud computing adoption: a systematic study on cloud migration.}, journal = {SpringerPlus}, volume = {4}, number = {}, pages = {197}, pmid = {25977891}, issn = {2193-1801}, abstract = {Today, most of the organizations trust on their age old legacy applications, to support their business-critical systems. However, there are several critical concerns, as maintainability and scalability issues, associated with the legacy system. In this background, cloud services offer a more agile and cost effective platform, to support business applications and IT infrastructure. As the adoption of cloud services has been increasing recently and so has been the academic research in cloud migration. However, there is a genuine need of secondary study to further strengthen this research. The primary objective of this paper is to scientifically and systematically identify, categorize and compare the existing research work in the area of legacy to cloud migration. The paper has also endeavored to consolidate the research on Security issues, which is prime factor hindering the adoption of cloud through classifying the studies on secure cloud migration. 
SLR (Systematic Literature Review) of thirty selected papers, published from 2009 to 2014 was conducted to properly understand the nuances of the security framework. To categorize the selected studies, authors have proposed a conceptual model for cloud migration which has resulted in a resource base of existing solutions for cloud migration. This study concludes that cloud migration research is in seminal stage but simultaneously it is also evolving and maturing, with increasing participation from academics and industry alike. The paper also identifies the need for a secure migration model, which can fortify organization's trust into cloud migration and facilitate necessary tool support to automate the migration process.}, } @article {pmid25955969, year = {2015}, author = {Cianfrocco, MA and Leschziner, AE}, title = {Low cost, high performance processing of single particle cryo-electron microscopy data in the cloud.}, journal = {eLife}, volume = {4}, number = {}, pages = {}, pmid = {25955969}, issn = {2050-084X}, support = {R01 GM092895/GM/NIGMS NIH HHS/United States ; R01 GM107214/GM/NIGMS NIH HHS/United States ; R01 GM092895A/GM/NIGMS NIH HHS/United States ; //Howard Hughes Medical Institute/United States ; }, mesh = {Cryoelectron Microscopy/*methods ; Databases, Genetic/economics/statistics & numerical data ; Multigene Family ; Ribosomes/*genetics/*ultrastructure ; Software/*economics/statistics & numerical data ; Yeasts ; }, abstract = {The advent of a new generation of electron microscopes and direct electron detectors has realized the potential of single particle cryo-electron microscopy (cryo-EM) as a technique to generate high-resolution structures. Calculating these structures requires high performance computing clusters, a resource that may be limiting to many likely cryo-EM users. 
To address this limitation and facilitate the spread of cryo-EM, we developed a publicly available 'off-the-shelf' computing environment on Amazon's elastic cloud computing infrastructure. This environment provides users with single particle cryo-EM software packages and the ability to create computing clusters with 16-480+ CPUs. We tested our computing environment using a publicly available 80S yeast ribosome dataset and estimate that laboratories could determine high-resolution cryo-EM structures for $50 to $1500 per structure within a timeframe comparable to local clusters. Our analysis shows that Amazon's cloud computing environment may offer a viable computing environment for cryo-EM.}, } @article {pmid25952637, year = {2015}, author = {Lucas, RW and Dees, J and Reynolds, R and Rhodes, B and Hendershot, RW}, title = {Cloud-computing and smartphones: tools for improving asthma management and understanding environmental triggers.}, journal = {Annals of allergy, asthma & immunology : official publication of the American College of Allergy, Asthma, & Immunology}, volume = {114}, number = {5}, pages = {431-432}, doi = {10.1016/j.anai.2015.02.020}, pmid = {25952637}, issn = {1534-4436}, mesh = {Adult ; Air Pollution/adverse effects/*analysis ; Asthma/*etiology ; *Cell Phone ; Disease Management ; Environmental Exposure/adverse effects/*analysis ; *Environmental Monitoring/instrumentation/methods ; Humans ; *Internet ; Male ; }, } @article {pmid25950981, year = {2015}, author = {Shukla, D and Lawrenz, M and Pande, VS}, title = {Elucidating Ligand-Modulated Conformational Landscape of GPCRs Using Cloud-Computing Approaches.}, journal = {Methods in enzymology}, volume = {557}, number = {}, pages = {551-572}, doi = {10.1016/bs.mie.2014.12.007}, pmid = {25950981}, issn = {1557-7988}, support = {GM072970/GM/NIGMS NIH HHS/United States ; R01GM62828/GM/NIGMS NIH HHS/United States ; }, mesh = {Animals ; Computer Simulation ; Humans ; Ligands ; Markov Chains ; Models, 
Molecular ; Protein Binding ; Protein Conformation ; Receptors, G-Protein-Coupled/*chemistry/*metabolism ; }, abstract = {G-protein-coupled receptors (GPCRs) are a versatile family of membrane-bound signaling proteins. Despite the recent successes in obtaining crystal structures of GPCRs, much needs to be learned about the conformational changes associated with their activation. Furthermore, the mechanism by which ligands modulate the activation of GPCRs has remained elusive. Molecular simulations provide a way of obtaining a detailed atomistic description of GPCR activation dynamics. However, simulating GPCR activation is challenging due to the long timescales involved and the associated challenge of gaining insights from the "Big" simulation datasets. Here, we demonstrate how cloud-computing approaches have been used to tackle these challenges and obtain insights into the activation mechanism of GPCRs. In particular, we review the use of Markov state model (MSM)-based sampling algorithms for sampling milliseconds of dynamics of a major drug target, the G-protein-coupled receptor β2-AR. MSMs of agonist and inverse agonist-bound β2-AR reveal multiple activation pathways and how ligands function via modulation of the ensemble of activation pathways. We target this ensemble of conformations with computer-aided drug design approaches, with the goal of designing drugs that interact more closely with diverse receptor states, for overall increased efficacy and specificity. 
We conclude by discussing how cloud-based approaches present a powerful and broadly available tool for studying the complex biological systems routinely.}, } @article {pmid25935400, year = {2015}, author = {Baskaran, V and Prescod, F and Dong, L}, title = {A smartphone-based cloud computing tool for managing type 1 diabetes in Ontarians.}, journal = {Canadian journal of diabetes}, volume = {39}, number = {3}, pages = {200-203}, doi = {10.1016/j.jcjd.2015.04.002}, pmid = {25935400}, issn = {2352-3840}, mesh = {Accidental Falls ; Algorithms ; Blood Glucose Self-Monitoring/instrumentation/*methods ; Cloud Computing/*trends ; Diabetes Mellitus, Type 1/*blood/*drug therapy ; Diabetic Coma/prevention & control ; Disease Management ; Glycated Hemoglobin/metabolism ; Humans ; Hypoglycemia/prevention & control ; Ontario ; Self Care/instrumentation/*methods ; Smartphone/*trends ; Social Class ; }, } @article {pmid25927263, year = {2015}, author = {Chan, T and Sennik, S and Zaki, A and Trotter, B}, title = {Studying with the cloud: the use of online Web-based resources to augment a traditional study group format.}, journal = {CJEM}, volume = {17}, number = {2}, pages = {192-195}, doi = {10.2310/8000.2014.141425}, pmid = {25927263}, issn = {1481-8043}, mesh = {Canada ; Computer-Assisted Instruction/*methods ; Humans ; *Internet ; Internship and Residency/*methods ; Learning ; Physicians/*standards ; }, abstract = {Cloud-based applications such as Google Docs, Skype, Dropbox, and SugarSync are revolutionizing the way that we interact with the world. Members of the millennial generation (those born after 1980) are now becoming senior residents and junior attending physicians. We describe a novel technique combining Internet- and cloud-based methods to digitally augment the classic study group used by final-year residents studying for the Royal College of Physicians and Surgeons of Canada examination. 
This material was developed by residents and improved over the course of 18 months. This is an innovation report about a process for enhanced communication and collaboration as there has been little research to date regarding the augmentation of learner-driven initiatives with virtual resources.}, } @article {pmid25888747, year = {2015}, author = {Griebel, L and Prokosch, HU and Köpcke, F and Toddenroth, D and Christoph, J and Leb, I and Engel, I and Sedlmayr, M}, title = {A scoping review of cloud computing in healthcare.}, journal = {BMC medical informatics and decision making}, volume = {15}, number = {}, pages = {17}, pmid = {25888747}, issn = {1472-6947}, mesh = {*Cloud Computing ; *Delivery of Health Care ; Humans ; }, abstract = {BACKGROUND: Cloud computing is a recent and fast growing area of development in healthcare. Ubiquitous, on-demand access to virtually endless resources in combination with a pay-per-use model allow for new ways of developing, delivering and using services. Cloud computing is often used in an "OMICS-context", e.g. for computing in genomics, proteomics and molecular medicine, while other field of application still seem to be underrepresented. Thus, the objective of this scoping review was to identify the current state and hot topics in research on cloud computing in healthcare beyond this traditional domain.

METHODS: MEDLINE was searched in July 2013 and in December 2014 for publications containing the terms "cloud computing" and "cloud-based". Each journal and conference article was categorized and summarized independently by two researchers who consolidated their findings.

RESULTS: 102 publications have been analyzed and 6 main topics have been found: telemedicine/teleconsultation, medical imaging, public health and patient self-management, hospital management and information systems, therapy, and secondary use of data. Commonly used features are broad network access for sharing and accessing data and rapid elasticity to dynamically adapt to computing demands. Eight articles favor the pay-for-use characteristics of cloud-based services avoiding upfront investments. Nevertheless, while 22 articles present very general potentials of cloud computing in the medical domain and 66 articles describe conceptual or prototypic projects, only 14 articles report from successful implementations. Further, in many articles cloud computing is seen as an analogy to internet-/web-based data sharing and the characteristics of the particular cloud computing approach are unfortunately not really illustrated.

CONCLUSIONS: Even though cloud computing in healthcare is of growing interest only few successful implementations yet exist and many papers just use the term "cloud" synonymously for "using virtual machines" or "web-based" with no described benefit of the cloud paradigm. The biggest threat to the adoption in the healthcare domain is caused by involving external cloud partners: many issues of data safety and security are still to be solved. Until then, cloud computing is favored more for singular, individual features such as elasticity, pay-per-use and broad network access, rather than as cloud paradigm on its own.}, } @article {pmid25874206, year = {2015}, author = {Lee, M and Yoe, H}, title = {Analysis of environmental stress factors using an artificial growth system and plant fitness optimization.}, journal = {BioMed research international}, volume = {2015}, number = {}, pages = {292543}, pmid = {25874206}, issn = {2314-6141}, mesh = {Humans ; Pleurotus/*genetics/*growth & development ; *Stress, Physiological ; }, abstract = {The environment promotes evolution. Evolutionary processes represent environmental adaptations over long time scales; evolution of crop genomes is not inducible within the relatively short time span of a human generation. Extreme environmental conditions can accelerate evolution, but such conditions are often stress inducing and disruptive. Artificial growth systems can be used to induce and select genomic variation by changing external environmental conditions, thus, accelerating evolution. By using cloud computing and big-data analysis, we analyzed environmental stress factors for Pleurotus ostreatus by assessing, evaluating, and predicting information of the growth environment. 
Through the indexing of environmental stress, the growth environment can be precisely controlled and developed into a technology for improving crop quality and production.}, } @article {pmid25868263, year = {2014}, author = {Zhu, L and Li, L and Meng, C}, title = {[Construction and analysis of a monitoring system with remote real-time multiple physiological parameters based on cloud computing].}, journal = {Sheng wu yi xue gong cheng xue za zhi = Journal of biomedical engineering = Shengwu yixue gongchengxue zazhi}, volume = {31}, number = {6}, pages = {1377-1383}, pmid = {25868263}, issn = {1001-5515}, mesh = {Computer Systems ; Computers ; Humans ; Information Storage and Retrieval/*methods ; Internet ; Monitoring, Physiologic/*methods ; Software ; }, abstract = {There have been problems in the existing multiple physiological parameter real-time monitoring system, such as insufficient server capacity for physiological data storage and analysis so that data consistency can not be guaranteed, poor performance in real-time, and other issues caused by the growing scale of data. We therefore proposed a new solution which was with multiple physiological parameters and could calculate clustered background data storage and processing based on cloud computing. Through our studies, a batch processing for longitudinal analysis of patients' historical data was introduced. The process included the resource virtualization of IaaS layer for cloud platform, the construction of real-time computing platform of PaaS layer, the reception and analysis of data stream of SaaS layer, and the bottleneck problem of multi-parameter data transmission, etc. The results were to achieve in real-time physiological information transmission, storage and analysis of a large amount of data. 
The simulation test results showed that the remote multiple physiological parameter monitoring system based on cloud platform had obvious advantages in processing time and load balancing over the traditional server model. This architecture solved the problems including long turnaround time, poor performance of real-time analysis, lack of extensibility and other issues, which exist in the traditional remote medical services. Technical support was provided in order to facilitate a "wearable wireless sensor plus mobile wireless transmission plus cloud computing service" mode moving towards home health monitoring for multiple physiological parameter wireless monitoring.}, } @article {pmid25863787, year = {2016}, author = {Calabrese, B and Cannataro, M}, title = {Bioinformatics and Microarray Data Analysis on the Cloud.}, journal = {Methods in molecular biology (Clifton, N.J.)}, volume = {1375}, number = {}, pages = {25-39}, doi = {10.1007/7651_2015_236}, pmid = {25863787}, issn = {1940-6029}, mesh = {*Cloud Computing ; Computational Biology/*methods ; Humans ; Microarray Analysis/*methods ; }, abstract = {High-throughput platforms such as microarray, mass spectrometry, and next-generation sequencing are producing an increasing volume of omics data that needs large data storage and computing power. Cloud computing offers massive scalable computing and storage, data sharing, on-demand anytime and anywhere access to resources and applications, and thus, it may represent the key technology for facing those issues. In fact, in the recent years it has been adopted for the deployment of different bioinformatics solutions and services both in academia and in the industry. Despite this, cloud computing presents several issues regarding the security and privacy of data, that are particularly important when analyzing patients data, such as in personalized medicine. 
This chapter reviews main academic and industrial cloud-based bioinformatics solutions; with a special focus on microarray data analysis solutions and underlines main issues and problems related to the use of such platforms for the storage and analysis of patients data.}, } @article {pmid25862404, year = {2015}, author = {Duret, D and Senior, A}, title = {Comparative Study of Three Different Personal Response Systems with Fourth-Year Undergraduate Veterinary Students.}, journal = {Journal of veterinary medical education}, volume = {42}, number = {2}, pages = {120-126}, doi = {10.3138/jvme.0814-079R2}, pmid = {25862404}, issn = {0748-321X}, mesh = {Cohort Studies ; Education, Veterinary/economics/*methods ; England ; *Faculty ; Humans ; Problem-Based Learning/economics/*methods ; Students/*psychology ; Surveys and Questionnaires ; Young Adult ; }, abstract = {The purpose of this study was to compare three different Personal Response Systems that have been used in recent years at the School of Veterinary Science, University of Liverpool: a technology-free system (Communicubes), a handset delivery device (TurningPoint), and a cloud-based technology (Poll Everywhere) that allows students to use a range of personal computing devices to register their answer. All three systems offer a method to promote active learning, and lecturers were encouraged to use them. However, there are cost and logistical implications for each. The authors found that both staff and students did have particular preferences for a specific system. This preference was not the same for both groups. The outcome of the comparison is that further research is needed into cloud-based technology as it offers benefits to the students but is also a distraction.}, } @article {pmid25859564, year = {2014}, author = {Chen, Y and Lin, Z and Pienta, R and Kahng, M and Chau, DH}, title = {Towards Scalable Graph Computation on Mobile Devices.}, journal = {Proceedings : ... 
IEEE International Conference on Big Data. IEEE International Conference on Big Data}, volume = {2014}, number = {}, pages = {29-35}, pmid = {25859564}, support = {U54 EB020404/EB/NIBIB NIH HHS/United States ; }, abstract = {Mobile devices have become increasingly central to our everyday activities, due to their portability, multi-touch capabilities, and ever-improving computational power. Such attractive features have spurred research interest in leveraging mobile devices for computation. We explore a novel approach that aims to use a single mobile device to perform scalable graph computation on large graphs that do not fit in the device's limited main memory, opening up the possibility of performing on-device analysis of large datasets, without relying on the cloud. Based on the familiar memory mapping capability provided by today's mobile operating systems, our approach to scale up computation is powerful and intentionally kept simple to maximize its applicability across the iOS and Android platforms. Our experiments demonstrate that an iPad mini can perform fast computation on large real graphs with as many as 272 million edges (Google+ social graph), at a speed that is only a few times slower than a 13″ Macbook Pro. Through creating a real world iOS app with this technique, we demonstrate the strong potential application for scalable graph computation on a single mobile device using our approach.}, } @article {pmid25849093, year = {2015}, author = {Harvey, MJ and De Fabritiis, G}, title = {AceCloud: Molecular Dynamics Simulations in the Cloud.}, journal = {Journal of chemical information and modeling}, volume = {55}, number = {5}, pages = {909-914}, doi = {10.1021/acs.jcim.5b00086}, pmid = {25849093}, issn = {1549-960X}, mesh = {*Cloud Computing ; Computer Security ; *Molecular Dynamics Simulation ; Software ; User-Computer Interface ; }, abstract = {We present AceCloud, an on-demand service for molecular dynamics simulations. 
AceCloud is designed to facilitate the secure execution of large ensembles of simulations on an external cloud computing service (currently Amazon Web Services). The AceCloud client, integrated into the ACEMD molecular dynamics package, provides an easy-to-use interface that abstracts all aspects of interaction with the cloud services. This gives the user the experience that all simulations are running on their local machine, minimizing the learning curve typically associated with the transition to using high performance computing services.}, } @article {pmid25847370, year = {2016}, author = {Rao, NS and Poole, SW and Ma, CY and He, F and Zhuang, J and Yau, DK}, title = {Defense of Cyber Infrastructures Against Cyber-Physical Attacks Using Game-Theoretic Models.}, journal = {Risk analysis : an official publication of the Society for Risk Analysis}, volume = {36}, number = {4}, pages = {694-710}, doi = {10.1111/risa.12362}, pmid = {25847370}, issn = {1539-6924}, mesh = {Algorithms ; Computer Communication Networks/*organization & administration ; *Computer Security ; Computer Simulation ; Computer Systems ; *Game Theory ; Humans ; Information Systems/*organization & administration ; Models, Statistical ; Probability ; Reproducibility of Results ; Software ; }, abstract = {The operation of cyber infrastructures relies on both cyber and physical components, which are subject to incidental and intentional degradations of different kinds. Within the context of network and computing infrastructures, we study the strategic interactions between an attacker and a defender using game-theoretic models that take into account both cyber and physical components. The attacker and defender optimize their individual utilities, expressed as sums of cost and system terms. First, we consider a Boolean attack-defense model, wherein the cyber and physical subinfrastructures may be attacked and reinforced as individual units. 
Second, we consider a component attack-defense model wherein their components may be attacked and defended, and the infrastructure requires minimum numbers of both to function. We show that the Nash equilibrium under uniform costs in both cases is computable in polynomial time, and it provides high-level deterministic conditions for the infrastructure survival. When probabilities of successful attack and defense, and of incidental failures, are incorporated into the models, the results favor the attacker but otherwise remain qualitatively similar. This approach has been motivated and validated by our experiences with UltraScience Net infrastructure, which was built to support high-performance network experiments. The analytical results, however, are more general, and we apply them to simplified models of cloud and high-performance computing infrastructures.}, } @article {pmid25842175, year = {2015}, author = {Boudry, C}, title = {Web 2.0 applications in medicine: trends and topics in the literature.}, journal = {Medicine 2.0}, volume = {4}, number = {1}, pages = {e2}, pmid = {25842175}, issn = {1923-2195}, abstract = {BACKGROUND: The World Wide Web has changed research habits, and these changes were further expanded when "Web 2.0" became popular in 2005. Bibliometrics is a helpful tool used for describing patterns of publication, for interpreting progression over time, and the geographical distribution of research in a given field. Few studies employing bibliometrics, however, have been carried out on the correlative nature of scientific literature and Web 2.0.

OBJECTIVE: The aim of this bibliometric analysis was to provide an overview of Web 2.0 implications in the biomedical literature. The objectives were to assess the growth rate of literature, key journals, authors, and country contributions, and to evaluate whether the various Web 2.0 applications were expressed within this biomedical literature, and if so, how.

METHODS: A specific query with keywords chosen to be representative of Web 2.0 applications was built for the PubMed database. Articles related to Web 2.0 were downloaded in Extensible Markup Language (XML) and were processed through developed hypertext preprocessor (PHP) scripts, then imported to Microsoft Excel 2010 for data processing.

RESULTS: A total of 1347 articles were included in this study. The number of articles related to Web 2.0 has been increasing from 2002 to 2012 (average annual growth rate was 106.3% with a maximum of 333% in 2005). The United States was by far the predominant country for authors, with 514 articles (54.0%; 514/952). The second and third most productive countries were the United Kingdom and Australia, with 87 (9.1%; 87/952) and 44 articles (4.6%; 44/952), respectively. Distribution of number of articles per author showed that the core population of researchers working on Web 2.0 in the medical field could be estimated at approximately 75. In total, 614 journals were identified during this analysis. Using Bradford's law, 27 core journals were identified, among which three (Studies in Health Technology and Informatics, Journal of Medical Internet Research, and Nucleic Acids Research) produced more than 35 articles related to Web 2.0 over the period studied. A total of 274 words in the field of Web 2.0 were found after manual sorting of the 15,878 words appearing in title and abstract fields for articles. Word frequency analysis reveals "blog" as the most recurrent, followed by "wiki", "Web 2.0", "social media", "Facebook", "social networks", "blogger", "cloud computing", "Twitter", and "blogging". All categories of Web 2.0 applications were found, indicating the successful integration of Web 2.0 into the biomedical field.

CONCLUSIONS: This study shows that the biomedical community is engaged in the use of Web 2.0 and confirms its high level of interest in these tools. Therefore, changes in the ways researchers use information seem to be far from over.}, } @article {pmid25842155, year = {2015}, author = {Hsieh, PJ}, title = {Healthcare professionals' use of health clouds: Integrating technology acceptance and status quo bias perspectives.}, journal = {International journal of medical informatics}, volume = {84}, number = {7}, pages = {512-523}, doi = {10.1016/j.ijmedinf.2015.03.004}, pmid = {25842155}, issn = {1872-8243}, mesh = {Adult ; *Attitude of Health Personnel ; *Attitude to Computers ; Diffusion of Innovation ; Electronic Health Records/*statistics & numerical data ; Female ; Humans ; Male ; Medical Informatics/*statistics & numerical data ; Middle Aged ; *Models, Psychological ; Organizational Culture ; Physicians/*psychology ; Taiwan ; Young Adult ; }, abstract = {PURPOSE: Cloud computing technology has recently been seen as an important milestone in medical informatics development. Despite its great potential, there are gaps in our understanding of how users evaluate change in relation to the health cloud and how they decide to resist it. Integrating technology acceptance and status quo bias perspectives, this study develops an integrated model to explain healthcare professionals' intention to use the health cloud service and their intention to resist it.

METHODS: A field survey was conducted in Taiwan to collect data from healthcare professionals; a structural equation model was used to examine the data. A valid sample of 209 healthcare professionals was collected for data analysis.

RESULTS: The results show that healthcare professionals' resistance to the use of the health cloud is the result of regret avoidance, inertia, perceived value, switching costs, and perceived threat. Attitude, subjective norm, and perceived behavior control are shown to have positive and direct effects on healthcare professionals' intention to use the health cloud. The results also indicate a significant negative effect in the relationship between healthcare professionals' intention and resistance to using the health cloud.

CONCLUSION: Our study illustrates the importance of incorporating user resistance in technology acceptance studies in general and in health technology usage studies in particular. This study also identifies key factors for practitioners and hospitals to make adoption decisions in relation to the health cloud. Further, the study provides a useful reference for future studies in this subject field.}, } @article {pmid25830608, year = {2015}, author = {Bhavani, SR and Senthilkumar, J and Chilambuchelvan, AG and Manjula, D and Krishnamoorthy, R and Kannan, A}, title = {CIMIDx: Prototype for a Cloud-Based System to Support Intelligent Medical Image Diagnosis With Efficiency.}, journal = {JMIR medical informatics}, volume = {3}, number = {1}, pages = {e12}, pmid = {25830608}, issn = {2291-9694}, abstract = {BACKGROUND: The Internet has greatly enhanced health care, helping patients stay up-to-date on medical issues and general knowledge. Many cancer patients use the Internet for cancer diagnosis and related information. Recently, cloud computing has emerged as a new way of delivering health services but currently, there is no generic and fully automated cloud-based self-management intervention for breast cancer patients, as practical guidelines are lacking.

OBJECTIVE: We investigated the prevalence and predictors of cloud use for medical diagnosis among women with breast cancer to gain insight into meaningful usage parameters to evaluate the use of generic, fully automated cloud-based self-intervention, by assessing how breast cancer survivors use a generic self-management model. The goal of this study was implemented and evaluated with a new prototype called "CIMIDx", based on representative association rules that support the diagnosis of medical images (mammograms).

METHODS: The proposed Cloud-Based System Support Intelligent Medical Image Diagnosis (CIMIDx) prototype includes two modules. The first is the design and development of the CIMIDx training and test cloud services. Deployed in the cloud, the prototype can be used for diagnosis and screening mammography by assessing the cancers detected, tumor sizes, histology, and stage of classification accuracy. To analyze the prototype's classification accuracy, we conducted an experiment with data provided by clients. Second, by monitoring cloud server requests, the CIMIDx usage statistics were recorded for the cloud-based self-intervention groups. We conducted an evaluation of the CIMIDx cloud service usage, in which browsing functionalities were evaluated from the end-user's perspective.

RESULTS: We performed several experiments to validate the CIMIDx prototype for breast health issues. The first set of experiments evaluated the diagnostic performance of the CIMIDx framework. We collected medical information from 150 breast cancer survivors from hospitals and health centers. The CIMIDx prototype achieved high sensitivity of up to 99.29%, and accuracy of up to 98%. The second set of experiments evaluated CIMIDx use for breast health issues, using t tests and Pearson chi-square tests to assess differences, and binary logistic regression to estimate the odds ratio (OR) for the predictors' use of CIMIDx. For the prototype usage statistics for the same 150 breast cancer survivors, we interviewed 114 (76.0%), through self-report questionnaires from CIMIDx blogs. The frequency of log-ins/person ranged from 0 to 30, total duration/person from 0 to 1500 minutes (25 hours). The 114 participants continued logging in to all phases, resulting in an intervention adherence rate of 44.3% (95% CI 33.2-55.9). The overall performance of the prototype for the good category, reported usefulness of the prototype (P=.77), overall satisfaction of the prototype (P=.31), ease of navigation (P=.89), user friendliness evaluation (P=.31), and overall satisfaction (P=.31). Positive evaluations given by 100 participants via a Web-based questionnaire supported our hypothesis.

CONCLUSIONS: The present study shows that women felt favorably about the use of a generic fully automated cloud-based self- management prototype. The study also demonstrated that the CIMIDx prototype resulted in the detection of more cancers in screening and diagnosing patients, with an increased accuracy rate.}, } @article {pmid25823046, year = {2015}, author = {Lillo-Castellano, JM and Mora-Jiménez, I and Santiago-Mozos, R and Chavarría-Asso, F and Cano-González, A and García-Alberola, A and Rojo-Álvarez, JL}, title = {Symmetrical compression distance for arrhythmia discrimination in cloud-based big-data services.}, journal = {IEEE journal of biomedical and health informatics}, volume = {19}, number = {4}, pages = {1253-1263}, doi = {10.1109/JBHI.2015.2412175}, pmid = {25823046}, issn = {2168-2208}, mesh = {Arrhythmias, Cardiac/*classification/therapy ; Databases, Factual ; Defibrillators, Implantable ; Electrocardiography/*classification ; Humans ; *Internet ; Machine Learning ; *Medical Informatics Computing ; Sensitivity and Specificity ; }, abstract = {The current development of cloud computing is completely changing the paradigm of data knowledge extraction in huge databases. An example of this technology in the cardiac arrhythmia field is the SCOOP platform, a national-level scientific cloud-based big data service for implantable cardioverter defibrillators. In this scenario, we here propose a new methodology for automatic classification of intracardiac electrograms (EGMs) in a cloud computing system, designed for minimal signal preprocessing. A new compression-based similarity measure (CSM) is created for low computational burden, so-called weighted fast compression distance, which provides better performance when compared with other CSMs in the literature. 
Using simple machine learning techniques, a set of 6848 EGMs extracted from SCOOP platform were classified into seven cardiac arrhythmia classes and one noise class, reaching near to 90% accuracy when previous patient arrhythmia information was available and 63% otherwise, hence overcoming in all cases the classification provided by the majority class. Results show that this methodology can be used as a high-quality service of cloud computing, providing support to physicians for improving the knowledge on patient diagnosis.}, } @article {pmid25807597, year = {2014}, author = {Noblin, A and Cortelyou-Ward, K and Servan, RM}, title = {Cloud computing and patient engagement: leveraging available technology.}, journal = {The Journal of medical practice management : MPM}, volume = {30}, number = {2}, pages = {89-93}, pmid = {25807597}, issn = {8755-0229}, mesh = {*Access to Information ; *Computer Security ; Confidentiality ; Health Insurance Portability and Accountability Act ; Humans ; Information Storage and Retrieval/*methods ; *Internet ; Meaningful Use ; Practice Management, Medical/*organization & administration ; Quality Improvement ; United States ; }, abstract = {Cloud computing technology has the potential to transform medical practices and improve patient engagement and quality of care. However, issues such as privacy and security and "fit" can make incorporation of the cloud an intimidating decision for many physicians. This article summarizes the four most common types of clouds and discusses their ideal uses, how they engage patients, and how they improve the quality of care offered. 
This technology also can be used to meet Meaningful Use requirements 1 and 2; and, if speculation is correct, the cloud will provide the necessary support needed for Meaningful Use 3 as well.}, } @article {pmid25803096, year = {2015}, author = {Yamada, KC and Inoue, S and Sakamoto, Y}, title = {An effective support system of emergency medical services with tablet computers.}, journal = {JMIR mHealth and uHealth}, volume = {3}, number = {1}, pages = {e23}, pmid = {25803096}, issn = {2291-5222}, abstract = {BACKGROUND: There were over 5,000,000 ambulance dispatches during 2010 in Japan, and the time for transportation has been increasing, it took over 37 minutes from dispatch to the hospitals. A way to reduce transportation time by ambulance is to shorten the time of searching for an appropriate facility/hospital during the prehospital phase. Although the information system of medical institutions and emergency medical service (EMS) was established in 2003 in Saga Prefecture, Japan, it has not been utilized efficiently. The Saga Prefectural Government renewed the previous system in an effort to make it the real-time support system that can efficiently manage emergency demand and acceptance for the first time in Japan in April 2011.

OBJECTIVE: The objective of this study was to evaluate if the new system promotes efficient emergency transportation for critically ill patients and provides valuable epidemiological data.

METHODS: The new system has provided both emergency personnel in the ambulance, or at the scene, and the medical staff in each hospital to be able to share up-to-date information about available hospitals by means of cloud computing. All 55 ambulances in Saga are equipped with tablet computers through third generation/long term evolution networks. When the emergency personnel arrive on the scene and discern the type of patient's illness, they can search for an appropriate facility/hospital with their tablet computer based on the patient's symptoms and available medical specialists. Data were collected prospectively over a three-year period from April 1, 2011 to March 31, 2013.

RESULTS: The transportation time by ambulance in Saga was shortened for the first time since the statistics were first kept in 1999; the mean time was 34.3 minutes in 2010 (based on administrative statistics) and 33.9 minutes (95% CI 33.6-34.1) in 2011. The ratio of transportation to the tertiary care facilities in Saga has decreased by 3.12% from the year before, 32.7% in 2010 (regional average) and 29.58% (9085/30,709) in 2011. The system entry completion rate by the emergency personnel was 100.00% (93,110/93,110) and by the medical staff was 46.11% (14,159/30,709) to 47.57% (14,639/30,772) over a three-year period. Finally, the new system reduced the operational costs by 40,000,000 yen (about $400,000 US dollars) a year.

CONCLUSIONS: The transportation time by ambulance was shorter following the implementation of the tablet computer in the current support system of EMS in Saga Prefecture, Japan. The cloud computing reduced the cost of the EMS system.}, } @article {pmid25794139, year = {2015}, author = {Yang, S and Zhang, X and Diao, L and Guo, F and Wang, D and Liu, Z and Li, H and Zheng, J and Pan, J and Nice, EC and Li, D and He, F}, title = {CAPER 3.0: A Scalable Cloud-Based System for Data-Intensive Analysis of Chromosome-Centric Human Proteome Project Data Sets.}, journal = {Journal of proteome research}, volume = {14}, number = {9}, pages = {3720-3728}, doi = {10.1021/pr501335w}, pmid = {25794139}, issn = {1535-3907}, mesh = {Amino Acid Sequence ; *Chromosome Mapping ; *Cloud Computing ; *Databases, Protein ; Humans ; Molecular Sequence Data ; Polymorphism, Genetic ; Proteins/chemistry/*genetics ; *Proteome ; }, abstract = {The Chromosome-centric Human Proteome Project (C-HPP) aims to catalog genome-encoded proteins using a chromosome-by-chromosome strategy. As the C-HPP proceeds, the increasing requirement for data-intensive analysis of the MS/MS data poses a challenge to the proteomic community, especially small laboratories lacking computational infrastructure. To address this challenge, we have updated the previous CAPER browser into a higher version, CAPER 3.0, which is a scalable cloud-based system for data-intensive analysis of C-HPP data sets. CAPER 3.0 uses cloud computing technology to facilitate MS/MS-based peptide identification. In particular, it can use both public and private cloud, facilitating the analysis of C-HPP data sets. CAPER 3.0 provides a graphical user interface (GUI) to help users transfer data, configure jobs, track progress, and visualize the results comprehensively. These features enable users without programming expertise to easily conduct data-intensive analysis using CAPER 3.0. 
Here, we illustrate the usage of CAPER 3.0 with four specific mass spectral data-intensive problems: detecting novel peptides, identifying single amino acid variants (SAVs) derived from known missense mutations, identifying sample-specific SAVs, and identifying exon-skipping events. CAPER 3.0 is available at http://prodigy.bprc.ac.cn/caper3.}, } @article {pmid25785761, year = {2015}, author = {Liu, WL and Zhang, K and Locatis, C and Ackerman, M}, title = {Cloud and traditional videoconferencing technology for telemedicine and distance learning.}, journal = {Telemedicine journal and e-health : the official journal of the American Telemedicine Association}, volume = {21}, number = {5}, pages = {422-426}, pmid = {25785761}, issn = {1556-3669}, support = {//Intramural NIH HHS/United States ; }, mesh = {Education, Distance/*methods ; Female ; Humans ; Information Dissemination ; Male ; *Systems Integration ; Technology ; Telemedicine/*methods ; United States ; *User-Computer Interface ; Videoconferencing/*instrumentation ; }, abstract = {INTRODUCTION: Cloud-based videoconferencing versus traditional systems are described for possible use in telemedicine and distance learning.

MATERIALS AND METHODS: Differences between traditional and cloud-based videoconferencing systems are examined, and the methods for identifying and testing systems are explained. Findings are presented characterizing the cloud conferencing genre and its attributes versus traditional H.323 conferencing.

RESULTS: Because the technology is rapidly evolving and needs to be evaluated in reference to local needs, it is strongly recommended that this or other reviews not be considered substitutes for personal hands-on experience.

CONCLUSIONS: This review identifies key attributes of the technology that can be used to appraise the relevance of cloud conferencing technology and to determine whether migration from traditional technology to a cloud environment is warranted. An evaluation template is provided for assessing systems appropriateness.}, } @article {pmid25768434, year = {2015}, author = {Cho, YC and Pan, JY}, title = {Design and implementation of website information disclosure assessment system.}, journal = {PloS one}, volume = {10}, number = {3}, pages = {e0117180}, pmid = {25768434}, issn = {1932-6203}, mesh = {*Computer Security ; *Confidentiality ; Data Collection ; Databases, Factual ; *Disclosure ; Electronic Mail ; Humans ; *Internet ; }, abstract = {Internet application technologies, such as cloud computing and cloud storage, have increasingly changed people's lives. Websites contain vast amounts of personal privacy information. In order to protect this information, network security technologies, such as database protection and data encryption, attract many researchers. The most serious problems concerning web vulnerability are e-mail address and network database leakages. These leakages have many causes. For example, malicious users can steal database contents, taking advantage of mistakes made by programmers and administrators. In order to mitigate this type of abuse, a website information disclosure assessment system is proposed in this study. This system utilizes a series of technologies, such as web crawler algorithms, SQL injection attack detection, and web vulnerability mining, to assess a website's information disclosure. Thirty websites, randomly sampled from the top 50 world colleges, were used to collect leakage information. 
This testing showed the importance of increasing the security and privacy of website information for academic websites.}, } @article {pmid25767826, year = {2015}, author = {Balasubramaniam, S and Kavitha, V}, title = {Geometric data perturbation-based personal health record transactions in cloud computing.}, journal = {TheScientificWorldJournal}, volume = {2015}, number = {}, pages = {927867}, pmid = {25767826}, issn = {1537-744X}, mesh = {*Cloud Computing ; *Electronic Health Records ; Humans ; }, abstract = {Cloud computing is a new delivery model for information technology services and it typically involves the provision of dynamically scalable and often virtualized resources over the Internet. However, cloud computing raises concerns on how cloud service providers, user organizations, and governments should handle such information and interactions. Personal health records represent an emerging patient-centric model for health information exchange, and they are outsourced for storage by third parties, such as cloud providers. With these records, it is necessary for each patient to encrypt their own personal health data before uploading them to cloud servers. Current techniques for encryption primarily rely on conventional cryptographic approaches. However, key management issues remain largely unsolved with these cryptographic-based encryption techniques. We propose that personal health record transactions be managed using geometric data perturbation in cloud computing. 
In our proposed scheme, the personal health record database is perturbed using geometric data perturbation and outsourced to the Amazon EC2 cloud.}, } @article {pmid25763310, year = {2015}, author = {Nagrath, V and Morel, O and Malik, A and Saad, N and Meriaudeau, F}, title = {Dynamic electronic institutions in agent oriented cloud robotic systems.}, journal = {SpringerPlus}, volume = {4}, number = {}, pages = {103}, pmid = {25763310}, issn = {2193-1801}, abstract = {The dot-com bubble bursted in the year 2000 followed by a swift movement towards resource virtualization and cloud computing business model. Cloud computing emerged not as new form of computing or network technology but a mere remoulding of existing technologies to suit a new business model. Cloud robotics is understood as adaptation of cloud computing ideas for robotic applications. Current efforts in cloud robotics stress upon developing robots that utilize computing and service infrastructure of the cloud, without debating on the underlying business model. HTM5 is an OMG's MDA based Meta-model for agent oriented development of cloud robotic systems. The trade-view of HTM5 promotes peer-to-peer trade amongst software agents. HTM5 agents represent various cloud entities and implement their business logic on cloud interactions. Trade in a peer-to-peer cloud robotic system is based on relationships and contracts amongst several agent subsets. Electronic Institutions are associations of heterogeneous intelligent agents which interact with each other following predefined norms. In Dynamic Electronic Institutions, the process of formation, reformation and dissolution of institutions is automated leading to run time adaptations in groups of agents. DEIs in agent oriented cloud robotic ecosystems bring order and group intellect. 
This article presents DEI implementations through HTM5 methodology.}, } @article {pmid25753841, year = {2015}, author = {Thackston, R and Fortenberry, RC}, title = {The performance of low-cost commercial cloud computing as an alternative in computational chemistry.}, journal = {Journal of computational chemistry}, volume = {36}, number = {12}, pages = {926--933}, doi = {10.1002/jcc.23882}, pmid = {25753841}, issn = {1096-987X}, abstract = {The growth of commercial cloud computing (CCC) as a viable means of computational infrastructure is largely unexplored for the purposes of quantum chemistry. In this work, the PSI4 suite of computational chemistry programs is installed on five different types of Amazon World Services CCC platforms. The performance for a set of electronically excited state single-point energies is compared between these CCC platforms and typical, "in-house" physical machines. Further considerations are made for the number of cores or virtual CPUs (vCPUs, for the CCC platforms), but no considerations are made for full parallelization of the program (even though parallelization of the BLAS library is implemented), complete high-performance computing cluster utilization, or steal time. Even with this most pessimistic view of the computations, CCC resources are shown to be more cost effective for significant numbers of typical quantum chemistry computations. 
Large numbers of large computations are still best utilized by more traditional means, but smaller-scale research may be more effectively undertaken through CCC services.}, } @article {pmid25748700, year = {2015}, author = {Billings, SD and Boctor, EM and Taylor, RH}, title = {Iterative most-likely point registration (IMLP): a robust algorithm for computing optimal shape alignment.}, journal = {PloS one}, volume = {10}, number = {3}, pages = {e0117688}, pmid = {25748700}, issn = {1932-6203}, mesh = {*Algorithms ; }, abstract = {We present a probabilistic registration algorithm that robustly solves the problem of rigid-body alignment between two shapes with high accuracy, by aptly modeling measurement noise in each shape, whether isotropic or anisotropic. For point-cloud shapes, the probabilistic framework additionally enables modeling locally-linear surface regions in the vicinity of each point to further improve registration accuracy. The proposed Iterative Most-Likely Point (IMLP) algorithm is formed as a variant of the popular Iterative Closest Point (ICP) algorithm, which iterates between point-correspondence and point-registration steps. IMLP's probabilistic framework is used to incorporate a generalized noise model into both the correspondence and the registration phases of the algorithm, hence its name as a most-likely point method rather than a closest-point method. To efficiently compute the most-likely correspondences, we devise a novel search strategy based on a principal direction (PD)-tree search. We also propose a new approach to solve the generalized total-least-squares (GTLS) sub-problem of the registration phase, wherein the point correspondences are registered under a generalized noise model. Our GTLS approach has improved accuracy, efficiency, and stability compared to prior methods presented for this problem and offers a straightforward implementation using standard least squares. 
We evaluate the performance of IMLP relative to a large number of prior algorithms including ICP, a robust variant on ICP, Generalized ICP (GICP), and Coherent Point Drift (CPD), as well as drawing close comparison with the prior anisotropic registration methods of GTLS-ICP and A-ICP. The performance of IMLP is shown to be superior with respect to these algorithms over a wide range of noise conditions, outliers, and misalignments using both mesh and point-cloud representations of various shapes.}, } @article {pmid25742012, year = {2015}, author = {Li, Z and Yang, C and Jin, B and Yu, M and Liu, K and Sun, M and Zhan, M}, title = {Enabling big geoscience data analytics with a cloud-based, MapReduce-enabled and service-oriented workflow framework.}, journal = {PloS one}, volume = {10}, number = {3}, pages = {e0116781}, pmid = {25742012}, issn = {1932-6203}, mesh = {Algorithms ; *Cloud Computing ; Computational Biology/*methods ; *Earth Sciences ; Internet ; *Workflow ; }, abstract = {Geoscience observations and model simulations are generating vast amounts of multi-dimensional data. Effectively analyzing these data are essential for geoscience studies. However, the tasks are challenging for geoscientists because processing the massive amount of data is both computing and data intensive in that data analytics requires complex procedures and multiple tools. To tackle these challenges, a scientific workflow framework is proposed for big geoscience data analytics. In this framework techniques are proposed by leveraging cloud computing, MapReduce, and Service Oriented Architecture (SOA). Specifically, HBase is adopted for storing and managing big geoscience data across distributed computers. MapReduce-based algorithm framework is developed to support parallel processing of geoscience data. And service-oriented workflow architecture is built for supporting on-demand complex data analytics in the cloud environment. 
A proof-of-concept prototype tests the performance of the framework. Results show that this innovative framework significantly improves the efficiency of big geoscience data analytics by reducing the data processing time as well as simplifying data analytical procedures for geoscientists.}, } @article {pmid25705725, year = {2014}, author = {Zhang, GQ and Zhu, W and Sun, M and Tao, S and Bodenreider, O and Cui, L}, title = {MaPLE: A MapReduce Pipeline for Lattice-based Evaluation and Its Application to SNOMED CT.}, journal = {Proceedings : ... IEEE International Conference on Big Data. IEEE International Conference on Big Data}, volume = {2014}, number = {}, pages = {754--759}, pmid = {25705725}, support = {UL1 TR000439/TR/NCATS NIH HHS/United States ; Z99 LM999999//Intramural NIH HHS/United States ; }, abstract = {Non-lattice fragments are often indicative of structural anomalies in ontological systems and, as such, represent possible areas of focus for subsequent quality assurance work. However, extracting the non-lattice fragments in large ontological systems is computationally expensive if not prohibitive, using a traditional sequential approach. In this paper we present a general MapReduce pipeline, called MaPLE (MapReduce Pipeline for Lattice-based Evaluation), for extracting non-lattice fragments in large partially ordered sets and demonstrate its applicability in ontology quality assurance. Using MaPLE in a 30-node Hadoop local cloud, we systematically extracted non-lattice fragments in 8 SNOMED CT versions from 2009 to 2014 (each containing over 300k concepts), with an average total computing time of less than 3 hours per version. With dramatically reduced time, MaPLE makes it feasible not only to perform exhaustive structural analysis of large ontological hierarchies, but also to systematically track structural changes between versions. 
Our change analysis showed that the average change rates on the non-lattice pairs are up to 38.6 times higher than the change rates of the background structure (concept nodes). This demonstrates that fragments around non-lattice pairs exhibit significantly higher rates of change in the process of ontological evolution.}, } @article {pmid25705716, year = {2015}, author = {Horri, A and Dastghaibyfard, G}, title = {A novel cost based model for energy consumption in cloud computing.}, journal = {TheScientificWorldJournal}, volume = {2015}, number = {}, pages = {724524}, pmid = {25705716}, issn = {1537-744X}, abstract = {Cloud data centers consume enormous amounts of electrical energy. To support green cloud computing, providers also need to minimize cloud infrastructure energy consumption while conducting the QoS. In this study, for cloud environments an energy consumption model is proposed for time-shared policy in virtualization layer. The cost and energy usage of time-shared policy were modeled in the CloudSim simulator based upon the results obtained from the real system and then proposed model was evaluated by different scenarios. In the proposed model, the cache interference costs were considered. These costs were based upon the size of data. The proposed model was implemented in the CloudSim simulator and the related simulation results indicate that the energy consumption may be considerable and that it can vary with different parameters such as the quantum parameter, data size, and the number of VMs on a host. Measured results validate the model and demonstrate that there is a tradeoff between energy consumption and QoS in the cloud environment. 
}, } @article {pmid25699466, year = {2015}, author = {Lee, SS and Park, J and Sim, HS}, title = {Macroscopic quantum entanglement of a Kondo cloud at finite temperature.}, journal = {Physical review letters}, volume = {114}, number = {5}, pages = {057203}, doi = {10.1103/PhysRevLett.114.057203}, pmid = {25699466}, issn = {1079-7114}, abstract = {We propose a variational approach for computing the macroscopic entanglement in a many-body mixed state, based on entanglement witness operators, and compute the entanglement of formation (EoF), a mixed-state generalization of the entanglement entropy, in single- and two-channel Kondo systems at finite temperature. The thermal suppression of the EoF obeys power-law scaling at low temperature. The scaling exponent is halved from the single- to the two-channel system, which is attributed, using a bosonization method, to the non-Fermi liquid behavior of a Majorana fermion, a "half" of a complex fermion, emerging in the two-channel system. Moreover, the EoF characterizes the size and power-law tail of the Kondo screening cloud of the single-channel system.}, } @article {pmid25684202, year = {2015}, author = {Guo, X and Yu, N and Ding, X and Wang, J and Pan, Y}, title = {DIME: a novel framework for de novo metagenomic sequence assembly.}, journal = {Journal of computational biology : a journal of computational molecular cell biology}, volume = {22}, number = {2}, pages = {159--177}, pmid = {25684202}, issn = {1557-8666}, mesh = {Contig Mapping/*methods ; Metagenomics/*methods ; Sequence Analysis, DNA/*methods ; *Software ; }, abstract = {The recently developed next generation sequencing platforms not only decrease the cost for metagenomics data analysis, but also greatly enlarge the size of metagenomic sequence datasets. 
A common bottleneck of available assemblers is that the trade-off between the noise of the resulting contigs and the gain in sequence length for better annotation has not been attended enough for large-scale sequencing projects, especially for the datasets with low coverage and a large number of nonoverlapping contigs. To address this limitation and promote both accuracy and efficiency, we develop a novel metagenomic sequence assembly framework, DIME, by taking the DIvide, conquer, and MErge strategies. In addition, we give two MapReduce implementations of DIME, DIME-cap3 and DIME-genovo, on Apache Hadoop platform. For a systematic comparison of the performance of the assembly tasks, we tested DIME and five other popular short read assembly programs, Cap3, Genovo, MetaVelvet, SOAPdenovo, and SPAdes on four synthetic and three real metagenomic sequence datasets with various reads from fifty thousand to a couple million in size. The experimental results demonstrate that our method not only partitions the sequence reads with an extremely high accuracy, but also reconstructs more bases, generates higher quality assembled consensus, and yields higher assembly scores, including corrected N50 and BLAST-score-per-base, than other tools with a nearly theoretical speed-up. 
Results indicate that DIME offers great improvement in assembly across a range of sequence abundances and thus is robust to decreasing coverage.}, } @article {pmid25666423, year = {2015}, author = {Torgerson, CM and Quinn, C and Dinov, I and Liu, Z and Petrosyan, P and Pelphrey, K and Haselgrove, C and Kennedy, DN and Toga, AW and Van Horn, JD}, title = {Interacting with the National Database for Autism Research (NDAR) via the LONI Pipeline workflow environment.}, journal = {Brain imaging and behavior}, volume = {9}, number = {1}, pages = {89--103}, pmid = {25666423}, issn = {1931-7565}, support = {MH083320/MH/NIMH NIH HHS/United States ; P30 HD004147/HD/NICHD NIH HHS/United States ; R01 MH100028/MH/NIMH NIH HHS/United States ; R01 MH083320/MH/NIMH NIH HHS/United States ; HD004147/HD/NICHD NIH HHS/United States ; 5R01MH100028-03/MH/NIMH NIH HHS/United States ; }, mesh = {Autism Spectrum Disorder/*pathology ; Cloud Computing ; *Databases, Factual ; Humans ; Information Dissemination/methods ; Information Storage and Retrieval/*methods ; Neuroimaging/*statistics & numerical data ; Software ; Workflow ; }, abstract = {Under the umbrella of the National Database for Clinical Trials (NDCT) related to mental illnesses, the National Database for Autism Research (NDAR) seeks to gather, curate, and make openly available neuroimaging data from NIH-funded studies of autism spectrum disorder (ASD). NDAR has recently made its database accessible through the LONI Pipeline workflow design and execution environment to enable large-scale analyses of cortical architecture and function via local, cluster, or "cloud"-based computing resources. This presents a unique opportunity to overcome many of the customary limitations to fostering biomedical neuroimaging as a science of discovery. 
Providing open access to primary neuroimaging data, workflow methods, and high-performance computing will increase uniformity in data collection protocols, encourage greater reliability of published data, results replication, and broaden the range of researchers now able to perform larger studies than ever before. To illustrate the use of NDAR and LONI Pipeline for performing several commonly performed neuroimaging processing steps and analyses, this paper presents example workflows useful for ASD neuroimaging researchers seeking to begin using this valuable combination of online data and computational resources. We discuss the utility of such database and workflow processing interactivity as a motivation for the sharing of additional primary data in ASD research and elsewhere.}, } @article {pmid25642185, year = {2014}, author = {Cusack, R and Vicente-Grabovetsky, A and Mitchell, DJ and Wild, CJ and Auer, T and Linke, AC and Peelle, JE}, title = {Automatic analysis (aa): efficient neuroimaging workflows and parallel processing using Matlab and XML.}, journal = {Frontiers in neuroinformatics}, volume = {8}, number = {}, pages = {90}, pmid = {25642185}, issn = {1662-5196}, support = {MC_U105579212/MRC_/Medical Research Council/United Kingdom ; MC_U105592690/MRC_/Medical Research Council/United Kingdom ; R01 AG038490/AG/NIA NIH HHS/United States ; R01 DC013063/DC/NIDCD NIH HHS/United States ; }, abstract = {Recent years have seen neuroimaging data sets becoming richer, with larger cohorts of participants, a greater variety of acquisition techniques, and increasingly complex analyses. These advances have made data analysis pipelines complicated to set up and run (increasing the risk of human error) and time consuming to execute (restricting what analyses are attempted). Here we present an open-source framework, automatic analysis (aa), to address these concerns. 
Human efficiency is increased by making code modular and reusable, and managing its execution with a processing engine that tracks what has been completed and what needs to be (re)done. Analysis is accelerated by optional parallel processing of independent tasks on cluster or cloud computing resources. A pipeline comprises a series of modules that each perform a specific task. The processing engine keeps track of the data, calculating a map of upstream and downstream dependencies for each module. Existing modules are available for many analysis tasks, such as SPM-based fMRI preprocessing, individual and group level statistics, voxel-based morphometry, tractography, and multi-voxel pattern analyses (MVPA). However, aa also allows for full customization, and encourages efficient management of code: new modules may be written with only a small code overhead. aa has been used by more than 50 researchers in hundreds of neuroimaging studies comprising thousands of subjects. It has been found to be robust, fast, and efficient, for simple-single subject studies up to multimodal pipelines on hundreds of subjects. It is attractive to both novice and experienced users. 
aa can reduce the amount of time neuroimaging laboratories spend performing analyses and reduce errors, expanding the range of scientific questions it is practical to address.}, } @article {pmid25637560, year = {2015}, author = {Fisch, KM and Meißner, T and Gioia, L and Ducom, JC and Carland, TM and Loguercio, S and Su, AI}, title = {Omics Pipe: a community-based framework for reproducible multi-omics data analysis.}, journal = {Bioinformatics (Oxford, England)}, volume = {31}, number = {11}, pages = {1724--1728}, pmid = {25637560}, issn = {1367-4811}, support = {DA030976/DA/NIDA NIH HHS/United States ; UL1TR001114/TR/NCATS NIH HHS/United States ; T32 AA013525/AA/NIAAA NIH HHS/United States ; AA013525/AA/NIAAA NIH HHS/United States ; AA007456/AA/NIAAA NIH HHS/United States ; CA92577/CA/NCI NIH HHS/United States ; }, mesh = {Breast Neoplasms/genetics ; Cluster Analysis ; Databases, Factual ; Exome ; Female ; Genomics/*methods ; High-Throughput Nucleotide Sequencing/*methods ; Humans ; Reproducibility of Results ; Sequence Analysis, RNA ; *Software ; }, abstract = {MOTIVATION: Omics Pipe (http://sulab.scripps.edu/omicspipe) is a computational framework that automates multi-omics data analysis pipelines on high performance compute clusters and in the cloud. It supports best practice published pipelines for RNA-seq, miRNA-seq, Exome-seq, Whole-Genome sequencing, ChIP-seq analyses and automatic processing of data from The Cancer Genome Atlas (TCGA). Omics Pipe provides researchers with a tool for reproducible, open source and extensible next generation sequencing analysis. The goal of Omics Pipe is to democratize next-generation sequencing analysis by dramatically increasing the accessibility and reproducibility of best practice computational pipelines, which will enable researchers to generate biologically meaningful and interpretable results.

RESULTS: Using Omics Pipe, we analyzed 100 TCGA breast invasive carcinoma paired tumor-normal datasets based on the latest UCSC hg19 RefSeq annotation. Omics Pipe automatically downloaded and processed the desired TCGA samples on a high throughput compute cluster to produce a results report for each sample. We aggregated the individual sample results and compared them to the analysis in the original publications. This comparison revealed high overlap between the analyses, as well as novel findings due to the use of updated annotations and methods.

Source code for Omics Pipe is freely available on the web (https://bitbucket.org/sulab/omics_pipe). Omics Pipe is distributed as a standalone Python package for installation (https://pypi.python.org/pypi/omics_pipe) and as an Amazon Machine Image in Amazon Web Services Elastic Compute Cloud that contains all necessary third-party software dependencies and databases (https://pythonhosted.org/omics_pipe/AWS_installation.html).}, } @article {pmid25634700, year = {2015}, author = {Guo, L and Yau, WC}, title = {Efficient secure-channel free public key encryption with keyword search for EMRs in cloud storage.}, journal = {Journal of medical systems}, volume = {39}, number = {2}, pages = {11}, pmid = {25634700}, issn = {1573-689X}, mesh = {Algorithms ; *Cloud Computing ; Computer Security/*instrumentation ; Confidentiality ; Electronic Health Records/*organization & administration ; Humans ; Information Storage and Retrieval/*methods ; }, abstract = {Searchable encryption is an important cryptographic primitive that enables privacy-preserving keyword search on encrypted electronic medical records (EMRs) in cloud storage. Efficiency of such searchable encryption in a medical cloud storage system is very crucial as it involves client platforms such as smartphones or tablets that only have constrained computing power and resources. In this paper, we propose an efficient secure-channel free public key encryption with keyword search (SCF-PEKS) scheme that is proven secure in the standard model. We show that our SCF-PEKS scheme is not only secure against chosen keyword and ciphertext attacks (IND-SCF-CKCA), but also secure against keyword guessing attacks (IND-KGA). 
Furthermore, our proposed scheme is more efficient than other recent SCF-PEKS schemes in the literature.}, } @article {pmid25631240, year = {2015}, author = {Deutsch, EW and Mendoza, L and Shteynberg, D and Slagel, J and Sun, Z and Moritz, RL}, title = {Trans-Proteomic Pipeline, a standardized data processing pipeline for large-scale reproducible proteomics informatics.}, journal = {Proteomics. Clinical applications}, volume = {9}, number = {7-8}, pages = {745--754}, pmid = {25631240}, issn = {1862-8354}, support = {RC2 HG005805/HG/NHGRI NIH HHS/United States ; U54 EB020406/EB/NIBIB NIH HHS/United States ; 0923536//Medical Research Council/United Kingdom ; R01 GM087221/GM/NIGMS NIH HHS/United States ; 2P50 GM076547/GM/NIGMS NIH HHS/United States ; P50 GM076547/GM/NIGMS NIH HHS/United States ; U54EB020406/EB/NIBIB NIH HHS/United States ; }, mesh = {Computational Biology/*methods ; Humans ; Proteome/metabolism ; Proteomics/*methods ; Reproducibility of Results ; Software ; *Statistics as Topic ; }, abstract = {Democratization of genomics technologies has enabled the rapid determination of genotypes. More recently the democratization of comprehensive proteomics technologies is enabling the determination of the cellular phenotype and the molecular events that define its dynamic state. Core proteomic technologies include MS to define protein sequence, protein:protein interactions, and protein PTMs. Key enabling technologies for proteomics are bioinformatic pipelines to identify, quantitate, and summarize these events. The Trans-Proteomics Pipeline (TPP) is a robust open-source standardized data processing pipeline for large-scale reproducible quantitative MS proteomics. It supports all major operating systems and instrument vendors via open data formats. Here, we provide a review of the overall proteomics workflow supported by the TPP, its major tools, and how it can be used in its various modes from desktop to cloud computing. 
We describe new features for the TPP, including data visualization functionality. We conclude by describing some common perils that affect the analysis of MS/MS datasets, as well as some major upcoming features.}, } @article {pmid25625550, year = {2015}, author = {O'Driscoll, A and Belogrudov, V and Carroll, J and Kropp, K and Walsh, P and Ghazal, P and Sleator, RD}, title = {HBLAST: Parallelised sequence similarity--A Hadoop MapReducable basic local alignment search tool.}, journal = {Journal of biomedical informatics}, volume = {54}, number = {}, pages = {58--64}, doi = {10.1016/j.jbi.2015.01.008}, pmid = {25625550}, issn = {1532-0480}, mesh = {Algorithms ; Computing Methodologies ; Databases, Factual ; *Databases, Genetic ; Genetic Techniques ; Genomics/*methods ; Humans ; Internet ; Sequence Alignment/*methods ; *Software ; }, abstract = {The recent exponential growth of genomic databases has resulted in the common task of sequence alignment becoming one of the major bottlenecks in the field of computational biology. It is typical for these large datasets and complex computations to require cost prohibitive High Performance Computing (HPC) to function. As such, parallelised solutions have been proposed but many exhibit scalability limitations and are incapable of effectively processing "Big Data" - the name attributed to datasets that are extremely large, complex and require rapid processing. The Hadoop framework, comprised of distributed storage and a parallelised programming framework known as MapReduce, is specifically designed to work with such datasets but it is not trivial to efficiently redesign and implement bioinformatics algorithms according to this paradigm. The parallelisation strategy of "divide and conquer" for alignment algorithms can be applied to both data sets and input query sequences. 
However, scalability is still an issue due to memory constraints or large databases, with very large database segmentation leading to additional performance decline. Herein, we present Hadoop Blast (HBlast), a parallelised BLAST algorithm that proposes a flexible method to partition both databases and input query sequences using "virtual partitioning". HBlast presents improved scalability over existing solutions and well balanced computational work load while keeping database segmentation and recompilation to a minimum. Enhanced BLAST search performance on cheap memory constrained hardware has significant implications for in field clinical diagnostic testing; enabling faster and more accurate identification of pathogenic DNA in human blood or tissue samples.}, } @article {pmid25622335, year = {2015}, author = {Yue, K and Fang, Q and Wang, X and Li, J and Liu, W}, title = {A Parallel and Incremental Approach for Data-Intensive Learning of Bayesian Networks.}, journal = {IEEE transactions on cybernetics}, volume = {45}, number = {12}, pages = {2890--2904}, doi = {10.1109/TCYB.2015.2388791}, pmid = {25622335}, issn = {2168-2275}, abstract = {Bayesian network (BN) has been adopted as the underlying model for representing and inferring uncertain knowledge. As the basis of realistic applications centered on probabilistic inferences, learning a BN from data is a critical subject of machine learning, artificial intelligence, and big data paradigms. Currently, it is necessary to extend the classical methods for learning BNs with respect to data-intensive computing or in cloud environments. In this paper, we propose a parallel and incremental approach for data-intensive learning of BNs from massive, distributed, and dynamically changing data by extending the classical scoring and search algorithm and using MapReduce. 
First, we adopt the minimum description length as the scoring metric and give the two-pass MapReduce-based algorithms for computing the required marginal probabilities and scoring the candidate graphical model from sample data. Then, we give the corresponding strategy for extending the classical hill-climbing algorithm to obtain the optimal structure, as well as that for storing a BN by pairs. Further, in view of the dynamic characteristics of the changing data, we give the concept of influence degree to measure the coincidence of the current BN with new data, and then propose the corresponding two-pass MapReduce-based algorithms for BNs incremental learning. Experimental results show the efficiency, scalability, and effectiveness of our methods.}, } @article {pmid25611102, year = {2015}, author = {Yohe, S and Hauge, A and Bunjer, K and Kemmer, T and Bower, M and Schomaker, M and Onsongo, G and Wilson, J and Erdmann, J and Zhou, Y and Deshpande, A and Spears, MD and Beckman, K and Silverstein, KA and Thyagarajan, B}, title = {Clinical validation of targeted next-generation sequencing for inherited disorders.}, journal = {Archives of pathology \& laboratory medicine}, volume = {139}, number = {2}, pages = {204--210}, doi = {10.5858/arpa.2013-0625-OA}, pmid = {25611102}, issn = {1543-2165}, mesh = {Computational Biology ; DNA Copy Number Variations ; DNA Mutational Analysis ; Exons/genetics ; Feasibility Studies ; Genetic Diseases, Inborn/*diagnosis/genetics ; Genetic Predisposition to Disease ; Genetic Testing/*methods/standards ; Genetic Variation ; Genotype ; High-Throughput Nucleotide Sequencing/*methods ; Humans ; Mutation ; Rare Diseases/*diagnosis/genetics ; Sequence Analysis, DNA ; }, abstract = {CONTEXT: Although next-generation sequencing (NGS) can revolutionize molecular diagnostics, several hurdles remain in the implementation of this technology in clinical laboratories.

OBJECTIVES: To validate and implement an NGS panel for genetic diagnosis of more than 100 inherited diseases, such as neurologic conditions, congenital hearing loss and eye disorders, developmental disorders, nonmalignant diseases treated by hematopoietic cell transplantation, familial cancers, connective tissue disorders, metabolic disorders, disorders of sexual development, and cardiac disorders. The diagnostic gene panels ranged from 1 to 54 genes with most of panels containing 10 genes or fewer.

DESIGN: We used a liquid hybridization-based, target-enrichment strategy to enrich 10 067 exons in 568 genes, followed by NGS with a HiSeq 2000 sequencing system (Illumina, San Diego, California).

RESULTS: We successfully sequenced 97.6% (9825 of 10 067) of the targeted exons to obtain a minimum coverage of 20× at all bases. We demonstrated 100% concordance in detecting 19 pathogenic single-nucleotide variations and 11 pathogenic insertion-deletion mutations ranging in size from 1 to 18 base pairs across 18 samples that were previously characterized by Sanger sequencing. Using 4 pairs of blinded, duplicate samples, we demonstrated a high degree of concordance (>99%) among the blinded, duplicate pairs.

CONCLUSIONS: We have successfully demonstrated the feasibility of using the NGS platform to multiplex genetic tests for several rare diseases and the use of cloud computing for bioinformatics analysis as a relatively low-cost solution for implementing NGS in clinical laboratories.}, } @article {pmid25608737, year = {2015}, author = {Lawrenz, M and Shukla, D and Pande, VS}, title = {Cloud computing approaches for prediction of ligand binding poses and pathways.}, journal = {Scientific reports}, volume = {5}, number = {}, pages = {7918}, pmid = {25608737}, issn = {2045-2322}, support = {R15 GM062828/GM/NIGMS NIH HHS/United States ; U54 GM072970/GM/NIGMS NIH HHS/United States ; R01GM62828/GM/NIGMS NIH HHS/United States ; }, mesh = {Binding Sites ; *Cloud Computing ; Crystallography, X-Ray ; *Ligands ; Molecular Dynamics Simulation ; Protein Binding ; *Protein Engineering ; Tacrolimus Binding Protein 1A/*chemistry ; Thermodynamics ; }, abstract = {We describe an innovative protocol for ab initio prediction of ligand crystallographic binding poses and highly effective analysis of large datasets generated for protein-ligand dynamics. We include a procedure for setup and performance of distributed molecular dynamics simulations on cloud computing architectures, a model for efficient analysis of simulation data, and a metric for evaluation of model convergence. We give accurate binding pose predictions for five ligands ranging in affinity from 7 nM to > 200 μM for the immunophilin protein FKBP12, for expedited results in cases where experimental structures are difficult to produce. 
Our approach goes beyond single, low energy ligand poses to give quantitative kinetic information that can inform protein engineering and ligand design.}, } @article {pmid25589906, year = {2014}, author = {Haux, R and Lehmann, CU}, title = {From bed to bench: bridging from informatics practice to theory: an exploratory analysis.}, journal = {Applied clinical informatics}, volume = {5}, number = {4}, pages = {907-915}, pmid = {25589906}, issn = {1869-0327}, mesh = {Medical Informatics/*statistics & numerical data ; Publications/*statistics & numerical data ; }, abstract = {BACKGROUND: In 2009, Applied Clinical Informatics (ACI)--focused on applications in clinical informatics--was launched as a companion journal to Methods of Information in Medicine (MIM). Both journals are official journals of the International Medical Informatics Association.

OBJECTIVES: To explore which congruencies and interdependencies exist in publications from theory to practice and from practice to theory and to determine existing gaps. Major topics discussed in ACI and MIM were analyzed. We explored if the intention of publishing companion journals to provide an information bridge from informatics theory to informatics practice and vice versa could be supported by this model. In this manuscript we will report on congruencies and interdependences from practice to theory and on major topics in MIM.

METHODS: Retrospective, prolective observational study on recent publications of ACI and MIM. All publications of the years 2012 and 2013 were indexed and analyzed.

RESULTS: One hundred and ninety-six publications were analyzed (ACI 87, MIM 109). In MIM publications, modelling aspects as well as methodological and evaluation approaches for the analysis of data, information, and knowledge in biomedicine and health care were frequently raised - and often discussed from an interdisciplinary point of view. Important themes were ambient-assisted living, anatomic spatial relations, biomedical informatics as scientific discipline, boosting, coding, computerized physician order entry, data analysis, grid and cloud computing, health care systems and services, health-enabling technologies, health information search, health information systems, imaging, knowledge-based decision support, patient records, signal analysis, and web science. Congruencies between journals could be found in themes, but with a different focus on content. Interdependencies from practice to theory, found in these publications, were only limited.

CONCLUSIONS: Bridging from informatics theory to practice and vice versa remains a major component of successful research and practice as well as a major challenge.}, } @article {pmid25580472, year = {2014}, author = {Ko, KD and El-Ghazawi, T and Kim, D and Morizono, H}, title = {Predicting the severity of motor neuron disease progression using electronic health record data with a cloud computing Big Data approach.}, journal = {IEEE Symposium on Computational Intelligence in Bioinformatics and Computational Biology proceedings. IEEE Symposium on Computational Intelligence in Bioinformatics and Computational Biology}, volume = {2014}, number = {}, pages = {}, pmid = {25580472}, support = {UL1 TR000075/TR/NCATS NIH HHS/United States ; }, abstract = {Motor neuron diseases (MNDs) are a class of progressive neurological diseases that damage the motor neurons. An accurate diagnosis is important for the treatment of patients with MNDs because there is no standard cure for the MNDs. However, the rates of false positive and false negative diagnoses are still very high in this class of diseases. In the case of Amyotrophic Lateral Sclerosis (ALS), current estimates indicate 10% of diagnoses are false-positives, while 44% appear to be false negatives. In this study, we developed a new methodology to profile specific medical information from patient medical records for predicting the progression of motor neuron diseases. We implemented a system using Hbase and the Random forest classifier of Apache Mahout to profile medical records provided by the Pooled Resource Open-Access ALS Clinical Trials Database (PRO-ACT) site, and we achieved 66% accuracy in the prediction of ALS progress.}, } @article {pmid25571105, year = {2014}, author = {Takeuchi, H and Kodama, N}, title = {Validity of association rules extracted by healthcare-data-mining.}, journal = {Annual International Conference of the IEEE Engineering in Medicine and Biology Society. 
IEEE Engineering in Medicine and Biology Society. Annual International Conference}, volume = {2014}, number = {}, pages = {4960-4963}, doi = {10.1109/EMBC.2014.6944737}, pmid = {25571105}, issn = {2694-0604}, mesh = {Algorithms ; Blood Pressure/physiology ; Cell Phone ; Cloud Computing ; *Data Mining ; *Delivery of Health Care ; Energy Metabolism ; Female ; Humans ; Life Style ; Male ; Smoking ; Young Adult ; }, abstract = {A personal healthcare system used with cloud computing has been developed. It enables a daily time-series of personal health and lifestyle data to be stored in the cloud through mobile devices. The cloud automatically extracts personally useful information, such as rules and patterns concerning the user's lifestyle and health condition embedded in their personal big data, by using healthcare-data-mining. This study has verified that the extracted rules on the basis of a daily time-series data stored during a half- year by volunteer users of this system are valid.}, } @article {pmid25570786, year = {2014}, author = {Fahim, M and Lee, S and Yoon, Y}, title = {SUPAR: Smartphone as a ubiquitous physical activity recognizer for u-healthcare services.}, journal = {Annual International Conference of the IEEE Engineering in Medicine and Biology Society. IEEE Engineering in Medicine and Biology Society. Annual International Conference}, volume = {2014}, number = {}, pages = {3666-3669}, doi = {10.1109/EMBC.2014.6944418}, pmid = {25570786}, issn = {2694-0604}, mesh = {Accelerometry/instrumentation/methods ; Algorithms ; Bayes Theorem ; *Cell Phone ; Cloud Computing ; Delivery of Health Care/*methods ; *Exercise ; Humans ; Internet ; Software ; Support Vector Machine ; }, abstract = {Current generation smartphone can be seen as one of the most ubiquitous device for physical activity recognition. In this paper we proposed a physical activity recognizer to provide u-healthcare services in a cost effective manner by utilizing cloud computing infrastructure. 
Our model is comprised on embedded triaxial accelerometer of the smartphone to sense the body movements and a cloud server to store and process the sensory data for numerous kind of services. We compute the time and frequency domain features over the raw signals and evaluate different machine learning algorithms to identify an accurate activity recognition model for four kinds of physical activities (i.e., walking, running, cycling and hopping). During our experiments we found Support Vector Machine (SVM) algorithm outperforms for the aforementioned physical activities as compared to its counterparts. Furthermore, we also explain how smartphone application and cloud server communicate with each other.}, } @article {pmid25570784, year = {2014}, author = {Chung, WY and Fong, EM}, title = {Seamless personal health information system in cloud computing.}, journal = {Annual International Conference of the IEEE Engineering in Medicine and Biology Society. IEEE Engineering in Medicine and Biology Society. Annual International Conference}, volume = {2014}, number = {}, pages = {3658-3661}, doi = {10.1109/EMBC.2014.6944416}, pmid = {25570784}, issn = {2694-0604}, mesh = {Adult ; Body Mass Index ; *Cell Phone ; *Cloud Computing ; Delivery of Health Care/*methods ; Electrocardiography ; Female ; *Health Information Systems/instrumentation ; Health Status ; Humans ; Internet ; Location Directories and Signs ; Male ; Medication Adherence ; Middle Aged ; Monitoring, Physiologic ; Wireless Technology ; }, abstract = {Noncontact ECG measurement has gained popularity these days due to its noninvasive and conveniences to be applied on daily life. This approach does not require any direct contact between patient's skin and sensor for physiological signal measurement. The noncontact ECG measurement is integrated with mobile healthcare system for health status monitoring. 
Mobile phone acts as the personal health information system displaying health status and body mass index (BMI) tracking. Besides that, it plays an important role being the medical guidance providing medical knowledge database including symptom checker and health fitness guidance. At the same time, the system also features some unique medical functions that cater to the living demand of the patients or users, including regular medication reminders, alert alarm, medical guidance, appointment scheduling. Lastly, we demonstrate mobile healthcare system with web application for extended uses, thus health data are clouded into web server system and web database storage. This allows remote health status monitoring easily and so forth it promotes a cost effective personal healthcare system.}, } @article {pmid25570666, year = {2014}, author = {Chouvarda, I and Philip, NY and Natsiavas, P and Kilintzis, V and Sobnath, D and Kayyali, R and Henriques, J and Paiva, RP and Raptopoulos, A and Chételat, O and Maglaveras, N}, title = {WELCOME – innovative integrated care platform using wearable sensing and smart cloud computing for COPD patients with comorbidities.}, journal = {Annual International Conference of the IEEE Engineering in Medicine and Biology Society. IEEE Engineering in Medicine and Biology Society. 
Annual International Conference}, volume = {2014}, number = {}, pages = {3180-3183}, doi = {10.1109/EMBC.2014.6944298}, pmid = {25570666}, issn = {2694-0604}, mesh = {Algorithms ; Anxiety/complications ; Clothing ; Comorbidity ; Depression/complications ; Diabetes Complications/diagnosis ; Diabetes Mellitus ; Disease Management ; Europe ; Expert Systems ; Health Services ; Heart Failure/complications ; Humans ; Monitoring, Physiologic/*instrumentation/methods ; Pulmonary Disease, Chronic Obstructive/*diagnosis/*physiopathology ; Software ; User-Computer Interface ; }, abstract = {We propose WELCOME, an innovative integrated care platform using wearable sensors and smart cloud computing for Chronic Obstructive Pulmonary Disease (COPD) patients with co-morbidities. WELCOME aims to bring about a change in the reactive nature of the management of chronic diseases and its comorbidities, in particular through the development of a patient centred and proactive approach to COPD management. The aim of WELCOME is to support healthcare services to give early detection of complications (potentially reducing hospitalisations) and the prevention and mitigation of comorbidities (Heart Failure, Diabetes, Anxiety and Depression). The system incorporates patient hub, where it interacts with the patient via a light vest including a large number of non-invasive chest sensors for monitoring various relevant parameters. In addition, interactive applications to monitor and manage diabetes, anxiety and lifestyle issues will be provided to the patient. Informal carers will also be supported in dealing with their patients. On the other hand, welcome smart cloud platform is the heart of the proposed system where all the medical records and the monitoring data are managed and processed via the decision support system. 
Healthcare professionals will be able to securely access the WELCOME applications to monitor and manage the patient's conditions and respond to alerts on personalized level.}, } @article {pmid25570336, year = {2014}, author = {Harvey, B and Soo-Yeon Ji, }, title = {Cloud-scale genomic signals processing classification analysis for gene expression microarray data.}, journal = {Annual International Conference of the IEEE Engineering in Medicine and Biology Society. IEEE Engineering in Medicine and Biology Society. Annual International Conference}, volume = {2014}, number = {}, pages = {1843-1846}, doi = {10.1109/EMBC.2014.6943968}, pmid = {25570336}, issn = {2694-0604}, mesh = {*Cloud Computing ; Computers ; DNA/metabolism ; Databases, Genetic ; Gene Expression Profiling/*methods ; *Gene Expression Regulation, Neoplastic ; *Genomics ; Humans ; Neoplasms/genetics/*metabolism ; Oligonucleotide Array Sequence Analysis/*methods ; RNA, Messenger/metabolism ; Signal Processing, Computer-Assisted ; Wavelet Analysis ; }, abstract = {As microarray data available to scientists continues to increase in size and complexity, it has become overwhelmingly important to find multiple ways to bring inference though analysis of DNA/mRNA sequence data that is useful to scientists. Though there have been many attempts to elucidate the issue of bringing forth biological inference by means of wavelet preprocessing and classification, there has not been a research effort that focuses on a cloud-scale classification analysis of microarray data using Wavelet thresholding in a Cloud environment to identify significantly expressed features. This paper proposes a novel methodology that uses Wavelet based Denoising to initialize a threshold for determination of significantly expressed genes for classification. Additionally, this research was implemented and encompassed within cloud-based distributed processing environment. 
The utilization of Cloud computing and Wavelet thresholding was used for the classification 14 tumor classes from the Global Cancer Map (GCM). The results proved to be more accurate than using a predefined p-value for differential expression classification. This novel methodology analyzed Wavelet based threshold features of gene expression in a Cloud environment, furthermore classifying the expression of samples by analyzing gene patterns, which inform us of biological processes. Moreover, enabling researchers to face the present and forthcoming challenges that may arise in the analysis of data in functional genomics of large microarray datasets.}, } @article {pmid25570014, year = {2014}, author = {Neves Tafula, SM and Moreira da Silva, N and Rozanski, VE and Silva Cunha, JP}, title = {ABrIL - Advanced Brain Imaging Lab : a cloud based computation environment for cooperative neuroimaging projects.}, journal = {Annual International Conference of the IEEE Engineering in Medicine and Biology Society. IEEE Engineering in Medicine and Biology Society. Annual International Conference}, volume = {2014}, number = {}, pages = {534-537}, doi = {10.1109/EMBC.2014.6943646}, pmid = {25570014}, issn = {2694-0604}, mesh = {Brain/anatomy & histology/physiology ; Brain Mapping ; Cloud Computing ; Diffusion Tensor Imaging ; Humans ; *Neuroimaging ; *User-Computer Interface ; }, abstract = {Neuroscience is an increasingly multidisciplinary and highly cooperative field where neuroimaging plays an important role. Neuroimaging rapid evolution is demanding for a growing number of computing resources and skills that need to be put in place at every lab. Typically each group tries to setup their own servers and workstations to support their neuroimaging needs, having to learn from Operating System management to specific neuroscience software tools details before any results can be obtained from each setup. 
This setup and learning process is replicated in every lab, even if a strong collaboration among several groups is going on. In this paper we present a new cloud service model - Brain Imaging Application as a Service (BiAaaS) - and one of its implementation - Advanced Brain Imaging Lab (ABrIL) - in the form of an ubiquitous virtual desktop remote infrastructure that offers a set of neuroimaging computational services in an interactive neuroscientist-friendly graphical user interface (GUI). This remote desktop has been used for several multi-institution cooperative projects with different neuroscience objectives that already achieved important results, such as the contribution to a high impact paper published in the January issue of the Neuroimage journal. The ABrIL system has shown its applicability in several neuroscience projects with a relatively low-cost, promoting truly collaborative actions and speeding up project results and their clinical applicability.}, } @article {pmid25569088, year = {2015}, author = {Hung, CL and Chen, WP and Hua, GJ and Zheng, H and Tsai, SJ and Lin, YL}, title = {Cloud computing-based TagSNP selection algorithm for human genome data.}, journal = {International journal of molecular sciences}, volume = {16}, number = {1}, pages = {1096-1110}, pmid = {25569088}, issn = {1422-0067}, mesh = {*Algorithms ; *Computational Biology ; *Genome, Human ; Genome-Wide Association Study ; Haplotypes ; Humans ; Linkage Disequilibrium ; Polymorphism, Single Nucleotide ; }, abstract = {Single nucleotide polymorphisms (SNPs) play a fundamental role in human genetic variation and are used in medical diagnostics, phylogeny construction, and drug design. They provide the highest-resolution genetic fingerprint for identifying disease associations and human features. Haplotypes are regions of linked genetic variants that are closely spaced on the genome and tend to be inherited together. 
Genetics research has revealed SNPs within certain haplotype blocks that introduce few distinct common haplotypes into most of the population. Haplotype block structures are used in association-based methods to map disease genes. In this paper, we propose an efficient algorithm for identifying haplotype blocks in the genome. In chromosomal haplotype data retrieved from the HapMap project website, the proposed algorithm identified longer haplotype blocks than an existing algorithm. To enhance its performance, we extended the proposed algorithm into a parallel algorithm that copies data in parallel via the Hadoop MapReduce framework. The proposed MapReduce-paralleled combinatorial algorithm performed well on real-world data obtained from the HapMap dataset; the improvement in computational efficiency was proportional to the number of processors used.}, } @article {pmid25563475, year = {2015}, author = {Zelenyuk, A and Imre, D and Wilson, J and Zhang, Z and Wang, J and Mueller, K}, title = {Airborne single particle mass spectrometers (SPLAT II & miniSPLAT) and new software for data visualization and analysis in a geo-spatial context.}, journal = {Journal of the American Society for Mass Spectrometry}, volume = {26}, number = {2}, pages = {257-270}, pmid = {25563475}, issn = {1879-1123}, abstract = {Understanding the effect of aerosols on climate requires knowledge of the size and chemical composition of individual aerosol particles-two fundamental properties that determine an aerosol's optical properties and ability to serve as cloud condensation or ice nuclei. Here we present our aircraft-compatible single particle mass spectrometers, SPLAT II and its new, miniaturized version, miniSPLAT that measure in-situ and in real-time the size and chemical composition of individual aerosol particles with extremely high sensitivity, temporal resolution, and sizing precision on the order of a monolayer. 
Although miniSPLAT's size, weight, and power consumption are significantly smaller, its performance is on par with SPLAT II. Both instruments operate in dual data acquisition mode to measure, in addition to single particle size and composition, particle number concentrations, size distributions, density, and asphericity with high temporal resolution. We also present ND-Scope, our newly developed interactive visual analytics software package. ND-Scope is designed to explore and visualize the vast amount of complex, multidimensional data acquired by our single particle mass spectrometers, along with other aerosol and cloud characterization instruments on-board aircraft. We demonstrate that ND-Scope makes it possible to visualize the relationships between different observables and to view the data in a geo-spatial context, using the interactive and fully coupled Google Earth and Parallel Coordinates displays. Here we illustrate the utility of ND-Scope to visualize the spatial distribution of atmospheric particles of different compositions, and explore the relationship between individual particle compositions and their activity as cloud condensation nuclei.}, } @article {pmid25559934, year = {2015}, author = {Hussain, M and Ali, T and Khan, WA and Afzal, M and Lee, S and Latif, K}, title = {Recommendations service for chronic disease patient in multimodel sensors home environment.}, journal = {Telemedicine journal and e-health : the official journal of the American Telemedicine Association}, volume = {21}, number = {3}, pages = {185-199}, pmid = {25559934}, issn = {1556-3669}, mesh = {Aged ; Aged, 80 and over ; Caregivers/statistics & numerical data ; Chronic Disease ; Cloud Computing/statistics & numerical data ; Decision Support Systems, Clinical/*instrumentation ; Dementia/diagnosis/*rehabilitation ; Female ; Geriatric Assessment/methods ; Home Care Services/organization & administration ; Home Nursing/methods ; Humans ; Male ; *Outcome Assessment, Health Care ; 
Patient Safety ; *Practice Guidelines as Topic ; Republic of Korea ; Telerehabilitation/*instrumentation/methods ; }, abstract = {With advanced technologies in hand, there exist potential applications and services built around monitoring activities of daily living (ADL) of elderly people at nursing homes. Most of the elderly people in these facilities are suffering from different chronic diseases such as dementia. Existing technologies are mainly focusing on non-medication interventions and monitoring of ADL for addressing loss of autonomy or well-being. Monitoring and managing ADL related to cognitive behaviors for non-medication intervention are very effective in improving dementia patients' conditions. However, cognitive functions of patients can be improved if appropriate recommendations of medications are delivered at a particular time. Previously we developed the Secured Wireless Sensor Network Integrated Cloud Computing for Ubiquitous-Life Care (SC(3)). SC(3) services were limited to monitoring ADL of elderly people with Alzheimer's disease and providing non-medication recommendations to the patient. In this article, we propose a system called the Smart Clinical Decision Support System (CDSS) as an integral part of the SC(3) platform. Using the Smart CDSS, patients are provided with access to medication recommendations of expert physicians. Physicians are provided with an interface to create clinical knowledge for medication recommendations and to observe the patient's condition. 
The clinical knowledge created by physicians as the knowledge base of the Smart CDSS produces recommendations to the caregiver for medications based on each patient's symptoms.}, } @article {pmid25493610, year = {2015}, author = {Moghadam, BT and Alvarsson, J and Holm, M and Eklund, M and Carlsson, L and Spjuth, O}, title = {Scaling predictive modeling in drug development with cloud computing.}, journal = {Journal of chemical information and modeling}, volume = {55}, number = {1}, pages = {19-25}, doi = {10.1021/ci500580y}, pmid = {25493610}, issn = {1549-960X}, mesh = {Computational Biology/*methods ; *Computing Methodologies ; *Databases, Chemical ; Databases, Factual ; Drug Discovery/*methods ; Internet ; Ligands ; *Quantitative Structure-Activity Relationship ; Software ; }, abstract = {Growing data sets with increased time for analysis is hampering predictive modeling in drug discovery. Model building can be carried out on high-performance computer clusters, but these can be expensive to purchase and maintain. We have evaluated ligand-based modeling on cloud computing resources where computations are parallelized and run on the Amazon Elastic Cloud. We trained models on open data sets of varying sizes for the end points logP and Ames mutagenicity and compare with model building parallelized on a traditional high-performance computing cluster. We show that while high-performance computing results in faster model building, the use of cloud computing resources is feasible for large data sets and scales well within cloud instances. An additional advantage of cloud computing is that the costs of predictive models can be easily quantified, and a choice can be made between speed and economy. 
The easy access to computational resources with no up-front investments makes cloud computing an attractive alternative for scientists, especially for those without access to a supercomputer, and our study shows that it enables cost-efficient modeling of large data sets on demand within reasonable time.}, } @article {pmid25488429, year = {2015}, author = {Oubel, E and Bonnard, E and Sueoka-Aragane, N and Kobayashi, N and Charbonnier, C and Yamamichi, J and Mizobe, H and Kimura, S}, title = {Volume-based response evaluation with consensual lesion selection: a pilot study by using cloud solutions and comparison to RECIST 1.1.}, journal = {Academic radiology}, volume = {22}, number = {2}, pages = {217-225}, doi = {10.1016/j.acra.2014.09.008}, pmid = {25488429}, issn = {1878-4046}, mesh = {*Algorithms ; Female ; Humans ; Imaging, Three-Dimensional/*methods ; Internet ; Lung Neoplasms/*diagnostic imaging ; Male ; Middle Aged ; Observer Variation ; Pattern Recognition, Automated/methods ; Pilot Projects ; Radiographic Image Enhancement/methods ; Radiographic Image Interpretation, Computer-Assisted/*methods ; Reproducibility of Results ; *Response Evaluation Criteria in Solid Tumors ; Sensitivity and Specificity ; Software ; Tomography, X-Ray Computed/*methods ; Treatment Outcome ; Tumor Burden ; }, abstract = {RATIONALE AND OBJECTIVES: Lesion volume is considered as a promising alternative to Response Evaluation Criteria in Solid Tumors (RECIST) to make tumor measurements more accurate and consistent, which would enable an earlier detection of temporal changes. In this article, we report the results of a pilot study aiming at evaluating the effects of a consensual lesion selection on volume-based response (VBR) assessments.

MATERIALS AND METHODS: Eleven patients with lung computed tomography scans acquired at three time points were selected from Reference Image Database to Evaluate Response to therapy in lung cancer (RIDER) and proprietary databases. Images were analyzed according to RECIST 1.1 and VBR criteria by three readers working in different geographic locations. Cloud solutions were used to connect readers and carry out a consensus process on the selection of lesions used for computing response. Because there are not currently accepted thresholds for computing VBR, we have applied a set of thresholds based on measurement variability (-35% and +55%). The benefit of this consensus was measured in terms of multiobserver agreement by using Fleiss kappa (κfleiss) and corresponding standard errors (SE).

RESULTS: VBR after consensual selection of target lesions allowed to obtain κfleiss = 0.85 (SE = 0.091), which increases up to 0.95 (SE = 0.092), if an extra consensus on new lesions is added. As a reference, the agreement when applying RECIST without consensus was κfleiss = 0.72 (SE = 0.088). These differences were found to be statistically significant according to a z-test.

CONCLUSIONS: An agreement on the selection of lesions allows reducing the inter-reader variability when computing VBR. Cloud solutions showed to be an interesting and feasible strategy for standardizing response evaluations, reducing variability, and increasing consistency of results in multicenter clinical trials.}, } @article {pmid25480377, year = {2015}, author = {Hwang, YC and Lin, CF and Valladares, O and Malamon, J and Kuksa, PP and Zheng, Q and Gregory, BD and Wang, LS}, title = {HIPPIE: a high-throughput identification pipeline for promoter interacting enhancer elements.}, journal = {Bioinformatics (Oxford, England)}, volume = {31}, number = {8}, pages = {1290-1292}, pmid = {25480377}, issn = {1367-4811}, support = {P30 AG010124/AG/NIA NIH HHS/United States ; R01 GM099962/GM/NIGMS NIH HHS/United States ; R01-GM099962/GM/NIGMS NIH HHS/United States ; U24-AG041689/AG/NIA NIH HHS/United States ; }, mesh = {DNA/*genetics/*metabolism ; Enhancer Elements, Genetic/*genetics ; Humans ; Programming Languages ; Promoter Regions, Genetic/*genetics ; Sequence Analysis, DNA/*methods ; }, abstract = {UNLABELLED: We implemented a high-throughput identification pipeline for promoter interacting enhancer element to streamline the workflow from mapping raw Hi-C reads, identifying DNA-DNA interacting fragments with high confidence and quality control, detecting histone modifications and DNase hypersensitive enrichments in putative enhancer elements, to ultimately extracting possible intra- and inter-chromosomal enhancer-target gene relationships.

This software package is designed to run on high-performance computing clusters with Oracle Grid Engine. The source code is freely available under the MIT license for academic and nonprofit use. The source code and instructions are available at the Wang lab website (http://wanglab.pcbi.upenn.edu/hippie/). It is also provided as an Amazon Machine Image to be used directly on Amazon Cloud with minimal installation.

CONTACT: lswang@mail.med.upenn.edu or bdgregor@sas.upenn.edu

SUPPLEMENTARY INFORMATION: Supplementary Material is available at Bioinformatics online.}, } @article {pmid25478562, year = {2014}, author = {Ladoukakis, E and Kolisis, FN and Chatziioannou, AA}, title = {Integrative workflows for metagenomic analysis.}, journal = {Frontiers in cell and developmental biology}, volume = {2}, number = {}, pages = {70}, pmid = {25478562}, issn = {2296-634X}, abstract = {The rapid evolution of all sequencing technologies, described by the term Next Generation Sequencing (NGS), has revolutionized metagenomic analysis. They constitute a combination of high-throughput analytical protocols, coupled to delicate measuring techniques, in order to potentially discover, properly assemble and map allelic sequences to the correct genomes, achieving particularly high yields for only a fraction of the cost of traditional processes (i.e., Sanger). From a bioinformatic perspective, this boils down to many GB of data being generated from each single sequencing experiment, rendering the management or even the storage, critical bottlenecks with respect to the overall analytical endeavor. The enormous complexity is even more aggravated by the versatility of the processing steps available, represented by the numerous bioinformatic tools that are essential, for each analytical task, in order to fully unveil the genetic content of a metagenomic dataset. These disparate tasks range from simple, nonetheless non-trivial, quality control of raw data to exceptionally complex protein annotation procedures, requesting a high level of expertise for their proper application or the neat implementation of the whole workflow. Furthermore, a bioinformatic analysis of such scale, requires grand computational resources, imposing as the sole realistic solution, the utilization of cloud computing infrastructures. 
In this review article we discuss different, integrative, bioinformatic solutions available, which address the aforementioned issues, by performing a critical assessment of the available automated pipelines for data management, quality control, and annotation of metagenomic data, embracing various, major sequencing technologies and applications.}, } @article {pmid25475637, year = {2014}, author = {Wilson, CA and Simonyan, V}, title = {FDA's Activities Supporting Regulatory Application of "Next Gen" Sequencing Technologies.}, journal = {PDA journal of pharmaceutical science and technology}, volume = {68}, number = {6}, pages = {626-630}, doi = {10.5731/pdajpst.2014.01024}, pmid = {25475637}, issn = {1948-2124}, mesh = {Cloud Computing/legislation & jurisprudence ; Computational Biology/*legislation & jurisprudence ; Computer Security/legislation & jurisprudence ; Data Mining/legislation & jurisprudence ; Databases, Genetic/*legislation & jurisprudence ; *High-Throughput Nucleotide Sequencing ; Humans ; *Policy Making ; United States ; United States Food and Drug Administration/*legislation & jurisprudence ; Workflow ; }, abstract = {UNLABELLED: Applications of next-generation sequencing (NGS) technologies require availability and access to an information technology (IT) infrastructure and bioinformatics tools for large amounts of data storage and analyses. The U.S. Food and Drug Administration (FDA) anticipates that the use of NGS data to support regulatory submissions will continue to increase as the scientific and clinical communities become more familiar with the technologies and identify more ways to apply these advanced methods to support development and evaluation of new biomedical products. FDA laboratories are conducting research on different NGS platforms and developing the IT infrastructure and bioinformatics tools needed to enable regulatory evaluation of the technologies and the data sponsors will submit. 
A High-performance Integrated Virtual Environment, or HIVE, has been launched, and development and refinement continues as a collaborative effort between the FDA and George Washington University to provide the tools to support these needs. The use of a highly parallelized environment facilitated by use of distributed cloud storage and computation has resulted in a platform that is both rapid and responsive to changing scientific needs. The FDA plans to further develop in-house capacity in this area, while also supporting engagement by the external community, by sponsoring an open, public workshop to discuss NGS technologies and data formats standardization, and to promote the adoption of interoperability protocols in September 2014.

LAY ABSTRACT: Next-generation sequencing (NGS) technologies are enabling breakthroughs in how the biomedical community is developing and evaluating medical products. One example is the potential application of this method to the detection and identification of microbial contaminants in biologic products. In order for the U.S. Food and Drug Administration (FDA) to be able to evaluate the utility of this technology, we need to have the information technology infrastructure and bioinformatics tools to be able to store and analyze large amounts of data. To address this need, we have developed the High-performance Integrated Virtual Environment, or HIVE. HIVE uses a combination of distributed cloud storage and distributed cloud computations to provide a platform that is both rapid and responsive to support the growing and increasingly diverse scientific and regulatory needs of FDA scientists in their evaluation of NGS in research and ultimately for evaluation of NGS data in regulatory submissions.}, } @article {pmid25420151, year = {2014}, author = {Hussain, S and Bang, JH and Han, M and Ahmed, MI and Amin, MB and Lee, S and Nugent, C and McClean, S and Scotney, B and Parr, G}, title = {Behavior life style analysis for mobile sensory data in cloud computing through MapReduce.}, journal = {Sensors (Basel, Switzerland)}, volume = {14}, number = {11}, pages = {22001-22020}, pmid = {25420151}, issn = {1424-8220}, abstract = {Cloud computing has revolutionized healthcare in today's world as it can be seamlessly integrated into a mobile application and sensor devices. The sensory data is then transferred from these devices to the public and private clouds. In this paper, a hybrid and distributed environment is built which is capable of collecting data from the mobile phone application and store it in the cloud. We developed an activity recognition application and transfer the data to the cloud for further processing. 
Big data technology Hadoop MapReduce is employed to analyze the data and create user timeline of user's activities. These activities are visualized to find useful health analytics and trends. In this paper a big data solution is proposed to analyze the sensory data and give insights into user behavior and lifestyle trends.}, } @article {pmid25419094, year = {2014}, author = {Meng, X and Saunders, MA and Mahoney, MW}, title = {LSRN: A PARALLEL ITERATIVE SOLVER FOR STRONGLY OVER- OR UNDERDETERMINED SYSTEMS.}, journal = {SIAM journal on scientific computing : a publication of the Society for Industrial and Applied Mathematics}, volume = {36}, number = {2}, pages = {C95-C118}, pmid = {25419094}, issn = {1064-8275}, support = {U01 GM102098/GM/NIGMS NIH HHS/United States ; }, abstract = {We describe a parallel iterative least squares solver named LSRN that is based on random normal projection. LSRN computes the min-length solution to min x∈ℝ [n] ‖Ax - b‖2, where A ∈ ℝ [m × n] with m ≫ n or m ≪ n, and where A may be rank-deficient. Tikhonov regularization may also be included. Since A is involved only in matrix-matrix and matrix-vector multiplications, it can be a dense or sparse matrix or a linear operator, and LSRN automatically speeds up when A is sparse or a fast linear operator. The preconditioning phase consists of a random normal projection, which is embarrassingly parallel, and a singular value decomposition of size ⌈γ min(m, n)⌉ × min(m, n), where γ is moderately larger than 1, e.g., γ = 2. We prove that the preconditioned system is well-conditioned, with a strong concentration result on the extreme singular values, and hence that the number of iterations is fully predictable when we apply LSQR or the Chebyshev semi-iterative method. As we demonstrate, the Chebyshev method is particularly efficient for solving large problems on clusters with high communication cost. 
Numerical results show that on a shared-memory machine, LSRN is very competitive with LAPACK's DGELSD and a fast randomized least squares solver called Blendenpik on large dense problems, and it outperforms the least squares solver from SuiteSparseQR on sparse problems without sparsity patterns that can be exploited to reduce fill-in. Further experiments show that LSRN scales well on an Amazon Elastic Compute Cloud cluster.}, } @article {pmid25418363, year = {2015}, author = {Slagel, J and Mendoza, L and Shteynberg, D and Deutsch, EW and Moritz, RL}, title = {Processing shotgun proteomics data on the Amazon cloud with the trans-proteomic pipeline.}, journal = {Molecular & cellular proteomics : MCP}, volume = {14}, number = {2}, pages = {399-404}, pmid = {25418363}, issn = {1535-9484}, support = {S10RR027584/RR/NCRR NIH HHS/United States ; RC2 HG005805/HG/NHGRI NIH HHS/United States ; U54 EB020406/EB/NIBIB NIH HHS/United States ; R01 GM087221/GM/NIGMS NIH HHS/United States ; 2P50 GM076547/GM/NIGMS NIH HHS/United States ; P50 GM076547/GM/NIGMS NIH HHS/United States ; S10 RR027584/RR/NCRR NIH HHS/United States ; U54EB020406/EB/NIBIB NIH HHS/United States ; }, mesh = {Computers ; *Internet ; Proteomics/*methods ; *Software ; *Statistics as Topic ; User-Computer Interface ; }, abstract = {Cloud computing, where scalable, on-demand compute cycles and storage are available as a service, has the potential to accelerate mass spectrometry-based proteomics research by providing simple, expandable, and affordable large-scale computing to all laboratories regardless of location or information technology expertise. We present new cloud computing functionality for the Trans-Proteomic Pipeline, a free and open-source suite of tools for the processing and analysis of tandem mass spectrometry datasets. 
Enabled with Amazon Web Services cloud computing, the Trans-Proteomic Pipeline now accesses large scale computing resources, limited only by the available Amazon Web Services infrastructure, for all users. The Trans-Proteomic Pipeline runs in an environment fully hosted on Amazon Web Services, where all software and data reside on cloud resources to tackle large search studies. In addition, it can also be run on a local computer with computationally intensive tasks launched onto the Amazon Elastic Compute Cloud service to greatly decrease analysis times. We describe the new Trans-Proteomic Pipeline cloud service components, compare the relative performance and costs of various Elastic Compute Cloud service instance types, and present on-line tutorials that enable users to learn how to deploy cloud computing technology rapidly with the Trans-Proteomic Pipeline. We provide tools for estimating the necessary computing resources and costs given the scale of a job and demonstrate the use of cloud enabled Trans-Proteomic Pipeline by performing over 1100 tandem mass spectrometry files through four proteomic search engines in 9 h and at a very low cost.}, } @article {pmid25417085, year = {2015}, author = {Fernandez-Llatas, C and Pileggi, SF and Ibañez, G and Valero, Z and Sala, P}, title = {Cloud computing for context-aware enhanced m-Health services.}, journal = {Methods in molecular biology (Clifton, N.J.)}, volume = {1246}, number = {}, pages = {147-155}, doi = {10.1007/978-1-4939-1985-7_10}, pmid = {25417085}, issn = {1940-6029}, mesh = {*Cell Phone ; Electronic Health Records ; Health Records, Personal ; *Health Services ; *Internet ; Telemedicine/*methods ; }, abstract = {m-Health services are increasing its presence in our lives due to the high penetration of new smartphone devices. 
This new scenario proposes new challenges in terms of information accessibility that require new paradigms which enable the new applications to access the data in a continuous and ubiquitous way, ensuring the privacy required depending on the kind of data accessed. This paper proposes an architecture based on cloud computing paradigms in order to empower new m-Health applications to enrich their results by providing secure access to user data.}, } @article {pmid25417028, year = {2015}, author = {Xing, W and Tsoumakos, D and Ghanem, M}, title = {A cloud-based data network approach for translational cancer research.}, journal = {Advances in experimental medicine and biology}, volume = {820}, number = {}, pages = {229-238}, doi = {10.1007/978-3-319-09012-2_16}, pmid = {25417028}, issn = {0065-2598}, mesh = {Computational Biology/methods/statistics & numerical data ; Data Mining/*methods/statistics & numerical data ; Humans ; Neoplasms/*diagnosis/*therapy ; Reproducibility of Results ; Research/statistics & numerical data ; Translational Research, Biomedical/*methods/statistics & numerical data ; }, abstract = {We develop a new model and associated technology for constructing and managing self-organizing data to support translational cancer research studies. We employ a semantic content network approach to address the challenges of managing cancer research data. Such data is heterogeneous, large, decentralized, growing and continually being updated. Moreover, the data originates from different information sources that may be partially overlapping, creating redundancies as well as contradictions and inconsistencies. 
Building on the advantages of elasticity of cloud computing, we deploy the cancer data networks on top of the CELAR Cloud platform to enable more effective processing and analysis of Big cancer data.}, } @article {pmid25394535, year = {2015}, author = {Řezanka, T and Matoulková, D and Kolouchová, I and Masák, J and Viden, I and Sigler, K}, title = {Extraction of brewer's yeasts using different methods of cell disruption for practical biodiesel production.}, journal = {Folia microbiologica}, volume = {60}, number = {3}, pages = {225-234}, pmid = {25394535}, issn = {1874-9356}, mesh = {Biofuels/*analysis ; Fatty Acids/chemistry/metabolism ; Fermentation ; Industrial Microbiology/*methods ; Lipids/chemistry ; Nitrogen/metabolism ; Saccharomyces cerevisiae/chemistry/*metabolism ; }, abstract = {The methods of preparation of fatty acids from brewer's yeast and its use in production of biofuels and in different branches of industry are described. Isolation of fatty acids from cell lipids includes cell disintegration (e.g., with liquid nitrogen, KOH, NaOH, petroleum ether, nitrogenous basic compounds, etc.) and subsequent processing of extracted lipids, including analysis of fatty acid and computing of biodiesel properties such as viscosity, density, cloud point, and cetane number. Methyl esters obtained from brewer's waste yeast are well suited for the production of biodiesel. All 49 samples (7 breweries and 7 methods) meet the requirements for biodiesel quality in both the composition of fatty acids and the properties of the biofuel required by the US and EU standards.}, } @article {pmid25377309, year = {2015}, author = {Christoph, J and Griebel, L and Leb, I and Engel, I and Köpcke, F and Toddenroth, D and Prokosch, HU and Laufer, J and Marquardt, K and Sedlmayr, M}, title = {Secure Secondary Use of Clinical Data with Cloud-based NLP Services. 
Towards a Highly Scalable Research Infrastructure.}, journal = {Methods of information in medicine}, volume = {54}, number = {3}, pages = {276-282}, doi = {10.3414/ME13-01-0133}, pmid = {25377309}, issn = {2511-705X}, mesh = {*Cloud Computing ; Data Mining ; Humans ; Internet ; *Medical Informatics ; *Natural Language Processing ; *Privacy ; Software Design ; }, abstract = {OBJECTIVES: The secondary use of clinical data provides large opportunities for clinical and translational research as well as quality assurance projects. For such purposes, it is necessary to provide a flexible and scalable infrastructure that is compliant with privacy requirements. The major goals of the cloud4health project are to define such an architecture, to implement a technical prototype that fulfills these requirements and to evaluate it with three use cases.

METHODS: The architecture provides components for multiple data provider sites such as hospitals to extract free text as well as structured data from local sources and de-identify such data for further anonymous or pseudonymous processing. Free text documentation is analyzed and transformed into structured information by text-mining services, which are provided within a cloud-computing environment. Thus, newly gained annotations can be integrated along with the already available structured data items and the resulting data sets can be uploaded to a central study portal for further analysis.

RESULTS: Based on the architecture design, a prototype has been implemented and is under evaluation in three clinical use cases. Data from several hundred patients provided by a University Hospital and a private hospital chain have already been processed.

CONCLUSIONS: Cloud4health has shown how existing components for secondary use of structured data can be complemented with text-mining in a privacy compliant manner. The cloud-computing paradigm allows a flexible and dynamically adaptable service provision that facilitates the adoption of services by data providers without own investments in respective hardware resources and software tools.}, } @article {pmid25374542, year = {2014}, author = {Ermakov, S and Forster, P and Pagidala, J and Miladinov, M and Wang, A and Baillie, R and Bartlett, D and Reed, M and Leil, TA}, title = {Virtual Systems Pharmacology (ViSP) software for simulation from mechanistic systems-level models.}, journal = {Frontiers in pharmacology}, volume = {5}, number = {}, pages = {232}, pmid = {25374542}, issn = {1663-9812}, abstract = {Multiple software programs are available for designing and running large scale system-level pharmacology models used in the drug development process. Depending on the problem, scientists may be forced to use several modeling tools that could increase model development time, IT costs and so on. Therefore, it is desirable to have a single platform that allows setting up and running large-scale simulations for the models that have been developed with different modeling tools. We developed a workflow and a software platform in which a model file is compiled into a self-contained executable that is no longer dependent on the software that was used to create the model. At the same time the full model specifics is preserved by presenting all model parameters as input parameters for the executable. This platform was implemented as a model agnostic, therapeutic area agnostic and web-based application with a database back-end that can be used to configure, manage and execute large-scale simulations for multiple models by multiple users. 
The user interface is designed to be easily configurable to reflect the specifics of the model and the user's particular needs and the back-end database has been implemented to store and manage all aspects of the systems, such as Models, Virtual Patients, User Interface Settings, and Results. The platform can be adapted and deployed on an existing cluster or cloud computing environment. Its use was demonstrated with a metabolic disease systems pharmacology model that simulates the effects of two antidiabetic drugs, metformin and fasiglifam, in type 2 diabetes mellitus patients.}, } @article {pmid25343866, year = {2015}, author = {Tobes, R and Pareja-Tobes, P and Manrique, M and Pareja-Tobes, E and Kovach, E and Alekhin, A and Pareja, E}, title = {Gene calling and bacterial genome annotation with BG7.}, journal = {Methods in molecular biology (Clifton, N.J.)}, volume = {1231}, number = {}, pages = {177-189}, doi = {10.1007/978-1-4939-1720-4_12}, pmid = {25343866}, issn = {1940-6029}, mesh = {Bacteria/genetics ; Base Sequence ; Contig Mapping/*methods ; Electronic Data Processing ; *Genome, Bacterial ; *High-Throughput Nucleotide Sequencing ; Metagenomics ; Molecular Sequence Annotation/*methods/statistics & numerical data ; Molecular Sequence Data ; Sequence Alignment ; Sequence Analysis, DNA/*instrumentation/methods ; *Software ; }, abstract = {New massive sequencing technologies are providing many bacterial genome sequences from diverse taxa but a refined annotation of these genomes is crucial for obtaining scientific findings and new knowledge. Thus, bacterial genome annotation has emerged as a key point to investigate in bacteria. 
Any efficient tool designed specifically to annotate bacterial genomes sequenced with massively parallel technologies has to consider the specific features of bacterial genomes (absence of introns and scarcity of nonprotein-coding sequence) and of next-generation sequencing (NGS) technologies (presence of errors and not perfectly assembled genomes). These features make it convenient to focus on coding regions and, hence, on protein sequences that are the elements directly related with biological functions. In this chapter we describe how to annotate bacterial genomes with BG7, an open-source tool based on a protein-centered gene calling/annotation paradigm. BG7 is specifically designed for the annotation of bacterial genomes sequenced with NGS. This tool is sequence error tolerant, maintaining its capabilities for the annotation of highly fragmented genomes or for annotating mixed sequences coming from several genomes (as those obtained through metagenomics samples). BG7 has been designed with scalability as a requirement, with a computing infrastructure completely based on cloud computing (Amazon Web Services).}, } @article {pmid25342933, year = {2014}, author = {Madduri, RK and Sulakhe, D and Lacinski, L and Liu, B and Rodriguez, A and Chard, K and Dave, UJ and Foster, IT}, title = {Experiences Building Globus Genomics: A Next-Generation Sequencing Analysis Service using Galaxy, Globus, and Amazon Web Services.}, journal = {Concurrency and computation : practice & experience}, volume = {26}, number = {13}, pages = {2266-2279}, pmid = {25342933}, issn = {1532-0626}, support = {R01 LM010132/LM/NLM NIH HHS/United States ; R24 HL085343/HL/NHLBI NIH HHS/United States ; S10 RR029030/RR/NCRR NIH HHS/United States ; }, abstract = {We describe Globus Genomics, a system that we have developed for rapid analysis of large quantities of next-generation sequencing (NGS) genomic data. 
This system achieves a high degree of end-to-end automation that encompasses every stage of data analysis including initial data retrieval from remote sequencing centers or storage (via the Globus file transfer system); specification, configuration, and reuse of multi-step processing pipelines (via the Galaxy workflow system); creation of custom Amazon Machine Images and on-demand resource acquisition via a specialized elastic provisioner (on Amazon EC2); and efficient scheduling of these pipelines over many processors (via the HTCondor scheduler). The system allows biomedical researchers to perform rapid analysis of large NGS datasets in a fully automated manner, without software installation or a need for any local computing infrastructure. We report performance and cost results for some representative workloads.}, } @article {pmid25340542, year = {2014}, author = {Dickson, ED and Hamby, DM}, title = {Cloud immersion building shielding factors for US residential structures.}, journal = {Journal of radiological protection : official journal of the Society for Radiological Protection}, volume = {34}, number = {4}, pages = {853-871}, doi = {10.1088/0952-4746/34/4/853}, pmid = {25340542}, issn = {1361-6498}, mesh = {Air Pollution, Indoor/prevention & control/*statistics & numerical data ; Air Pollution, Radioactive/*statistics & numerical data ; Computer Simulation ; Construction Materials/*analysis/statistics & numerical data ; Facility Design and Construction/methods ; *Housing ; *Models, Statistical ; Monte Carlo Method ; Radiation Protection/*instrumentation/methods ; Radioactive Fallout/statistics & numerical data ; Risk Assessment ; Scattering, Radiation ; United States ; *Weather ; }, abstract = {This paper presents validated building shielding factors designed for contemporary US housing-stock under an idealized, yet realistic, exposure scenario within a semi-infinite cloud of radioactive material. 
The building shielding factors are intended for use in emergency planning and level three probabilistic risk assessments for a variety of postulated radiological events in which a realistic assessment is necessary to better understand the potential risks for accident mitigation and emergency response planning. Factors are calculated from detailed computational housing-units models using the general-purpose Monte Carlo N-Particle computational code, MCNP5, and are benchmarked from a series of narrow- and broad-beam measurements analyzing the shielding effectiveness of ten common general-purpose construction materials and ten shielding models representing the primary weather barriers (walls and roofs) of likely US housing-stock. Each model was designed to scale based on common residential construction practices and include, to the extent practical, all structurally significant components important for shielding against ionizing radiation. Calculations were performed for floor-specific locations as well as for computing a weighted-average representative building shielding factor for single- and multi-story detached homes, both with and without basement, as well as for single-wide manufactured housing-units.}, } @article {pmid25335902, year = {2016}, author = {Yao, T and Jiang, N and Grana, R and Ling, PM and Glantz, SA}, title = {A content analysis of electronic cigarette manufacturer websites in China.}, journal = {Tobacco control}, volume = {25}, number = {2}, pages = {188-194}, pmid = {25335902}, issn = {1468-3318}, support = {CA-113710/CA/NCI NIH HHS/United States ; R25 CA113710/CA/NCI NIH HHS/United States ; R01 TW09295/TW/FIC NIH HHS/United States ; R01 TW009295/TW/FIC NIH HHS/United States ; T32 CA113710/CA/NCI NIH HHS/United States ; }, mesh = {*Advertising ; Age Factors ; China ; *Commerce/economics ; *Electronic Nicotine Delivery Systems/adverse effects/economics ; Female ; Health Behavior ; Health Knowledge, Attitudes, Practice ; Humans ; *Internet ; Male ; 
*Manufacturing Industry/economics ; Risk Assessment ; Risk Factors ; Sex Factors ; Smoking/economics ; *Smoking Prevention ; Taste ; Tobacco Use Disorder/economics/psychology/*rehabilitation ; }, abstract = {OBJECTIVE: The goal of this study was to summarise the websites of electronic cigarette (e-cigarette) manufacturers in China and describe how they market their products.

METHODS: From March to April 2013, we used two search keywords 'electronic cigarette' (Dian Zi Xiang Yan in Chinese) and 'manufacturer' (Sheng Chan Chang Jia in Chinese) to search e-cigarette manufacturers in China on Alibaba, an internet-based e-commerce business that covers business-to-business online marketplaces, retail and payment platforms, shopping search engine and data-centric cloud computing services. A total of 18 websites of 12 e-cigarette manufacturers in China were analysed by using a coding guide which includes 14 marketing claims.

RESULTS: Health-related benefits were claimed most frequently (89%), followed by the claims of no secondhand smoke (SHS) exposure (78%), and utility for smoking cessation (67%). A wide variety of flavours, celebrity endorsements and e-cigarettes specifically for women were presented. None of the websites had any age restriction on access, references to government regulation or lawsuits. Instruction on how to use e-cigarettes was on 17% of the websites.

CONCLUSIONS: Better regulation of e-cigarette marketing messages on manufacturers' websites is needed in China. The frequent claims of health benefits, smoking cessation, strategies appealing to youth and women are concerning, especially targeting women. Regulators should prohibit marketing claims of health benefits, no SHS exposure and value for smoking cessation in China until health-related, quality and safety issues have been adequately addressed. To avoid e-cigarette use for initiation to nicotine addiction, messages targeting youth and women should be prohibited.}, } @article {pmid25326627, year = {2015}, author = {Martin, L and Cook, C and Matasci, N and Williams, J and Bastow, R}, title = {Data mining with iPlant: a meeting report from the 2013 GARNet workshop, Data mining with iPlant.}, journal = {Journal of experimental botany}, volume = {66}, number = {1}, pages = {1-6}, doi = {10.1093/jxb/eru402}, pmid = {25326627}, issn = {1460-2431}, support = {BB/G021481/1//Biotechnology and Biological Sciences Research Council/United Kingdom ; }, mesh = {Botany/*methods ; *Computational Biology ; *Data Mining ; *Plants ; }, abstract = {High-throughput sequencing technologies have rapidly moved from large international sequencing centres to individual laboratory benchtops. These changes have driven the 'data deluge' of modern biology. Submissions of nucleotide sequences to GenBank, for example, have doubled in size every year since 1982, and individual data sets now frequently reach terabytes in size. While 'big data' present exciting opportunities for scientific discovery, data analysis skills are not part of the typical wet bench biologist's experience. Knowing what to do with data, how to visualize and analyse them, make predictions, and test hypotheses are important barriers to success. Many researchers also lack adequate capacity to store and share these data, creating further bottlenecks to effective collaboration between groups and institutes. 
The US National Science Foundation-funded iPlant Collaborative was established in 2008 to form part of the data collection and analysis pipeline and help alleviate the bottlenecks associated with the big data challenge in plant science. Leveraging the power of high-performance computing facilities, iPlant provides free-to-use cyberinfrastructure to enable terabytes of data storage, improve analysis, and facilitate collaborations. To help train UK plant science researchers to use the iPlant platform and understand how it can be exploited to further research, GARNet organized a four-day Data mining with iPlant workshop at Warwick University in September 2013. This report provides an overview of the workshop, and highlights the power of the iPlant environment for lowering barriers to using complex bioinformatics resources, furthering discoveries in plant science research and providing a platform for education and outreach programmes.}, } @article {pmid25317275, year = {2014}, author = {Butler, WE and Atai, N and Carter, B and Hochberg, F}, title = {Informatic system for a global tissue-fluid biorepository with a graph theory-oriented graphical user interface.}, journal = {Journal of extracellular vesicles}, volume = {3}, number = {}, pages = {}, pmid = {25317275}, issn = {2001-3078}, support = {P01 CA069246/CA/NCI NIH HHS/United States ; }, abstract = {The Richard Floor Biorepository supports collaborative studies of extracellular vesicles (EVs) found in human fluids and tissue specimens. The current emphasis is on biomarkers for central nervous system neoplasms but its structure may serve as a template for collaborative EV translational studies in other fields. The informatic system provides specimen inventory tracking with bar codes assigned to specimens and containers and projects, is hosted on globalized cloud computing resources, and embeds a suite of shared documents, calendars, and video-conferencing features. 
Clinical data are recorded in relation to molecular EV attributes and may be tagged with terms drawn from a network of externally maintained ontologies thus offering expansion of the system as the field matures. We fashioned the graphical user interface (GUI) around a web-based data visualization package. This system is now in an early stage of deployment, mainly focused on specimen tracking and clinical, laboratory, and imaging data capture in support of studies to optimize detection and analysis of brain tumour-specific mutations. It currently includes 4,392 specimens drawn from 611 subjects, the majority with brain tumours. As EV science evolves, we plan biorepository changes which may reflect multi-institutional collaborations, proteomic interfaces, additional biofluids, changes in operating procedures and kits for specimen handling, novel procedures for detection of tumour-specific EVs, and for RNA extraction and changes in the taxonomy of EVs. We have used an ontology-driven data model and web-based architecture with a graph theory-driven GUI to accommodate and stimulate the semantic web of EV science.}, } @article {pmid25313495, year = {2014}, author = {Sabit, H and Al-Anbuky, A}, title = {Multivariate spatial condition mapping using subtractive fuzzy cluster means.}, journal = {Sensors (Basel, Switzerland)}, volume = {14}, number = {10}, pages = {18960-18981}, pmid = {25313495}, issn = {1424-8220}, abstract = {Wireless sensor networks are usually deployed for monitoring given physical phenomena taking place in a specific space and over a specific duration of time. The spatio-temporal distribution of these phenomena often correlates to certain physical events. To appropriately characterise these events-phenomena relationships over a given space for a given time frame, we require continuous monitoring of the conditions. WSNs are perfectly suited for these tasks, due to their inherent robustness. 
This paper presents a subtractive fuzzy cluster means algorithm and its application in data stream mining for wireless sensor systems over a cloud-computing-like architecture, which we call sensor cloud data stream mining. Benchmarking on standard mining algorithms, the k-means and the FCM algorithms, we have demonstrated that the subtractive fuzzy cluster means model can perform high quality distributed data stream mining tasks comparable to centralised data stream mining.}, } @article {pmid25286837, year = {2014}, author = {Meyer, GR and Aragão, D and Mudie, NJ and Caradoc-Davies, TT and McGowan, S and Bertling, PJ and Groenewegen, D and Quenette, SM and Bond, CS and Buckle, AM and Androulakis, S}, title = {Operation of the Australian Store.Synchrotron for macromolecular crystallography.}, journal = {Acta crystallographica. Section D, Biological crystallography}, volume = {70}, number = {Pt 10}, pages = {2510-2519}, pmid = {25286837}, issn = {1399-0047}, mesh = {Australia ; *Crystallography, X-Ray ; Data Curation/*methods ; Synchrotrons ; Workflow ; }, abstract = {The Store.Synchrotron service, a fully functional, cloud computing-based solution to raw X-ray data archiving and dissemination at the Australian Synchrotron, is described. The service automatically receives and archives raw diffraction data, related metadata and preliminary results of automated data-processing workflows. Data are able to be shared with collaborators and opened to the public. In the nine months since its deployment in August 2013, the service has handled over 22.4 TB of raw data (∼1.7 million diffraction images). Several real examples from the Australian crystallographic community are described that illustrate the advantages of the approach, which include real-time online data access and fully redundant, secure storage. Discoveries in biological sciences increasingly require multidisciplinary approaches. 
With this in mind, Store.Synchrotron has been developed as a component within a greater service that can combine data from other instruments at the Australian Synchrotron, as well as instruments at the Australian neutron source ANSTO. It is therefore envisaged that this will serve as a model implementation of raw data archiving and dissemination within the structural biology research community.}, } @article {pmid25279021, year = {2014}, author = {Viana-Ferreira, C and Ribeiro, LS and Costa, C}, title = {A framework for integration of heterogeneous medical imaging networks.}, journal = {The open medical informatics journal}, volume = {8}, number = {}, pages = {20-32}, pmid = {25279021}, issn = {1874-4311}, abstract = {Medical imaging is increasing its importance in matters of medical diagnosis and in treatment support. Much is due to computers that have revolutionized medical imaging not only in acquisition process but also in the way it is visualized, stored, exchanged and managed. Picture Archiving and Communication Systems (PACS) is an example of how medical imaging takes advantage of computers. To solve problems of interoperability of PACS and medical imaging equipment, the Digital Imaging and Communications in Medicine (DICOM) standard was defined and widely implemented in current solutions. More recently, the need to exchange medical data between distinct institutions resulted in Integrating the Healthcare Enterprise (IHE) initiative that contains a content profile especially conceived for medical imaging exchange: Cross Enterprise Document Sharing for imaging (XDS-i). Moreover, due to application requirements, many solutions developed private networks to support their services. For instance, some applications support enhanced query and retrieve over DICOM objects metadata. This paper proposes an integration framework to medical imaging networks that provides protocols interoperability and data federation services. 
It is an extensible plugin system that supports standard approaches (DICOM and XDS-I), but is also capable of supporting private protocols. The framework is being used in the Dicoogle Open Source PACS.}, } @article {pmid25273594, year = {2015}, author = {Smith, DF and Schulz, C and Konijnenburg, M and Kilic, M and Heeren, RM}, title = {Distributed computing strategies for processing of FT-ICR MS imaging datasets for continuous mode data visualization.}, journal = {Analytical and bioanalytical chemistry}, volume = {407}, number = {8}, pages = {2321-2327}, doi = {10.1007/s00216-014-8210-0}, pmid = {25273594}, issn = {1618-2650}, mesh = {Cyclotrons ; Data Mining/*methods ; *Database Management Systems ; Fourier Analysis ; Mass Spectrometry ; }, abstract = {High-resolution Fourier transform ion cyclotron resonance (FT-ICR) mass spectrometry imaging enables the spatial mapping and identification of biomolecules from complex surfaces. The need for long time-domain transients, and thus large raw file sizes, results in a large amount of raw data ("big data") that must be processed efficiently and rapidly. This can be compounded by large-area imaging and/or high spatial resolution imaging. For FT-ICR, data processing and data reduction must not compromise the high mass resolution afforded by the mass spectrometer. The continuous mode "Mosaic Datacube" approach allows high mass resolution visualization (0.001 Da) of mass spectrometry imaging data, but requires additional processing as compared to feature-based processing. We describe the use of distributed computing for processing of FT-ICR MS imaging datasets with generation of continuous mode Mosaic Datacubes for high mass resolution visualization. 
An eight-fold improvement in processing time is demonstrated using a Dutch nationally available cloud service.}, } @article {pmid25254350, year = {2015}, author = {Mills, KL and Filliben, JJ and Haines, AL}, title = {Determining Relative Importance and Effective Settings for Genetic Algorithm Control Parameters.}, journal = {Evolutionary computation}, volume = {23}, number = {2}, pages = {309-342}, doi = {10.1162/EVCO_a_00137}, pmid = {25254350}, issn = {1530-9304}, mesh = {*Algorithms ; Biological Evolution ; Computer Simulation ; *Computer Systems ; *Models, Theoretical ; Mutation ; Population Density ; }, abstract = {Setting the control parameters of a genetic algorithm to obtain good results is a long-standing problem. We define an experiment design and analysis method to determine relative importance and effective settings for control parameters of any evolutionary algorithm, and we apply this method to a classic binary-encoded genetic algorithm (GA). Subsequently, as reported elsewhere, we applied the GA, with the control parameter settings determined here, to steer a population of cloud-computing simulators toward behaviors that reveal degraded performance and system collapse. GA-steered simulators could serve as a design tool, empowering system engineers to identify and mitigate low-probability, costly failure scenarios. In the existing GA literature, we uncovered conflicting opinions and evidence regarding key GA control parameters and effective settings to adopt. Consequently, we designed and executed an experiment to determine relative importance and effective settings for seven GA control parameters, when applied across a set of numerical optimization problems drawn from the literature. This paper describes our experiment design, analysis, and results. We found that crossover most significantly influenced GA success, followed by mutation rate and population size and then by rerandomization point and elite selection. 
Selection method and the precision used within the chromosome to represent numerical values had least influence. Our findings are robust over 60 numerical optimization problems.}, } @article {pmid25251339, year = {2014}, author = {Zhao, J and Hu, L and Ding, Y and Xu, G and Hu, M}, title = {A heuristic placement selection of live virtual machine migration for energy-saving in cloud computing environment.}, journal = {PloS one}, volume = {9}, number = {9}, pages = {e108275}, pmid = {25251339}, issn = {1932-6203}, mesh = {*Algorithms ; Artificial Intelligence/economics ; Computers/*economics ; *Computing Methodologies ; Internet/economics ; }, abstract = {The field of live VM (virtual machine) migration has been a hotspot problem in green cloud computing. Live VM migration problem is divided into two research aspects: live VM migration mechanism and live VM migration policy. In the meanwhile, with the development of energy-aware computing, we have focused on the VM placement selection of live migration, namely live VM migration policy for energy saving. In this paper, a novel heuristic approach PS-ES is presented. Its main idea includes two parts. One is that it combines the PSO (particle swarm optimization) idea with the SA (simulated annealing) idea to achieve an improved PSO-based approach with the better global search's ability. The other one is that it uses the Probability Theory and Mathematical Statistics and once again utilizes the SA idea to deal with the data obtained from the improved PSO-based process to get the final solution. And thus the whole approach achieves a long-term optimization for energy saving as it has considered not only the optimization of the current problem scenario but also that of the future problem. The experimental results demonstrate that PS-ES evidently reduces the total incremental energy consumption and better protects the performance of VM running and migrating compared with randomly migrating and optimally migrating. 
As a result, the proposed PS-ES approach has capabilities to make the result of live VM migration events more high-effective and valuable.}, } @article {pmid25250374, year = {2014}, author = {Lee, K and Rho, S and Lee, SW}, title = {A method of extracting ontology module using concept relations for sharing knowledge in mobile cloud computing environment.}, journal = {TheScientificWorldJournal}, volume = {2014}, number = {}, pages = {382797}, doi = {10.1155/2014/382797}, pmid = {25250374}, issn = {1537-744X}, mesh = {Algorithms ; Computational Biology/methods ; *Computer Systems ; Humans ; Information Dissemination/*methods ; Information Storage and Retrieval/*methods ; *Knowledge Bases ; }, abstract = {In mobile cloud computing environment, the cooperation of distributed computing objects is one of the most important requirements for providing successful cloud services. To satisfy this requirement, all the members, who are employed in the cooperation group, need to share the knowledge for mutual understanding. Even if ontology can be the right tool for this goal, there are several issues to make a right ontology. As the cost and complexity of managing knowledge increase according to the scale of the knowledge, reducing the size of ontology is one of the critical issues. In this paper, we propose a method of extracting ontology module to increase the utility of knowledge. For the given signature, this method extracts the ontology module, which is semantically self-contained to fulfill the needs of the service, by considering the syntactic structure and semantic relation of concepts. By employing this module, instead of the original ontology, the cooperation of computing objects can be performed with less computing load and complexity. 
In particular, when multiple external ontologies need to be combined for more complex services, this method can be used to optimize the size of shared knowledge.}, } @article {pmid25248396, year = {2015}, author = {Dove, ES and Joly, Y and Tassé, AM and , and , and Knoppers, BM}, title = {Genomic cloud computing: legal and ethical points to consider.}, journal = {European journal of human genetics : EJHG}, volume = {23}, number = {10}, pages = {1271-1278}, pmid = {25248396}, issn = {1476-5438}, support = {103360//Wellcome Trust/United Kingdom ; }, mesh = {Biomedical Research/methods ; *Cloud Computing ; Computer Security ; Confidentiality ; Data Mining/methods ; Genomics/*methods ; Humans ; Internet ; Software ; }, abstract = {The biggest challenge in twenty-first century data-intensive genomic science, is developing vast computer infrastructure and advanced software tools to perform comprehensive analyses of genomic data sets for biomedical research and clinical practice. Researchers are increasingly turning to cloud computing both as a solution to integrate data from genomics, systems biology and biomedical data mining and as an approach to analyze data to solve biomedical problems. Although cloud computing provides several benefits such as lower costs and greater efficiency, it also raises legal and ethical issues. In this article, we discuss three key 'points to consider' (data control; data security, confidentiality and transfer; and accountability) based on a preliminary review of several publicly available cloud service providers' Terms of Service. These 'points to consider' should be borne in mind by genomic research organizations when negotiating legal arrangements to store genomic data on a large commercial cloud service provider's servers. 
Diligent genomic cloud computing means leveraging security standards and evaluation processes as a means to protect data and entails many of the same good practices that researchers should always consider in securing their local infrastructure.}, } @article {pmid25247298, year = {2014}, author = {Yazar, S and Gooden, GE and Mackey, DA and Hewitt, AW}, title = {Benchmarking undedicated cloud computing providers for analysis of genomic datasets.}, journal = {PloS one}, volume = {9}, number = {9}, pages = {e108490}, pmid = {25247298}, issn = {1932-6203}, mesh = {*Benchmarking/economics ; *Cloud Computing/economics ; Cost-Benefit Analysis ; *Datasets as Topic ; Escherichia coli/genetics ; Genome ; Genome, Human ; Humans ; Male ; Polymorphism, Single Nucleotide ; *Sequence Alignment ; }, abstract = {A major bottleneck in biological discovery is now emerging at the computational level. Cloud computing offers a dynamic means whereby small and medium-sized laboratories can rapidly adjust their computational capacity. We benchmarked two established cloud computing services, Amazon Web Services Elastic MapReduce (EMR) on Amazon EC2 instances and Google Compute Engine (GCE), using publicly available genomic datasets (E.coli CC102 strain and a Han Chinese male genome) and a standard bioinformatic pipeline on a Hadoop-based platform. Wall-clock time for complete assembly differed by 52.9% (95% CI: 27.5-78.2) for E.coli and 53.5% (95% CI: 34.4-72.6) for human genome, with GCE being more efficient than EMR. The cost of running this experiment on EMR and GCE differed significantly, with the costs on EMR being 257.3% (95% CI: 211.5-303.1) and 173.9% (95% CI: 134.6-213.1) more expensive for E.coli and human assemblies respectively. Thus, GCE was found to outperform EMR both in terms of cost and wall-clock time. Our findings confirm that cloud computing is an efficient and potentially cost-effective alternative for analysis of large genomic datasets. 
In addition to releasing our cost-effectiveness comparison, we present available ready-to-use scripts for establishing Hadoop instances with Ganglia monitoring on EC2 or GCE.}, } @article {pmid25244727, year = {2014}, author = {Slabodkin, G}, title = {Where health care is going in the cloud. The computing model continues to win converts in a market reluctant to take security risks.}, journal = {Health data management}, volume = {22}, number = {8}, pages = {18-20}, pmid = {25244727}, issn = {1079-9869}, mesh = {*Computer Security ; Delivery of Health Care ; *Diffusion of Innovation ; Information Storage and Retrieval/*methods ; *Internet ; United States ; }, } @article {pmid25238847, year = {2014}, author = {García, CG and Sebastià, N and Blasco, E and Soriano, JM}, title = {[Dietopro.com: a new tool for dietotherapeutical management based on cloud computing technology].}, journal = {Nutricion hospitalaria}, volume = {30}, number = {3}, pages = {678-685}, doi = {10.3305/nh.2014.30.3.7627}, pmid = {25238847}, issn = {1699-5198}, mesh = {*Cloud Computing ; Diet Therapy/*methods ; Humans ; *Mobile Applications ; }, abstract = {INTRODUCTION: dietotherapeutical softwares are now a basic tool in the dietary management of patients, either from a physiological point of view and / or pathological. New technologies and research in this regard, have favored the emergence of new applications for the dietary and nutritional management that facilitate the management of the dietotherapeutical company.

OBJECTIVES: To comparatively study the main dietotherapeutical applications on the market, in order to give professional users in diet and nutrition criteria for selecting among the main tools available.

RESULTS: Dietopro.com is, from our point of view, one of the most comprehensive dietotherapeutical applications for the management of patients.

CONCLUSION: Based on the need of the user, it has different dietary software choices. We conclude that there is no better or worse than another application, but applications roughly adapted to the needs of professionals.}, } @article {pmid25233306, year = {2014}, author = {Burns, R and Vogelstein, JT and Szalay, AS}, title = {From cosmos to connectomes: the evolution of data-intensive science.}, journal = {Neuron}, volume = {83}, number = {6}, pages = {1249-1252}, doi = {10.1016/j.neuron.2014.08.045}, pmid = {25233306}, issn = {1097-4199}, support = {R01 NS092474/NS/NINDS NIH HHS/United States ; }, mesh = {Animals ; Computational Biology/methods ; *Computers ; Connectome/*methods ; Humans ; *Information Systems ; *Software ; Statistics as Topic/*methods ; }, abstract = {The analysis of data requires computation: originally by hand and more recently by computers. Different models of computing are designed and optimized for different kinds of data. In data-intensive science, the scale and complexity of data exceeds the comfort zone of local data stores on scientific workstations. Thus, cloud computing emerges as the preeminent model, utilizing data centers and high-performance clusters, enabling remote users to access and query subsets of the data efficiently. We examine how data-intensive computational systems originally built for cosmology, the Sloan Digital Sky Survey (SDSS), are now being used in connectomics, at the Open Connectome Project. We list lessons learned and outline the top challenges we expect to face. 
Success in computational connectomics would drastically reduce the time between idea and discovery, as SDSS did in cosmology.}, } @article {pmid25225874, year = {2014}, author = {Mehmood, I and Sajjad, M and Baik, SW}, title = {Mobile-cloud assisted video summarization framework for efficient management of remote sensing data generated by wireless capsule sensors.}, journal = {Sensors (Basel, Switzerland)}, volume = {14}, number = {9}, pages = {17112-17145}, pmid = {25225874}, issn = {1424-8220}, mesh = {Capsule Endoscopy/*methods ; Data Compression/*methods ; Humans ; Image Enhancement/*methods ; Image Interpretation, Computer-Assisted/*methods ; *Signal Processing, Computer-Assisted ; Telemedicine/*methods ; Video Recording/*methods ; }, abstract = {Wireless capsule endoscopy (WCE) has great advantages over traditional endoscopy because it is portable and easy to use, especially in remote monitoring health-services. However, during the WCE process, the large amount of captured video data demands a significant deal of computation to analyze and retrieve informative video frames. In order to facilitate efficient WCE data collection and browsing task, we present a resource- and bandwidth-aware WCE video summarization framework that extracts the representative keyframes of the WCE video contents by removing redundant and non-informative frames. For redundancy elimination, we use Jeffrey-divergence between color histograms and inter-frame Boolean series-based correlation of color channels. To remove non-informative frames, multi-fractal texture features are extracted to assist the classification using an ensemble-based classifier. Owing to the limited WCE resources, it is impossible for the WCE system to perform computationally intensive video summarization tasks. 
To resolve computational challenges, mobile-cloud architecture is incorporated, which provides resizable computing capacities by adaptively offloading video summarization tasks between the client and the cloud server. The qualitative and quantitative results are encouraging and show that the proposed framework saves information transmission cost and bandwidth, as well as the valuable time of data analysts in browsing remote sensing data.}, } @article {pmid25218122, year = {2014}, author = {Latif, R and Abbas, H and Assar, S}, title = {Distributed denial of service (DDoS) attack in cloud- assisted wireless body area networks: a systematic literature review.}, journal = {Journal of medical systems}, volume = {38}, number = {11}, pages = {128}, pmid = {25218122}, issn = {1573-689X}, mesh = {*Computer Communication Networks ; Humans ; Information Systems/*organization & administration ; *Wireless Technology ; }, abstract = {Wireless Body Area Networks (WBANs) have emerged as a promising technology that has shown enormous potential in improving the quality of healthcare, and has thus found a broad range of medical applications from ubiquitous health monitoring to emergency medical response systems. The huge amount of highly sensitive data collected and generated by WBAN nodes requires an ascendable and secure storage and processing infrastructure. Given the limited resources of WBAN nodes for storage and processing, the integration of WBANs and cloud computing may provide a powerful solution. However, despite the benefits of cloud-assisted WBAN, several security issues and challenges remain. Among these, data availability is the most nagging security issue. The most serious threat to data availability is a distributed denial of service (DDoS) attack that directly affects the all-time availability of a patient's data. The existing solutions for standalone WBANs and sensor networks are not applicable in the cloud. 
The purpose of this review paper is to identify the most threatening types of DDoS attacks affecting the availability of a cloud-assisted WBAN and review the state-of-the-art detection mechanisms for the identified DDoS attacks.}, } @article {pmid25207854, year = {2014}, author = {Abdul-Wahid, B and Feng, H and Rajan, D and Costaouec, R and Darve, E and Thain, D and Izaguirre, JA}, title = {AWE-WQ: fast-forwarding molecular dynamics using the accelerated weighted ensemble.}, journal = {Journal of chemical information and modeling}, volume = {54}, number = {10}, pages = {3033-3043}, pmid = {25207854}, issn = {1549-960X}, support = {R01 GM101935/GM/NIGMS NIH HHS/United States ; 7R01AI039071/AI/NIAID NIH HHS/United States ; 1R01 GM101935-01/GM/NIGMS NIH HHS/United States ; }, mesh = {*Algorithms ; *Computer Systems ; *Molecular Dynamics Simulation ; Protein Folding ; Protein Structure, Tertiary ; Protein Unfolding ; Proteins/*chemistry ; Thermodynamics ; Tryptophan/chemistry ; }, abstract = {A limitation of traditional molecular dynamics (MD) is that reaction rates are difficult to compute. This is due to the rarity of observing transitions between metastable states since high energy barriers trap the system in these states. Recently the weighted ensemble (WE) family of methods have emerged which can flexibly and efficiently sample conformational space without being trapped and allow calculation of unbiased rates. However, while WE can sample correctly and efficiently, a scalable implementation applicable to interesting biomolecular systems is not available. We provide here a GPLv2 implementation called AWE-WQ of a WE algorithm using the master/worker distributed computing WorkQueue (WQ) framework. 
AWE-WQ is scalable to thousands of nodes and supports dynamic allocation of computer resources, heterogeneous resource usage (such as central processing units (CPU) and graphical processing units (GPUs) concurrently), seamless heterogeneous cluster usage (i.e., campus grids and cloud providers), and support for arbitrary MD codes such as GROMACS, while ensuring that all statistics are unbiased. We applied AWE-WQ to a 34 residue protein which simulated 1.5 ms over 8 months with peak aggregate performance of 1000 ns/h. Comparison was done with a 200 μs simulation collected on a GPU over a similar timespan. The folding and unfolded rates were of comparable accuracy.}, } @article {pmid25205499, year = {2014}, author = {Qian, K and Guo, J and Xu, H and Zhu, Z and Zhang, G}, title = {Snore related signals processing in a private cloud computing system.}, journal = {Interdisciplinary sciences, computational life sciences}, volume = {6}, number = {3}, pages = {216-221}, doi = {10.1007/s12539-013-0203-8}, pmid = {25205499}, issn = {1867-1462}, mesh = {Acoustics ; Computing Methodologies ; Diagnosis, Computer-Assisted/*methods ; Sleep Apnea, Obstructive/physiopathology ; *Snoring ; *Sound ; }, abstract = {Snore related signals (SRS) have been demonstrated to carry important information about the obstruction site and degree in the upper airway of Obstructive Sleep Apnea-Hypopnea Syndrome (OSAHS) patients in recent years. To make this acoustic signal analysis method more accurate and robust, big SRS data processing is inevitable. As an emerging concept and technology, cloud computing has motivated numerous researchers and engineers to exploit applications both in academic and industry field, which could have an ability to implement a huge blue print in biomedical engineering. Considering the security and transferring requirement of biomedical data, we designed a system based on private cloud computing to process SRS. 
Then we set the comparable experiments of processing a 5-hour audio recording of an OSAHS patient by a personal computer, a server and a private cloud computing system to demonstrate the efficiency of the infrastructure we proposed.}, } @article {pmid25202715, year = {2014}, author = {Lacuesta, R and Lloret, J and Sendra, S and Peñalver, L}, title = {Spontaneous ad hoc mobile cloud computing network.}, journal = {TheScientificWorldJournal}, volume = {2014}, number = {}, pages = {232419}, doi = {10.1155/2014/232419}, pmid = {25202715}, issn = {1537-744X}, mesh = {Algorithms ; *Computer Communication Networks ; Models, Theoretical ; *Wireless Technology ; }, abstract = {Cloud computing helps users and companies to share computing resources instead of having local servers or personal devices to handle the applications. Smart devices are becoming one of the main information processing devices. Their computing features are reaching levels that let them create a mobile cloud computing network. But sometimes they are not able to create it and collaborate actively in the cloud because it is difficult for them to build easily a spontaneous network and configure its parameters. For this reason, in this paper, we are going to present the design and deployment of a spontaneous ad hoc mobile cloud computing network. In order to perform it, we have developed a trusted algorithm that is able to manage the activity of the nodes when they join and leave the network. The paper shows the network procedures and classes that have been designed. 
Our simulation results using Castalia show that our proposal presents a good efficiency and network performance even by using high number of nodes.}, } @article {pmid25196014, year = {2014}, author = {Gelenbe, E and Bi, H}, title = {Emergency navigation without an infrastructure.}, journal = {Sensors (Basel, Switzerland)}, volume = {14}, number = {8}, pages = {15142-15162}, pmid = {25196014}, issn = {1424-8220}, mesh = {Algorithms ; Computer Communication Networks/*instrumentation ; Construction Industry/instrumentation ; Electric Power Supplies ; Emergencies ; Humans ; Reproducibility of Results ; Time Factors ; Wireless Technology/*instrumentation ; }, abstract = {Emergency navigation systems for buildings and other built environments, such as sport arenas or shopping centres, typically rely on simple sensor networks to detect emergencies and, then, provide automatic signs to direct the evacuees. The major drawbacks of such static wireless sensor network (WSN)-based emergency navigation systems are the very limited computing capacity, which makes adaptivity very difficult, and the restricted battery power, due to the low cost of sensor nodes for unattended operation. If static wireless sensor networks and cloud-computing can be integrated, then intensive computations that are needed to determine optimal evacuation routes in the presence of time-varying hazards can be offloaded to the cloud, but the disadvantages of limited battery life-time at the client side, as well as the high likelihood of system malfunction during an emergency still remain. By making use of the powerful sensing ability of smart phones, which are increasingly ubiquitous, this paper presents a cloud-enabled indoor emergency navigation framework to direct evacuees in a coordinated fashion and to improve the reliability and resilience for both communication and localization. 
By combining social potential fields (SPF) and a cognitive packet network (CPN)-based algorithm, evacuees are guided to exits in dynamic loose clusters. Rather than relying on a conventional telecommunications infrastructure, we suggest an ad hoc cognitive packet network (AHCPN)-based protocol to adaptively search optimal communication routes between portable devices and the network egress nodes that provide access to cloud servers, in a manner that spares the remaining battery power of smart phones and minimizes the time latency. Experimental results through detailed simulations indicate that smart human motion and smart network management can increase the survival rate of evacuees and reduce the number of drained smart phones in an evacuation process.}, } @article {pmid25195583, year = {2014}, author = {Dalpé, G and Joly, Y}, title = {Opportunities and challenges provided by cloud repositories for bioinformatics-enabled drug discovery.}, journal = {Drug development research}, volume = {75}, number = {6}, pages = {393-401}, doi = {10.1002/ddr.21211}, pmid = {25195583}, issn = {1098-2299}, mesh = {Computational Biology/organization & administration ; Computer Security/ethics/legislation & jurisprudence ; Drug Discovery/ethics/legislation & jurisprudence/*methods ; Drug Repositioning ; Genome, Human ; Humans ; Information Storage and Retrieval/*ethics/*legislation & jurisprudence/methods ; Precision Medicine ; Web Browser ; }, abstract = {Healthcare-related bioinformatics databases are increasingly offering the possibility to maintain, organize, and distribute DNA sequencing data. Different national and international institutions are currently hosting such databases that offer researchers website platforms where they can obtain sequencing data on which they can perform different types of analysis. Until recently, this process remained mostly one-dimensional, with most analysis concentrated on a limited amount of data. 
However, newer genome sequencing technology is producing a huge amount of data that current computer facilities are unable to handle. An alternative approach has been to start adopting cloud computing services for combining the information embedded in genomic and model system biology data, patient healthcare records, and clinical trials' data. In this new technological paradigm, researchers use virtual space and computing power from existing commercial or not-for-profit cloud service providers to access, store, and analyze data via different application programming interfaces. Cloud services are an alternative to the need of larger data storage; however, they raise different ethical, legal, and social issues. The purpose of this Commentary is to summarize how cloud computing can contribute to bioinformatics-based drug discovery and to highlight some of the outstanding legal, ethical, and social issues that are inherent in the use of cloud services.}, } @article {pmid25170937, year = {2014}, author = {Gui, Z and Yang, C and Xia, J and Huang, Q and Liu, K and Li, Z and Yu, M and Sun, M and Zhou, N and Jin, B}, title = {A service brokering and recommendation mechanism for better selecting cloud services.}, journal = {PloS one}, volume = {9}, number = {8}, pages = {e105297}, doi = {10.1371/journal.pone.0105297}, pmid = {25170937}, issn = {1932-6203}, mesh = {*Computer Systems ; *Information Storage and Retrieval ; *Internet ; Software ; Workflow ; }, abstract = {Cloud computing is becoming the new generation computing infrastructure, and many cloud vendors provide different types of cloud services. How to choose the best cloud services for specific applications is very challenging. Addressing this challenge requires balancing multiple factors, such as business demands, technologies, policies and preferences in addition to the computing requirements. 
This paper recommends a mechanism for selecting the best public cloud service at the levels of Infrastructure as a Service (IaaS) and Platform as a Service (PaaS). A systematic framework and associated workflow include cloud service filtration, solution generation, evaluation, and selection of public cloud services. Specifically, we propose the following: a hierarchical information model for integrating heterogeneous cloud information from different providers and a corresponding cloud information collecting mechanism; a cloud service classification model for categorizing and filtering cloud services and an application requirement schema for providing rules for creating application-specific configuration solutions; and a preference-aware solution evaluation mode for evaluating and recommending solutions according to the preferences of application providers. To test the proposed framework and methodologies, a cloud service advisory tool prototype was developed after which relevant experiments were conducted. 
The results show that the proposed system collects/updates/records the cloud information from multiple mainstream public cloud services in real-time, generates feasible cloud configuration solutions according to user specifications and acceptable cost prediction, assesses solutions from multiple aspects (e.g., computing capability, potential cost and Service Level Agreement, SLA) and offers rational recommendations based on user preferences and practical cloud provisioning; and visually presents and compares solutions through an interactive web Graphical User Interface (GUI).}, } @article {pmid25160298, year = {2014}, author = {Stoicu-Tivadar, L and Stoicu-Tivadar, V and Berian, D and Drăgan, S and Serban, A and Serban, C}, title = {eduCRATE--a Virtual Hospital architecture.}, journal = {Studies in health technology and informatics}, volume = {205}, number = {}, pages = {803-807}, pmid = {25160298}, issn = {1879-8365}, mesh = {Computer Simulation ; Computer-Assisted Instruction/*methods ; Delivery of Health Care/*organization & administration ; Hospital Administration/*methods ; Internet/organization & administration ; Models, Organizational ; *Models, Theoretical ; Patient Care Management/*organization & administration ; *Patient Simulation ; *User-Computer Interface ; }, abstract = {eduCRATE is a complex project proposal which aims to develop a virtual learning environment offering interactive digital content through original and integrated solutions using cloud computing, complex multimedia systems in virtual space and personalized design with avatars. Compared to existing similar products the project brings the novelty of using languages for medical guides in order to ensure a maximum of flexibility. The Virtual Hospital simulations will create interactive clinical scenarios for which students will find solutions for positive diagnosis and therapeutic management. 
The solution based on cloud computing and immersive multimedia is an attractive option in education because it is economical and it matches the current working style of the young generation to whom it addresses.}, } @article {pmid25160218, year = {2014}, author = {Kimura, E and Kobayashi, S and Ishihara, K}, title = {HTML5 microdata as a semantic container for medical information exchange.}, journal = {Studies in health technology and informatics}, volume = {205}, number = {}, pages = {418-422}, pmid = {25160218}, issn = {1879-8365}, mesh = {Electronic Health Records/*standards ; *Guidelines as Topic ; Health Level Seven/*standards ; Information Storage and Retrieval/standards ; Internet/*standards ; Medical Record Linkage/*standards ; *Natural Language Processing ; *Programming Languages ; Semantics ; Vocabulary, Controlled ; }, abstract = {Achieving interoperability between clinical electronic medical records (EMR) systems and cloud computing systems is challenging because of the lack of a universal reference method as a standard for information exchange with a secure connection. Here we describe an information exchange scheme using HTML5 microdata, where the standard semantic container is an HTML document. We embed HL7 messages describing laboratory test results in the microdata. We also annotate items in the clinical research report with the microdata. We mapped the laboratory test result data into the clinical research report using an HL7 selector specified in the microdata. 
This scheme can provide secure cooperation between the cloud-based service and the EMR system.}, } @article {pmid25155691, year = {2014}, author = {Qi, X and Wang, D and Rodero, I and Diaz-Montes, J and Gensure, RH and Xing, F and Zhong, H and Goodell, L and Parashar, M and Foran, DJ and Yang, L}, title = {Content-based histopathology image retrieval using CometCloud.}, journal = {BMC bioinformatics}, volume = {15}, number = {1}, pages = {287}, pmid = {25155691}, issn = {1471-2105}, support = {UL1 TR000117/TR/NCATS NIH HHS/United States ; 5R01CA156386-09/CA/NCI NIH HHS/United States ; P30 CA072720/CA/NCI NIH HHS/United States ; R01 LM009239/LM/NLM NIH HHS/United States ; R01 CA156386/CA/NCI NIH HHS/United States ; R01 CA161375/CA/NCI NIH HHS/United States ; KL2 TR000116/TR/NCATS NIH HHS/United States ; 2R01LM009239-05/LM/NLM NIH HHS/United States ; 5R01LM011119-03/LM/NLM NIH HHS/United States ; R01 AR065479/AR/NIAMS NIH HHS/United States ; 5R01CA161375-03/CA/NCI NIH HHS/United States ; R01 LM011119/LM/NLM NIH HHS/United States ; TL1 TR000115/TR/NCATS NIH HHS/United States ; UL1TR000117/TR/NCATS NIH HHS/United States ; }, mesh = {*Algorithms ; *Diagnostic Imaging ; Feedback ; Information Storage and Retrieval/*methods ; *Pathology ; Pattern Recognition, Automated ; Reproducibility of Results ; }, abstract = {BACKGROUND: The development of digital imaging technology is creating extraordinary levels of accuracy that provide support for improved reliability in different aspects of the image analysis, such as content-based image retrieval, image segmentation, and classification. This has dramatically increased the volume and rate at which data are generated. Together these facts make querying and sharing non-trivial and render centralized solutions unfeasible. Moreover, in many cases this data is often distributed and must be shared across multiple institutions requiring decentralized solutions. 
In this context, a new generation of data/information driven applications must be developed to take advantage of the national advanced cyber-infrastructure (ACI) which enable investigators to seamlessly and securely interact with information/data which is distributed across geographically disparate resources. This paper presents the development and evaluation of a novel content-based image retrieval (CBIR) framework. The methods were tested extensively using both peripheral blood smears and renal glomeruli specimens. The datasets and performance were evaluated by two pathologists to determine the concordance.

RESULTS: The CBIR algorithms that were developed can reliably retrieve the candidate image patches exhibiting intensity and morphological characteristics that are most similar to a given query image. The methods described in this paper are able to reliably discriminate among subtle staining differences and spatial pattern distributions. By integrating a newly developed dual-similarity relevance feedback module into the CBIR framework, the CBIR results were improved substantially. By aggregating the computational power of high performance computing (HPC) and cloud resources, we demonstrated that the method can be successfully executed in minutes on the Cloud compared to weeks using standard computers.

CONCLUSIONS: In this paper, we present a set of newly developed CBIR algorithms and validate them using two different pathology applications, which are regularly evaluated in the practice of pathology. Comparative experimental results demonstrate excellent performance throughout the course of a set of systematic studies. Additionally, we present and evaluate a framework to enable the execution of these algorithms across distributed resources. We show how parallel searching of content-wise similar images in the dataset significantly reduces the overall computational time to ensure the practical utility of the proposed CBIR algorithms.}, } @article {pmid25148662, year = {2014}, author = {Zhang, H and Wang, J and Fang, T and Quan, L}, title = {Joint segmentation of images and scanned point cloud in large-scale street scenes with low-annotation cost.}, journal = {IEEE transactions on image processing : a publication of the IEEE Signal Processing Society}, volume = {23}, number = {11}, pages = {4763-4772}, doi = {10.1109/TIP.2014.2348795}, pmid = {25148662}, issn = {1941-0042}, abstract = {We propose a novel method for the parsing of images and scanned point cloud in large-scale street environment. The proposed method significantly reduces the intensive labeling cost in previous works by automatically generating training data from the input data. The automatic generation of training data begins with the initialization of training data with weak priors in the street environment, followed by a filtering scheme to remove mislabeled training samples. We formulate the filtering as a binary labeling optimization problem over a conditional random field that we call object graph, simultaneously integrating spatial smoothness preference and label consistency between 2D and 3D. 
Toward the final parsing, with the automatically generated training data, a CRF-based parsing method that integrates the coordination of image appearance and 3D geometry is adopted to perform the parsing of large-scale street scenes. The proposed approach is evaluated on city-scale Google Street View data, with an encouraging parsing performance demonstrated.}, } @article {pmid25147832, year = {2014}, author = {Jeong, HY and Yi, G}, title = {A service based adaptive U-learning system using UX.}, journal = {TheScientificWorldJournal}, volume = {2014}, number = {}, pages = {109435}, doi = {10.1155/2014/109435}, pmid = {25147832}, issn = {1537-744X}, mesh = {Humans ; *Internet ; *Learning ; *Software ; }, abstract = {In recent years, traditional development techniques for e-learning systems have been changing to become more convenient and efficient. One new technology in the development of application systems includes both cloud and ubiquitous computing. Cloud computing can support learning system processes by using services while ubiquitous computing can provide system operation and management via a high performance technical process and network. In the cloud computing environment, a learning service application can provide a business module or process to the user via the internet. This research focuses on providing the learning material and processes of courses by learning units using the services in a ubiquitous computing environment. And we also investigate functions that support users' tailored materials according to their learning style. That is, we analyzed the user's data and their characteristics in accordance with their user experience. We subsequently applied the learning process to fit on their learning performance and preferences. 
Finally, we demonstrate how the proposed system outperforms learning effects to learners better than existing techniques.}, } @article {pmid25146114, year = {2014}, author = {Korb, O and Finn, PW and Jones, G}, title = {The cloud and other new computational methods to improve molecular modelling.}, journal = {Expert opinion on drug discovery}, volume = {9}, number = {10}, pages = {1121-1131}, doi = {10.1517/17460441.2014.941800}, pmid = {25146114}, issn = {1746-045X}, mesh = {Computational Biology/*methods ; Computer Graphics ; Drug Discovery/*methods ; *Molecular Dynamics Simulation ; }, abstract = {INTRODUCTION: Industrial, as well as academic, drug discovery efforts are usually supported by computational modelling techniques. Many of these techniques, such as virtual high-throughput docking, pharmacophore-based screening of conformer databases and molecular dynamics simulations, are computationally very demanding. Depending on the parallelisation strategy applicable to the respective method, recent technologies based on central processing units, for example, cloud and grid computing, or graphics processing units (GPUs), can be employed to accelerate their execution times considerably. This allows the molecular modeller to look at larger data sets, or to use more accurate methods.

AREAS COVERED: The article introduces the recent developments in grid, cloud and GPU computing. The authors provide an overview of molecular modelling applications running on the above-mentioned hardware platforms and highlight caveats of the respective architectures, both from a theoretical and a practical point of view.

EXPERT OPINION: The architectures described can improve the molecular modelling process considerably, if the appropriate technologies are selected for the respective application. Despite these improvements, each of the individual computational platforms suffers from specific issues, which will need to be addressed in the future. Furthermore, current endeavours have focused on improving the performance of existing algorithms, rather than the development of new methods that explicitly harness these new technologies.}, } @article {pmid25127245, year = {2014}, author = {Shiraz, M and Gani, A and Ahmad, RW and Adeel Ali Shah, S and Karim, A and Rahman, ZA}, title = {A lightweight distributed framework for computational offloading in mobile cloud computing.}, journal = {PloS one}, volume = {9}, number = {8}, pages = {e102270}, pmid = {25127245}, issn = {1932-6203}, mesh = {*Computer Communication Networks ; *Computer Storage Devices ; *Internet ; *Mobile Applications ; }, abstract = {The latest developments in mobile computing technology have enabled intensive applications on the modern Smartphones. However, such applications are still constrained by limitations in processing potentials, storage capacity and battery lifetime of the Smart Mobile Devices (SMDs). Therefore, Mobile Cloud Computing (MCC) leverages the application processing services of computational clouds for mitigating resources limitations in SMDs. Currently, a number of computational offloading frameworks are proposed for MCC wherein the intensive components of the application are outsourced to computational clouds. Nevertheless, such frameworks focus on runtime partitioning of the application for computational offloading, which is time consuming and resources intensive. The resource constraint nature of SMDs require lightweight procedures for leveraging computational clouds. 
Therefore, this paper presents a lightweight framework which focuses on minimizing additional resources utilization in computational offloading for MCC. The framework employs features of centralized monitoring, high availability and on demand access services of computational clouds for computational offloading. As a result, the turnaround time and execution cost of the application are reduced. The framework is evaluated by testing prototype application in the real MCC environment. The lightweight nature of the proposed framework is validated by employing computational offloading for the proposed framework and the latest existing frameworks. Analysis shows that by employing the proposed framework for computational offloading, the size of data transmission is reduced by 91%, energy consumption cost is minimized by 81% and turnaround time of the application is decreased by 83.5% as compared to the existing offloading frameworks. Hence, the proposed framework minimizes additional resources utilization and therefore offers lightweight solution for computational offloading in MCC.}, } @article {pmid25125162, year = {2014}, author = {Feng, RC and Yeh, YT}, title = {[A new vision of nursing: the evolution and development of nursing informatics].}, journal = {Hu li za zhi The journal of nursing}, volume = {61}, number = {4 Suppl}, pages = {78-84}, doi = {10.6224/JN.61.4S.78}, pmid = {25125162}, issn = {0047-262X}, mesh = {Humans ; Nurse's Role ; *Nursing Informatics/trends ; Taiwan ; }, abstract = {Technology development trends in the 21st century are increasingly focused on the development of interdisciplinary applications. Advanced information technology may be applied to integrate nursing care information, simplify nursing processes, and reduce the time spent on work tasks, thereby increasing the amount of time that clinical personnel are available to care for patients and ensuring that patients are provided with high-quality and personalized care services. 
The development of nursing information began in Taiwan in 2003 and has since expanded and thrived. The ability of nursing information to connect formerly insular national nursing communities promotes the international visibility of Taiwan. The rapid development of nursing information in Taiwan, resulting in the production of informative and outstanding results, has received worldwide attention. The Taiwan Nursing Informatics Association was established in 2006 to nurture nursing information professionals, develop and apply information technology in the health care domain, and facilitate international nursing information exchanges. The association actively promotes nursing information in the areas of administration, education, research, and clinical practice, thereby integrating nursing with empirical applications to enhance the service quality and management of nursing and increase the benefits of nursing teaching and research. To convert information into knowledge, the association develops individualized strategies for managing mobile care and employs an interagency network to exchange and reintegrate resources, establishing active, intelligent nursing based on network characteristics and an empirical foundation. 
The mid- and long-term objectives of the association involve introducing cloud computing and facilitating the meaningful use of nursing information in both public and government settings, thereby creating a milestone of developing and expanding nursing information unique to Taiwan.}, } @article {pmid25123734, year = {2014}, author = {Vimarlund, V and Wass, S}, title = {Big data, smart homes and ambient assisted living.}, journal = {Yearbook of medical informatics}, volume = {9}, number = {1}, pages = {143-149}, pmid = {25123734}, issn = {2364-0502}, mesh = {*Assisted Living Facilities ; *Computer Systems ; *Datasets as Topic ; *Housing ; Humans ; Medical Informatics Applications ; Remote Sensing Technology ; }, abstract = {OBJECTIVES: To discuss how current research in the area of smart homes and ambient assisted living will be influenced by the use of big data.

METHODS: A scoping review of literature published in scientific journals and conference proceedings was performed, focusing on smart homes, ambient assisted living and big data over the years 2011-2014.

RESULTS: The health and social care market has lagged behind other markets when it comes to the introduction of innovative IT solutions and the market faces a number of challenges as the use of big data will increase. First, there is a need for a sustainable and trustful information chain where the needed information can be transferred from all producers to all consumers in a structured way. Second, there is a need for big data strategies and policies to manage the new situation where information is handled and transferred independently of the place of the expertise. Finally, there is a possibility to develop new and innovative business models for a market that supports cloud computing, social media, crowdsourcing etc.

CONCLUSIONS: The interdisciplinary area of big data, smart homes and ambient assisted living is no longer only of interest for IT developers, it is also of interest for decision makers as customers make more informed choices among today's services. In the future it will be of importance to make information usable for managers and improve decision making, tailor smart home services based on big data, develop new business models, increase competition and identify policies to ensure privacy, security and liability.}, } @article {pmid25123721, year = {2014}, author = {Koutkias, V and Thiessard, F}, title = {Big data - smart health strategies. Findings from the yearbook 2014 special theme.}, journal = {Yearbook of medical informatics}, volume = {9}, number = {1}, pages = {48-51}, pmid = {25123721}, issn = {2364-0502}, mesh = {Cloud Computing ; *Computational Biology ; *Data Mining ; *Databases, Factual ; *Electronic Health Records ; Medical Informatics ; Precision Medicine ; }, abstract = {OBJECTIVES: To select best papers published in 2013 in the field of big data and smart health strategies, and summarize outstanding research efforts.

METHODS: A systematic search was performed using two major bibliographic databases for relevant journal papers. The references obtained were reviewed in a two-stage process, starting with a blinded review performed by the two section editors, and followed by a peer review process operated by external reviewers recognized as experts in the field.

RESULTS: The complete review process selected four best papers, illustrating various aspects of the special theme, among them: (a) using large volumes of unstructured data and, specifically, clinical notes from Electronic Health Records (EHRs) for pharmacovigilance; (b) knowledge discovery via querying large volumes of complex (both structured and unstructured) biological data using big data technologies and relevant tools; (c) methodologies for applying cloud computing and big data technologies in the field of genomics, and (d) system architectures enabling high-performance access to and processing of large datasets extracted from EHRs.

CONCLUSIONS: The potential of big data in biomedicine has been pinpointed in various viewpoint papers and editorials. The review of current scientific literature illustrated a variety of interesting methods and applications in the field, but still the promises exceed the current outcomes. As we are getting closer towards a solid foundation with respect to common understanding of relevant concepts and technical aspects, and the use of standardized technologies and tools, we can anticipate to reach the potential that big data offer for personalized medicine and smart health strategies in the near future.}, } @article {pmid25123718, year = {2014}, author = {Liyanage, H and de Lusignan, S and Liaw, ST and Kuziemsky, CE and Mold, F and Krause, P and Fleming, D and Jones, S}, title = {Big Data Usage Patterns in the Health Care Domain: A Use Case Driven Approach Applied to the Assessment of Vaccination Benefits and Risks. Contribution of the IMIA Primary Healthcare Working Group.}, journal = {Yearbook of medical informatics}, volume = {9}, number = {1}, pages = {27-35}, pmid = {25123718}, issn = {2364-0502}, mesh = {*Computational Biology ; *Data Mining ; *Databases, Factual ; Epidemics ; Humans ; Medical Informatics ; Medical Records Systems, Computerized ; Population Surveillance/*methods ; *Vaccination/adverse effects/statistics & numerical data ; }, abstract = {BACKGROUND: Generally benefits and risks of vaccines can be determined from studies carried out as part of regulatory compliance, followed by surveillance of routine data; however there are some rarer and more long term events that require new methods. Big data generated by increasingly affordable personalised computing, and from pervasive computing devices is rapidly growing and low cost, high volume, cloud computing makes the processing of these data inexpensive.

OBJECTIVE: To describe how big data and related analytical methods might be applied to assess the benefits and risks of vaccines.

METHOD: We reviewed the literature on the use of big data to improve health, applied to generic vaccine use cases, that illustrate benefits and risks of vaccination. We defined a use case as the interaction between a user and an information system to achieve a goal. We used flu vaccination and pre-school childhood immunisation as exemplars.

RESULTS: We reviewed three big data use cases relevant to assessing vaccine benefits and risks: (i) Big data processing using crowdsourcing, distributed big data processing, and predictive analytics, (ii) Data integration from heterogeneous big data sources, e.g. the increasing range of devices in the "internet of things", and (iii) Real-time monitoring for the direct monitoring of epidemics as well as vaccine effects via social media and other data sources.

CONCLUSIONS: Big data raises new ethical dilemmas, though its analysis methods can bring complementary real-time capabilities for monitoring epidemics and assessing vaccine benefit-risk balance.}, } @article {pmid25123456, year = {2014}, author = {Almashaqbeh, G and Hayajneh, T and Vasilakos, AV and Mohd, BJ}, title = {QoS-aware health monitoring system using cloud-based WBANs.}, journal = {Journal of medical systems}, volume = {38}, number = {10}, pages = {121}, pmid = {25123456}, issn = {1573-689X}, mesh = {Biosensing Techniques ; Computer Simulation ; Computer Systems ; Humans ; *Information Storage and Retrieval ; *Internet ; Monitoring, Physiologic/*instrumentation ; *Telemedicine ; *Wireless Technology ; }, abstract = {Wireless Body Area Networks (WBANs) are amongst the best options for remote health monitoring. However, as standalone systems WBANs have many limitations due to the large amount of processed data, mobility of monitored users, and the network coverage area. Integrating WBANs with cloud computing provides effective solutions to these problems and promotes the performance of WBANs based systems. Accordingly, in this paper we propose a cloud-based real-time remote health monitoring system for tracking the health status of non-hospitalized patients while practicing their daily activities. Compared with existing cloud-based WBAN frameworks, we divide the cloud into local one, that includes the monitored users and local medical staff, and a global one that includes the outer world. The performance of the proposed framework is optimized by reducing congestion, interference, and data delivery delay while supporting users' mobility. Several novel techniques and algorithms are proposed to accomplish our objective. First, the concept of data classification and aggregation is utilized to avoid clogging the network with unnecessary data traffic. 
Second, a dynamic channel assignment policy is developed to distribute the WBANs associated with the users on the available frequency channels to manage interference. Third, a delay-aware routing metric is proposed to be used by the local cloud in its multi-hop communication to speed up the reporting process of the health-related data. Fourth, the delay-aware metric is further utilized by the association protocols used by the WBANs to connect with the local cloud. Finally, the system with all the proposed techniques and algorithms is evaluated using extensive ns-2 simulations. The simulation results show superior performance of the proposed architecture in optimizing the end-to-end delay, handling the increased interference levels, maximizing the network capacity, and tracking user's mobility.}, } @article {pmid25121114, year = {2014}, author = {Sookhak, M and Akhunzada, A and Gani, A and Khurram Khan, M and Anuar, NB}, title = {Towards dynamic remote data auditing in computational clouds.}, journal = {TheScientificWorldJournal}, volume = {2014}, number = {}, pages = {269357}, doi = {10.1155/2014/269357}, pmid = {25121114}, issn = {1537-744X}, mesh = {*Algorithms ; *Computer Security ; Computer Simulation ; Information Management/*methods ; Information Storage and Retrieval/*methods ; *Models, Theoretical ; *Research Design ; }, abstract = {Cloud computing is a significant shift of computational paradigm where computing as a utility and storing data remotely have a great potential. Enterprise and businesses are now more interested in outsourcing their data to the cloud to lessen the burden of local data storage and maintenance. However, the outsourced data and the computation outcomes are not continuously trustworthy due to the lack of control and physical possession of the data owners. To better streamline this issue, researchers have now focused on designing remote data auditing (RDA) techniques. 
The majority of these techniques, however, are only applicable for static archive data and are not subject to audit the dynamically updated outsourced data. We propose an effectual RDA technique based on algebraic signature properties for cloud storage system and also present a new data structure capable of efficiently supporting dynamic data operations like append, insert, modify, and delete. Moreover, this data structure empowers our method to be applicable for large-scale data with minimum computation cost. The comparative analysis with the state-of-the-art RDA schemes shows that the proposed scheme is secure and highly efficient in terms of the computation and communication overhead on the auditor and server.}, } @article {pmid25097880, year = {2014}, author = {Khan, S and Shiraz, M and Wahab, AW and Gani, A and Han, Q and Rahman, ZB}, title = {A comprehensive review on adaptability of network forensics frameworks for mobile cloud computing.}, journal = {TheScientificWorldJournal}, volume = {2014}, number = {}, pages = {547062}, doi = {10.1155/2014/547062}, pmid = {25097880}, issn = {1537-744X}, mesh = {*Computer Systems ; Forensic Sciences/*methods ; Information Storage and Retrieval/*methods ; }, abstract = {Network forensics enables investigation and identification of network attacks through the retrieved digital content. The proliferation of smartphones and the cost-effective universal data access through cloud has made Mobile Cloud Computing (MCC) a congenital target for network attacks. However, confines in carrying out forensics in MCC is interrelated with the autonomous cloud hosting companies and their policies for restricted access to the digital content in the back-end cloud platforms. It implies that existing Network Forensic Frameworks (NFFs) have limited impact in the MCC paradigm. To this end, we qualitatively analyze the adaptability of existing NFFs when applied to the MCC. 
Explicitly, the fundamental mechanisms of NFFs are highlighted and then analyzed using the most relevant parameters. A classification is proposed to help understand the anatomy of existing NFFs. Subsequently, a comparison is given that explores the functional similarities and deviations among NFFs. The paper concludes by discussing research challenges for progressive network forensics in MCC.}, } @article {pmid25097872, year = {2014}, author = {Dong, YS and Xu, GC and Fu, XD}, title = {A distributed parallel genetic algorithm of placement strategy for virtual machines deployment on cloud platform.}, journal = {TheScientificWorldJournal}, volume = {2014}, number = {}, pages = {259139}, doi = {10.1155/2014/259139}, pmid = {25097872}, issn = {1537-744X}, mesh = {*Algorithms ; Information Storage and Retrieval/*methods ; }, abstract = {The cloud platform provides various services to users. More and more cloud centers provide infrastructure as the main way of operating. To improve the utilization rate of the cloud center and to decrease the operating cost, the cloud center provides services according to requirements of users by sharding the resources with virtualization. Considering both QoS for users and cost saving for cloud computing providers, we try to maximize performance and minimize energy cost as well. In this paper, we propose a distributed parallel genetic algorithm (DPGA) of placement strategy for virtual machines deployment on cloud platform. It executes the genetic algorithm parallelly and distributedly on several selected physical hosts in the first stage. Then it continues to execute the genetic algorithm of the second stage with solutions obtained from the first stage as the initial population. The solution calculated by the genetic algorithm of the second stage is the optimal one of the proposed approach. 
The experimental results show that the proposed placement strategy of VM deployment can ensure QoS for users and it is more effective and more energy efficient than other placement strategies on the cloud platform.}, } @article {pmid25097865, year = {2014}, author = {Tian, F and Gui, X and An, J and Yang, P and Zhao, J and Zhang, X}, title = {Protecting location privacy for outsourced spatial data in cloud storage.}, journal = {TheScientificWorldJournal}, volume = {2014}, number = {}, pages = {108072}, doi = {10.1155/2014/108072}, pmid = {25097865}, issn = {1537-744X}, mesh = {Algorithms ; *Computer Security ; Information Storage and Retrieval/*methods ; }, abstract = {As cloud computing services and location-aware devices are fully developed, a large amount of spatial data needs to be outsourced to the cloud storage provider, so the research on privacy protection for outsourced spatial data gets increasing attention from academia and industry. As a kind of spatial transformation method, Hilbert curve is widely used to protect the location privacy for spatial data. But sufficient security analysis for standard Hilbert curve (SHC) is seldom proceeded. In this paper, we propose an index modification method for SHC (SHC(∗)) and a density-based space filling curve (DSC) to improve the security of SHC; they can partially violate the distance-preserving property of SHC, so as to achieve better security. We formally define the indistinguishability and attack model for measuring the privacy disclosure risk of spatial transformation methods. The evaluation results indicate that SHC(∗) and DSC are more secure than SHC, and DSC achieves the best index generation performance.}, } @article {pmid25093693, year = {2014}, author = {Tošner, Z and Andersen, R and Stevensson, B and Edén, M and Nielsen, NC and Vosegaard, T}, title = {Computer-intensive simulation of solid-state NMR experiments using SIMPSON.}, journal = {Journal of magnetic resonance (San Diego, Calif. 
: 1997)}, volume = {246}, number = {}, pages = {79-93}, doi = {10.1016/j.jmr.2014.07.002}, pmid = {25093693}, issn = {1096-0856}, abstract = {Conducting large-scale solid-state NMR simulations requires fast computer software potentially in combination with efficient computational resources to complete within a reasonable time frame. Such simulations may involve large spin systems, multiple-parameter fitting of experimental spectra, or multiple-pulse experiment design using parameter scan, non-linear optimization, or optimal control procedures. To efficiently accommodate such simulations, we here present an improved version of the widely distributed open-source SIMPSON NMR simulation software package adapted to contemporary high performance hardware setups. The software is optimized for fast performance on standard stand-alone computers, multi-core processors, and large clusters of identical nodes. We describe the novel features for fast computation including internal matrix manipulations, propagator setups and acquisition strategies. For efficient calculation of powder averages, we implemented the interpolation method of Alderman, Solum, and Grant, as well as the recently introduced fast Wigner transform interpolation technique. The potential of the optimal control toolbox is greatly enhanced by higher precision gradients in combination with the efficient optimization algorithm known as limited memory Broyden-Fletcher-Goldfarb-Shanno. In addition, advanced parallelization can be used in all types of calculations, providing significant time reductions. SIMPSON is thus reflecting current knowledge in the field of numerical simulations of solid-state NMR experiments. 
The efficiency and novel features are demonstrated on the representative simulations.}, } @article {pmid25093343, year = {2014}, author = {Cubo, J and Nieto, A and Pimentel, E}, title = {A cloud-based Internet of Things platform for ambient assisted living.}, journal = {Sensors (Basel, Switzerland)}, volume = {14}, number = {8}, pages = {14070-14105}, pmid = {25093343}, issn = {1424-8220}, mesh = {*Artificial Intelligence ; *Computer Systems ; *Internet ; *Software ; }, abstract = {A common feature of ambient intelligence is that many objects are inter-connected and act in unison, which is also a challenge in the Internet of Things. There has been a shift in research towards integrating both concepts, considering the Internet of Things as representing the future of computing and communications. However, the efficient combination and management of heterogeneous things or devices in the ambient intelligence domain is still a tedious task, and it presents crucial challenges. Therefore, to appropriately manage the inter-connection of diverse devices in these systems requires: (1) specifying and efficiently implementing the devices (e.g., as services); (2) handling and verifying their heterogeneity and composition; and (3) standardizing and managing their data, so as to tackle large numbers of systems together, avoiding standalone applications on local servers. 
To overcome these challenges, this paper proposes a platform to manage the integration and behavior-aware orchestration of heterogeneous devices as services, stored and accessed via the cloud, with the following contributions: (i) we describe a lightweight model to specify the behavior of devices, to determine the order of the sequence of exchanged messages during the composition of devices; (ii) we define a common architecture using a service-oriented standard environment, to integrate heterogeneous devices by means of their interfaces, via a gateway, and to orchestrate them according to their behavior; (iii) we design a framework based on cloud computing technology, connecting the gateway in charge of acquiring the data from the devices with a cloud platform, to remotely access and monitor the data at run-time and react to emergency situations; and (iv) we implement and generate a novel cloud-based IoT platform of behavior-aware devices as services for ambient intelligence systems, validating the whole approach in real scenarios related to a specific ambient assisted living application.}, } @article {pmid25068736, year = {2014}, author = {Freeman, J and Vladimirov, N and Kawashima, T and Mu, Y and Sofroniew, NJ and Bennett, DV and Rosen, J and Yang, CT and Looger, LL and Ahrens, MB}, title = {Mapping brain activity at scale with cluster computing.}, journal = {Nature methods}, volume = {11}, number = {9}, pages = {941-950}, pmid = {25068736}, issn = {1548-7105}, support = {//Howard Hughes Medical Institute/United States ; }, mesh = {Action Potentials/*physiology ; Animals ; Brain/physiology ; Brain Mapping/*methods ; Computer Simulation ; Computing Methodologies ; Data Interpretation, Statistical ; Database Management Systems ; Databases, Factual ; Humans ; Information Storage and Retrieval/*methods ; *Models, Neurological ; Nerve Net/*physiology ; Neurons/*physiology ; Programming Languages ; *Software ; }, abstract = {Understanding brain function requires 
monitoring and interpreting the activity of large networks of neurons during behavior. Advances in recording technology are greatly increasing the size and complexity of neural data. Analyzing such data will pose a fundamental bottleneck for neuroscience. We present a library of analytical tools called Thunder built on the open-source Apache Spark platform for large-scale distributed computing. The library implements a variety of univariate and multivariate analyses with a modular, extendable structure well-suited to interactive exploration and analysis development. We demonstrate how these analyses find structure in large-scale neural data, including whole-brain light-sheet imaging data from fictively behaving larval zebrafish, and two-photon imaging data from behaving mouse. The analyses relate neuronal responses to sensory input and behavior, run in minutes or less and can be used on a private cluster or in the cloud. Our open-source framework thus holds promise for turning brain activity mapping efforts into biological insights.}, } @article {pmid25055368, year = {2013}, author = {Bahga, A and Madisetti, VK}, title = {A cloud-based approach for interoperable electronic health records (EHRs).}, journal = {IEEE journal of biomedical and health informatics}, volume = {17}, number = {5}, pages = {894-906}, doi = {10.1109/JBHI.2013.2257818}, pmid = {25055368}, issn = {2168-2208}, mesh = {*Electronic Health Records ; Humans ; Information Storage and Retrieval ; *Internet ; *Medical Informatics Applications ; }, abstract = {We present a cloud-based approach for the design of interoperable electronic health record (EHR) systems. Cloud computing environments provide several benefits to all the stakeholders in the healthcare ecosystem (patients, providers, payers, etc.). Lack of data interoperability standards and solutions has been a major obstacle in the exchange of healthcare data between different stakeholders. 
We propose an EHR system - cloud health information systems technology architecture (CHISTAR) that achieves semantic interoperability through the use of a generic design methodology which uses a reference model that defines a general purpose set of data structures and an archetype model that defines the clinical data attributes. CHISTAR application components are designed using the cloud component model approach that comprises loosely coupled components that communicate asynchronously. In this paper, we describe the high-level design of CHISTAR and the approaches for semantic interoperability, data integration, and security.}, } @article {pmid25050811, year = {2014}, author = {Shanahan, HP and Owen, AM and Harrison, AP}, title = {Bioinformatics on the cloud computing platform Azure.}, journal = {PloS one}, volume = {9}, number = {7}, pages = {e102642}, pmid = {25050811}, issn = {1932-6203}, mesh = {Computational Biology ; Databases, Genetic ; Humans ; Internet ; Oligonucleotide Array Sequence Analysis ; *Software ; }, abstract = {We discuss the applicability of the Microsoft cloud computing platform, Azure, for bioinformatics. We focus on the usability of the resource rather than its performance. We provide an example of how R can be used on Azure to analyse a large amount of microarray expression data deposited at the public database ArrayExpress. We provide a walk through to demonstrate explicitly how Azure can be used to perform these analyses in Appendix S1 and we offer a comparison with a local computation. We note that the use of the Platform as a Service (PaaS) offering of Azure can represent a steep learning curve for bioinformatics developers who will usually have a Linux and scripting language background. 
On the other hand, the presence of an additional set of libraries makes it easier to deploy software in a parallel (scalable) fashion and explicitly manage such a production run with only a few hundred lines of code, most of which can be incorporated from a template. We propose that this environment is best suited for running stable bioinformatics software by users not involved with its development.}, } @article {pmid25032243, year = {2014}, author = {Whaiduzzaman, M and Haque, MN and Rejaul Karim Chowdhury, M and Gani, A}, title = {A study on strategic provisioning of cloud computing services.}, journal = {TheScientificWorldJournal}, volume = {2014}, number = {}, pages = {894362}, pmid = {25032243}, issn = {1537-744X}, mesh = {Information Storage and Retrieval/*methods/standards ; Internet/standards ; }, abstract = {Cloud computing is currently emerging as an ever-changing, growing paradigm that models "everything-as-a-service." Virtualised physical resources, infrastructure, and applications are supplied by service provisioning in the cloud. The evolution in the adoption of cloud computing is driven by clear and distinct promising features for both cloud users and cloud providers. However, the increasing number of cloud providers and the variety of service offerings have made it difficult for the customers to choose the best services. By employing successful service provisioning, the essential services required by customers, such as agility and availability, pricing, security and trust, and user metrics can be guaranteed by service provisioning. Hence, continuous service provisioning that satisfies the user requirements is a mandatory feature for the cloud user and vitally important in cloud computing service offerings. Therefore, we aim to review the state-of-the-art service provisioning objectives, essential services, topologies, user requirements, necessary metrics, and pricing mechanisms. 
We synthesize and summarize different provision techniques, approaches, and models through a comprehensive literature review. A thematic taxonomy of cloud service provisioning is presented after the systematic review. Finally, future research directions and open research issues are identified.}, } @article {pmid25015761, year = {2014}, author = {Yao, Q and Han, X and Ma, XK and Xue, YF and Chen, YJ and Li, JS}, title = {Cloud-based hospital information system as a service for grassroots healthcare institutions.}, journal = {Journal of medical systems}, volume = {38}, number = {9}, pages = {104}, pmid = {25015761}, issn = {1573-689X}, mesh = {China ; Computer Systems ; *Hospital Information Systems ; Hospitals, Community/*organization & administration ; Information Storage and Retrieval/*methods ; *Internet ; Organizational Case Studies ; }, abstract = {Grassroots healthcare institutions (GHIs) are the smallest administrative levels of medical institutions, where most patients access health services. The latest report from the National Bureau of Statistics of China showed that 96.04 % of 950,297 medical institutions in China were at the grassroots level in 2012, including county-level hospitals, township central hospitals, community health service centers, and rural clinics. In developing countries, these institutions are facing challenges involving a shortage of funds and talent, inconsistent medical standards, inefficient information sharing, and difficulties in management during the adoption of health information technologies (HIT). Because of the necessity and gravity for GHIs, our aim is to provide hospital information services for GHIs using Cloud computing technologies and service modes. In this medical scenario, the computing resources are pooled by means of a Cloud-based Virtual Desktop Infrastructure (VDI) to serve multiple GHIs, with different hospital information systems dynamically assigned and reassigned according to demand. 
This paper is concerned with establishing a Cloud-based Hospital Information Service Center to provide hospital information software as a service (HI-SaaS) with the aim of providing GHIs with an attractive and high-performance medical information service. Compared with individually establishing all hospital information systems, this approach is more cost-effective and affordable for GHIs and does not compromise HIT performance.}, } @article {pmid25014943, year = {2014}, author = {Abbas, A and Khan, SU}, title = {A review on the state-of-the-art privacy-preserving approaches in the e-health clouds.}, journal = {IEEE journal of biomedical and health informatics}, volume = {18}, number = {4}, pages = {1431-1441}, doi = {10.1109/JBHI.2014.2300846}, pmid = {25014943}, issn = {2168-2208}, mesh = {*Computer Security ; *Confidentiality ; *Electronic Health Records ; Humans ; *Internet ; Medical Informatics Computing ; }, abstract = {Cloud computing is emerging as a new computing paradigm in the healthcare sector besides other business domains. Large numbers of health organizations have started shifting the electronic health information to the cloud environment. Introducing the cloud services in the health sector not only facilitates the exchange of electronic medical records among the hospitals and clinics, but also enables the cloud to act as a medical record storage center. Moreover, shifting to the cloud environment relieves the healthcare organizations of the tedious tasks of infrastructure management and also minimizes development and maintenance costs. Nonetheless, storing the patient health data in the third-party servers also entails serious threats to data privacy. Because of probable disclosure of medical records stored and exchanged in the cloud, the patients' privacy concerns should essentially be considered when designing the security and privacy mechanisms. 
Various approaches have been used to preserve the privacy of the health information in the cloud environment. This survey aims to encompass the state-of-the-art privacy-preserving approaches employed in the e-Health clouds. Moreover, the privacy-preserving approaches are classified into cryptographic and noncryptographic approaches and taxonomy of the approaches is also presented. Furthermore, the strengths and weaknesses of the presented approaches are reported and some open issues are highlighted.}, } @article {pmid25000061, year = {2014}, author = {Crişan-Vida, M and Serban, A and Ghihor-Izdrăilă, I and Mirea, A and Stoicu-Tivadar, L}, title = {User friendly IT Services for Monitoring and Prevention during Pregnancy.}, journal = {Studies in health technology and informatics}, volume = {202}, number = {}, pages = {241-244}, pmid = {25000061}, issn = {1879-8365}, mesh = {Data Mining/methods ; Electronic Health Records/*organization & administration ; Female ; Humans ; *Medical Records ; Mobile Applications ; Pregnancy ; Pregnancy Complications/diagnosis/*prevention & control ; Remote Consultation/*organization & administration ; *Smartphone ; *Social Support ; }, abstract = {A healthy lifestyle for a mother and monitoring both mother and fetus activities are crucial factors for a normal pregnancy without hazardous conditions. This paper proposes a cloud computing solution and a mobile application which collect data from the sensors to be used in Obstetrics-Gynecology Department. This application monitors the dietary plan of the pregnant woman and gives her the possibility to socialize and share pregnancy experience with the rest of women from the social network from the hospital. The physicians can access the information of the patient in real time and they can alert mothers in some situations. 
Using this cloud computing device, the health condition of the pregnant women may be improved.}, } @article {pmid25000049, year = {2014}, author = {Koumaditis, K and Themistocleous, M and Vassilacopoulos, G and Prentza, A and Kyriazis, D and Malamateniou, F and Maglaveras, N and Chouvarda, I and Mourouzis, A}, title = {Patient-Centered e-Health Record over the Cloud.}, journal = {Studies in health technology and informatics}, volume = {202}, number = {}, pages = {193-196}, pmid = {25000049}, issn = {1879-8365}, mesh = {*Cloud Computing ; Electronic Health Records/*organization & administration ; Greece ; *Models, Organizational ; Organizational Objectives ; *Patient Portals ; Patient-Centered Care/*organization & administration ; Telemedicine/*organization & administration ; }, abstract = {The purpose of this paper is to introduce the Patient-Centered e-Health (PCEH) conceptual aspects alongside a multidisciplinary project that combines state-of-the-art technologies like cloud computing. The project, by combining several aspects of PCEH, such as: (a) electronic Personal Healthcare Record (e-PHR), (b) homecare telemedicine technologies, (c) e-prescribing, e-referral, e-learning, with advanced technologies like cloud computing and Service Oriented Architecture (SOA), will lead to an innovative integrated e-health platform of many benefits to the society, the economy, the industry, and the research community. To achieve this, a consortium of experts, both from industry (two companies, one hospital and one healthcare organization) and academia (three universities), was set to investigate, analyse, design, build and test the new platform. This paper provides insights to the PCEH concept and to the current stage of the project. 
In doing so, we aim at increasing the awareness of this important endeavor and sharing the lessons learned so far throughout our work.}, } @article {pmid25000009, year = {2014}, author = {Poulymenopoulou, M and Malamateniou, F and Vassilacopoulos, G}, title = {Machine Learning for Knowledge Extraction from PHR Big Data.}, journal = {Studies in health technology and informatics}, volume = {202}, number = {}, pages = {36-39}, pmid = {25000009}, issn = {1879-8365}, mesh = {*Cloud Computing ; Data Mining/*methods ; *Datasets as Topic ; Electronic Health Records/*organization & administration ; Health Records, Personal ; Knowledge Management ; *Machine Learning ; *Natural Language Processing ; Pattern Recognition, Automated/methods ; }, abstract = {Cloud computing, Internet of things (IOT) and NoSQL database technologies can support a new generation of cloud-based PHR services that contain heterogeneous (unstructured, semi-structured and structured) patient data (health, social and lifestyle) from various sources, including automatically transmitted data from Internet connected devices of patient living space (e.g. medical devices connected to patients at home care). The patient data stored in such PHR systems constitute big data whose analysis with the use of appropriate machine learning algorithms is expected to improve diagnosis and treatment accuracy, to cut healthcare costs and, hence, to improve the overall quality and efficiency of healthcare provided. This paper describes a health data analytics engine which uses machine learning algorithms for analyzing cloud based PHR big health data towards knowledge extraction to support better healthcare delivery as regards disease diagnosis and prognosis. 
This engine comprises the data preparation, the model generation and the data analysis modules and runs on the cloud taking advantage of the map/reduce paradigm provided by Apache Hadoop.}, } @article {pmid24989862, year = {2014}, author = {Thrasher, A and Musgrave, Z and Kachmarck, B and Thain, D and Emrich, S}, title = {Scaling up genome annotation using MAKER and work queue.}, journal = {International journal of bioinformatics research and applications}, volume = {10}, number = {4-5}, pages = {447-460}, doi = {10.1504/IJBRA.2014.062994}, pmid = {24989862}, issn = {1744-5485}, mesh = {Algorithms ; Animals ; Anopheles/genetics ; Caenorhabditis/genetics ; Cluster Analysis ; Computational Biology/*methods ; Computer Systems ; *Genome ; High-Throughput Nucleotide Sequencing/*methods ; Software ; Tsetse Flies/genetics ; }, abstract = {Next generation sequencing technologies have enabled sequencing many genomes. Because of the overall increasing demand and the inherent parallelism available in many required analyses, these bioinformatics applications should ideally run on clusters, clouds and/or grids. We present a modified annotation framework that achieves a speed-up of 45x using 50 workers using a Caenorhabditis japonica test case. We also evaluate these modifications within the Amazon EC2 cloud framework. The underlying genome annotation (MAKER) is parallelised as an MPI application. Our framework enables it to now run without MPI while utilising a wide variety of distributed computing resources. This parallel framework also allows easy explicit data transfer, which helps overcome a major limitation of bioinformatics tools that often rely on shared file systems. 
Combined, our proposed framework can be used, even during early stages of development, to easily run sequence analysis tools on clusters, grids and clouds.}, } @article {pmid24987731, year = {2014}, author = {Ning, ZH and Shen, CX and Zhao, Y and Liang, P}, title = {Trusted measurement model based on multitenant behaviors.}, journal = {TheScientificWorldJournal}, volume = {2014}, number = {}, pages = {384967}, doi = {10.1155/2014/384967}, pmid = {24987731}, issn = {1537-744X}, mesh = {Algorithms ; *Models, Theoretical ; }, abstract = {With a fast growing pervasive computing, especially cloud computing, the behaviour measurement is at the core and plays a vital role. A new behaviour measurement tailored for Multitenants in cloud computing is needed urgently to fundamentally establish trust relationship. Based on our previous research, we propose an improved trust relationship scheme which captures the world of cloud computing where multitenants share the same physical computing platform. 
Here, we first present the related work on multitenant behaviour; secondly, we give the scheme of behaviour measurement where decoupling of multitenants is taken into account; thirdly, we explicitly explain our decoupling algorithm for multitenants; fourthly, we introduce a new way of similarity calculation for deviation control, which fits the coupled multitenants under study well; lastly, we design the experiments to test our scheme.}, } @article {pmid24982971, year = {2014}, author = {Zhong, L and Tang, K and Li, L and Yang, G and Ye, J}, title = {An improved clustering algorithm of tunnel monitoring data for cloud computing.}, journal = {TheScientificWorldJournal}, volume = {2014}, number = {}, pages = {630986}, doi = {10.1155/2014/630986}, pmid = {24982971}, issn = {1537-744X}, mesh = {*Algorithms ; *Cluster Analysis ; *Models, Theoretical ; }, abstract = {With the rapid development of urban construction, the number of urban tunnels is increasing and the data they produce become more and more complex. It results in the fact that the traditional clustering algorithm cannot handle the mass data of the tunnel. To solve this problem, an improved parallel clustering algorithm based on k-means has been proposed. It is a clustering algorithm using the MapReduce within cloud computing that deals with data. It not only has the advantage of being used to deal with mass data but also is more efficient. 
Moreover, it is able to compute the average dissimilarity degree of each cluster in order to clean the abnormal data.}, } @article {pmid24982949, year = {2014}, author = {Sun, Y and Wen, Q and Zhang, Y and Zhang, H and Jin, Z and Li, W}, title = {Two-cloud-servers-assisted secure outsourcing multiparty computation.}, journal = {TheScientificWorldJournal}, volume = {2014}, number = {}, pages = {413265}, doi = {10.1155/2014/413265}, pmid = {24982949}, issn = {1537-744X}, mesh = {*Algorithms ; *Computer Security ; Outsourced Services ; }, abstract = {We focus on how to securely outsource computation task to the cloud and propose a secure outsourcing multiparty computation protocol on lattice-based encrypted data in two-cloud-servers scenario. Our main idea is to transform the outsourced data respectively encrypted by different users' public keys to the ones that are encrypted by the same two private keys of the two assisted servers so that it is feasible to operate on the transformed ciphertexts to compute an encrypted result following the function to be computed. In order to keep the privacy of the result, the two servers cooperatively produce a custom-made result for each user that is authorized to get the result so that all authorized users can recover the desired result while other unauthorized ones including the two servers cannot. 
Compared with previous research, our protocol is completely noninteractive between any users, and both of the computation and the communication complexities of each user in our solution are independent of the computing function.}, } @article {pmid24982428, year = {2014}, author = {Gafni, E and Luquette, LJ and Lancaster, AK and Hawkins, JB and Jung, JY and Souilmi, Y and Wall, DP and Tonellato, PJ}, title = {COSMOS: Python library for massively parallel workflows.}, journal = {Bioinformatics (Oxford, England)}, volume = {30}, number = {20}, pages = {2956-2958}, pmid = {24982428}, issn = {1367-4811}, support = {T15 LM007092/LM/NLM NIH HHS/United States ; 1R01LM011566/LM/NLM NIH HHS/United States ; 5T15LM007092/LM/NLM NIH HHS/United States ; R01 LM011566/LM/NLM NIH HHS/United States ; 1R01MH090611-01A1/MH/NIMH NIH HHS/United States ; }, mesh = {Genomics/*methods ; High-Throughput Nucleotide Sequencing/*methods ; *Programming Languages ; }, abstract = {SUMMARY: Efficient workflows to shepherd clinically generated genomic data through the multiple stages of a next-generation sequencing pipeline are of critical importance in translational biomedical science. Here we present COSMOS, a Python library for workflow management that allows formal description of pipelines and partitioning of jobs. In addition, it includes a user interface for tracking the progress of jobs, abstraction of the queuing system and fine-grained control over the workflow. Workflows can be created on traditional computing clusters as well as cloud-based services.

Source code is available for academic non-commercial research purposes. Links to code and documentation are provided at http://lpm.hms.harvard.edu and http://wall-lab.stanford.edu.

CONTACT: dpwall@stanford.edu or peter_tonellato@hms.harvard.edu.

SUPPLEMENTARY INFORMATION: Supplementary data are available at Bioinformatics online.}, } @article {pmid24980620, year = {2014}, author = {Flanagan, K and Cockell, S and Harwood, C and Hallinan, J and Nakjang, S and Lawry, B and Wipat, A}, title = {A distributed computational search strategy for the identification of diagnostics targets: application to finding aptamer targets for methicillin-resistant staphylococci.}, journal = {Journal of integrative bioinformatics}, volume = {11}, number = {2}, pages = {242}, doi = {10.2390/biecoll-jib-2014-242}, pmid = {24980620}, issn = {1613-4516}, support = {BBS/B/13799/BB_/Biotechnology and Biological Sciences Research Council/United Kingdom ; }, mesh = {Algorithms ; Automation ; Bacterial Proteins/genetics ; Computational Biology/*methods ; Computer Communication Networks ; Computer Systems ; DNA/chemistry ; Epitopes/chemistry ; Genome, Bacterial ; Ligands ; Methicillin-Resistant Staphylococcus aureus/drug effects/*genetics ; Peptides/chemistry ; RNA/chemistry ; Staphylococcal Infections/diagnosis/*microbiology ; }, abstract = {The rapid and cost-effective identification of bacterial species is crucial, especially for clinical diagnosis and treatment. Peptide aptamers have been shown to be valuable for use as a component of novel, direct detection methods. These small peptides have a number of advantages over antibodies, including greater specificity and longer shelf life. These properties facilitate their use as the detector components of biosensor devices. However, the identification of suitable aptamer targets for particular groups of organisms is challenging. We present a semi-automated processing pipeline for the identification of candidate aptamer targets from whole bacterial genome sequences. The pipeline can be configured to search for protein sequence fragments that uniquely identify a set of strains of interest. 
The system is also capable of identifying additional organisms that may be of interest due to their possession of protein fragments in common with the initial set. Through the use of Cloud computing technology and distributed databases, our system is capable of scaling with the rapidly growing genome repositories, and consequently of keeping the resulting data sets up-to-date. The system described is also more generically applicable to the discovery of specific targets for other diagnostic approaches such as DNA probes, PCR primers and antibodies.}, } @article {pmid24971059, year = {2014}, author = {Rautenberg, PL and Kumaraswamy, A and Tejero-Cantero, A and Doblander, C and Norouzian, MR and Kai, K and Jacobsen, HA and Ai, H and Wachtler, T and Ikeno, H}, title = {NeuronDepot: keeping your colleagues in sync by combining modern cloud storage services, the local file system, and simple web applications.}, journal = {Frontiers in neuroinformatics}, volume = {8}, number = {}, pages = {55}, pmid = {24971059}, issn = {1662-5196}, abstract = {Neuroscience today deals with a "data deluge" derived from the availability of high-throughput sensors of brain structure and brain activity, and increased computational resources for detailed simulations with complex output. We report here (1) a novel approach to data sharing between collaborating scientists that brings together file system tools and cloud technologies, (2) a service implementing this approach, called NeuronDepot, and (3) an example application of the service to a complex use case in the neurosciences. The main drivers for our approach are to facilitate collaborations with a transparent, automated data flow that shields scientists from having to learn new tools or data structuring paradigms. Using NeuronDepot is simple: one-time data assignment from the originator and cloud based syncing-thus making experimental and modeling data available across the collaboration with minimum overhead. 
Since data sharing is cloud based, our approach opens up the possibility of using new software developments and hardware scalability which are associated with elastic cloud computing. We provide an implementation that relies on existing synchronization services and is usable from all devices via a reactive web interface. We are motivating our solution by solving the practical problems of the GinJang project, a collaboration of three universities across eight time zones with a complex workflow encompassing data from electrophysiological recordings, imaging, morphological reconstructions, and simulations.}, } @article {pmid24961214, year = {2014}, author = {Fekr, AR and Janidarmian, M and Radecka, K and Zilic, Z}, title = {A medical cloud-based platform for respiration rate measurement and hierarchical classification of breath disorders.}, journal = {Sensors (Basel, Switzerland)}, volume = {14}, number = {6}, pages = {11204-11224}, pmid = {24961214}, issn = {1424-8220}, mesh = {Accelerometry/instrumentation/*methods ; Artificial Intelligence ; Diagnosis, Computer-Assisted/instrumentation/*methods ; Humans ; *Internet ; Pattern Recognition, Automated/methods ; Reproducibility of Results ; Respiration Disorders/*diagnosis/physiopathology ; *Respiratory Rate ; Sensitivity and Specificity ; Telemedicine/*instrumentation/*methods ; }, abstract = {The measurement of human respiratory signals is crucial in cyberbiological systems. A disordered breathing pattern can be the first symptom of different physiological, mechanical, or psychological dysfunctions. Therefore, a real-time monitoring of the respiration patterns, as well as respiration rate is a critical need in medical applications. There are several methods for respiration rate measurement. However, despite their accuracy, these methods are expensive and could not be integrated in a body sensor network.
In this work, we present a real-time cloud-based platform for both monitoring the respiration rate and breath pattern classification, remotely. The proposed system is designed particularly for patients with breathing problems (e.g., respiratory complications after surgery) or sleep disorders. Our system includes calibrated accelerometer sensor, Bluetooth Low Energy (BLE) and cloud-computing model. We also suggest a procedure to improve the accuracy of respiration rate for patients at rest positions. The overall error in the respiration rate calculation is obtained 0.53% considering SPR-BTA spirometer as the reference. Five types of respiration disorders, Bradypnea, Tachypnea, Cheyne-Stokes, Kussmaul, and Biot's breathing are classified based on hierarchical Support Vector Machine (SVM) with seven different features. We have evaluated the performance of the proposed classification while it is individualized to every subject (case 1) as well as considering all subjects (case 2). Since the selection of kernel function is a key factor to decide SVM's performance, in this paper three different kernel functions are evaluated. The experiments are conducted with 11 subjects and the average accuracy of 94.52% for case 1 and the accuracy of 81.29% for case 2 are achieved based on Radial Basis Function (RBF). 
Finally, a performance evaluation has been done for normal and impaired subjects considering sensitivity, specificity and G-mean parameters of different kernel functions.}, } @article {pmid24959631, year = {2014}, author = {Xie, K and Yang, Y and Zhang, L and Jing, M and Xin, Y and Li, Z}, title = {Global detection of live virtual machine migration based on cellular neural networks.}, journal = {TheScientificWorldJournal}, volume = {2014}, number = {}, pages = {829614}, pmid = {24959631}, issn = {1537-744X}, mesh = {Algorithms ; *Artificial Intelligence ; *Neural Networks, Computer ; }, abstract = {In order to meet the demands of operation monitoring of large scale, autoscaling, and heterogeneous virtual resources in the existing cloud computing, a new method of live virtual machine (VM) migration detection algorithm based on the cellular neural networks (CNNs), is presented. Through analyzing the detection process, the parameter relationship of CNN is mapped as an optimization problem, in which improved particle swarm optimization algorithm based on bubble sort is used to solve the problem. Experimental results demonstrate that the proposed method can display the VM migration processing intuitively. 
Compared with the best fit heuristic algorithm, this approach reduces the processing time, and emerging evidence has indicated that this new approach is affordable to parallelism and analog very large scale integration (VLSI) implementation allowing the VM migration detection to be performed better.}, } @article {pmid24957398, year = {2014}, author = {Ratnam, KA and Dominic, PD and Ramayah, T}, title = {A structural equation modeling approach for the adoption of cloud computing to enhance the Malaysian healthcare sector.}, journal = {Journal of medical systems}, volume = {38}, number = {8}, pages = {82}, pmid = {24957398}, issn = {1573-689X}, mesh = {Computer Security ; Cooperative Behavior ; Efficiency, Organizational ; Health Care Sector/*organization & administration ; Information Management/*organization & administration ; *Internet ; Malaysia ; Models, Theoretical ; *Systems Integration ; }, abstract = {The investments and costs of infrastructure, communication, medical-related equipments, and software within the global healthcare ecosystem portray a rather significant increase. The emergence of this proliferation is then expected to grow. As a result, information and cross-system communication became challenging due to the detached independent systems and subsystems which are not connected. The overall model fit expending over a sample size of 320 were tested with structural equation modelling (SEM) using AMOS 20.0 as the modelling tool. SPSS 20.0 is used to analyse the descriptive statistics and dimension reliability. Results of the study show that system utilisation and system impact dimension influences the overall level of services of the healthcare providers. In addition to that, the findings also suggest that systems integration and security plays a pivotal role for IT resources in healthcare organisations. 
Through this study, a basis for investigation on the need to improvise the Malaysian healthcare ecosystem and the introduction of a cloud computing platform to host the national healthcare information exchange has been successfully established.}, } @article {pmid24948997, year = {2014}, author = {Bajwa, M}, title = {Emerging 21(st) Century Medical Technologies.}, journal = {Pakistan journal of medical sciences}, volume = {30}, number = {3}, pages = {649-655}, pmid = {24948997}, issn = {1682-024X}, abstract = {Although several medical technologies have been around since decades and are in the continuous process of development, some latest technologies are changing the way medicine would be practiced in the future. These technologies would allow medical practice from anywhere, any time and from any device. These include smart phones, Tablet PCs, Touch screens, digital ink, voice recognition, Electronic Health Records (EHRs), Health Information Exchange (HIE), Nationwide Health Information Network (NwHIN), Personal Health Records (PHRs), patient portals, Nanomedicine, genome-based personalized medicine, Geographical Positioning System (GPS), Radiofrequency Identification (RFID), Telemedicine, clinical decision support (CDS), mobile home healthcare, cloud computing, and social media, to name a few significant.}, } @article {pmid24943526, year = {2014}, author = {Kuo, MH and Borycki, E and Kushniruk, A and Huang, YM and Hung, SH}, title = {A cloud computing based platform for sleep behavior and chronic diseases collaborative research.}, journal = {Studies in health technology and informatics}, volume = {201}, number = {}, pages = {63-70}, pmid = {24943526}, issn = {1879-8365}, mesh = {Biomedical Research/*organization & administration ; Chronic Disease/*classification ; Cooperative Behavior ; Electronic Health Records/*organization & administration ; Humans ; Information Storage and Retrieval/methods ; Internet/*organization & administration ; Polysomnography/*methods ; 
*Sleep Stages ; Sleep Wake Disorders/diagnosis/*physiopathology ; Telemedicine/organization & administration ; }, abstract = {The objective of this study is to propose a Cloud Computing based platform for sleep behavior and chronic disease collaborative research. The platform consists of two main components: (1) a sensing bed sheet with textile sensors to automatically record patient's sleep behaviors and vital signs, and (2) a service-oriented cloud computing architecture (SOCCA) that provides a data repository and allows for sharing and analysis of collected data. Also, we describe our systematic approach to implementing the SOCCA. We believe that the new cloud-based platform can provide nurse and other health professional researchers located in differing geographic locations with a cost effective, flexible, secure and privacy-preserved research environment.}, } @article {pmid24941546, year = {2014}, author = {Chow, F and Muftu, A and Shorter, R}, title = {Virtualization and cloud computing in dentistry.}, journal = {Journal of the Massachusetts Dental Society}, volume = {63}, number = {1}, pages = {14-17}, pmid = {24941546}, issn = {0025-4800}, mesh = {Computer Security ; Computer Systems ; Electronic Health Records ; *Health Information Management ; Health Insurance Portability and Accountability Act ; Humans ; Information Storage and Retrieval ; *Internet ; *Practice Management, Dental ; *Software ; United States ; *User-Computer Interface ; }, abstract = {The use of virtualization and cloud computing has changed the way we use computers. Virtualization is a method of placing software called a hypervisor on the hardware of a computer or a host operating system. It allows a guest operating system to run on top of the physical computer with a virtual machine (i.e., virtual computer). Virtualization allows multiple virtual computers to run on top of one physical computer and to share its hardware resources, such as printers, scanners, and modems. 
This increases the efficient use of the computer by decreasing costs (e.g., hardware, electricity administration, and management) since only one physical computer is needed and running. This virtualization platform is the basis for cloud computing. It has expanded into areas of server and storage virtualization. One of the commonly used dental storage systems is cloud storage. Patient information is encrypted as required by the Health Insurance Portability and Accountability Act (HIPAA) and stored on off-site private cloud services for a monthly service fee. As computer costs continue to increase, so too will the need for more storage and processing power. Virtual and cloud computing will be a method for dentists to minimize costs and maximize computer efficiency in the near future. This article will provide some useful information on current uses of cloud computing.}, } @article {pmid24932699, year = {2014}, author = {Chen, GH and Chen, WY and Yen, YC and Wang, CW and Chang, HT and Chen, CF}, title = {Detection of mercury(II) ions using colorimetric gold nanoparticles on paper-based analytical devices.}, journal = {Analytical chemistry}, volume = {86}, number = {14}, pages = {6843-6849}, doi = {10.1021/ac5008688}, pmid = {24932699}, issn = {1520-6882}, mesh = {Cell Phone ; Colorimetry/instrumentation/*methods ; DNA, Single-Stranded ; Gold/chemistry ; Limit of Detection ; Mercury/*analysis ; Metal Nanoparticles/*chemistry ; Microfluidic Analytical Techniques ; Paper ; Rivers ; Thymine/chemistry ; Water/analysis/chemistry ; Water Pollutants, Chemical/analysis ; }, abstract = {An on-field colorimetric sensing strategy employing gold nanoparticles (AuNPs) and a paper-based analytical platform was investigated for mercury ion (Hg(2+)) detection at water sources. 
By utilizing thymine-Hg(2+)-thymine (T-Hg(2+)-T) coordination chemistry, label-free detection oligonucleotide sequences were attached to unmodified gold nanoparticles to provide rapid mercury ion sensing without complicated and time-consuming thiolated or other costly labeled probe preparation processes. Not only is this strategy's sensing mechanism specific toward Hg(2+), rather than other metal ions, but also the conformational change in the detection oligonucleotide sequences introduces different degrees of AuNP aggregation that causes the color of AuNPs to exhibit a mixture variance. To eliminate the use of sophisticated equipment and minimize the power requirement for data analysis and transmission, the color variance of multiple detection results were transferred and concentrated on cellulose-based paper analytical devices, and the data were subsequently transmitted for the readout and storage of results using cloud computing via a smartphone. As a result, a detection limit of 50 nM for Hg(2+) spiked pond and river water could be achieved. Furthermore, multiple tests could be performed simultaneously with a 40 min turnaround time. 
These results suggest that the proposed platform possesses the capability for sensitive and high-throughput on-site mercury pollution monitoring in resource-constrained settings.}, } @article {pmid24930141, year = {2014}, author = {Mrozek, D and Małysiak-Mrozek, B and Kłapciński, A}, title = {Cloud4Psi: cloud computing for 3D protein structure similarity searching.}, journal = {Bioinformatics (Oxford, England)}, volume = {30}, number = {19}, pages = {2822-2825}, pmid = {24930141}, issn = {1367-4811}, mesh = {Algorithms ; Computational Biology/*methods ; Computer Systems ; *Protein Conformation ; Proteins/*chemistry ; Software ; }, abstract = {SUMMARY: Popular methods for 3D protein structure similarity searching, especially those that generate high-quality alignments such as Combinatorial Extension (CE) and Flexible structure Alignment by Chaining Aligned fragment pairs allowing Twists (FATCAT) are still time consuming. As a consequence, performing similarity searching against large repositories of structural data requires increased computational resources that are not always available. Cloud computing provides huge amounts of computational power that can be provisioned on a pay-as-you-go basis. We have developed the cloud-based system that allows scaling of the similarity searching process vertically and horizontally. Cloud4Psi (Cloud for Protein Similarity) was tested in the Microsoft Azure cloud environment and provided good, almost linearly proportional acceleration when scaled out onto many computational units.

Cloud4Psi is available as Software as a Service for testing purposes at: http://cloud4psi.cloudapp.net/. For source code and software availability, please visit the Cloud4Psi project home page at http://zti.polsl.pl/dmrozek/science/cloud4psi.htm.}, } @article {pmid24917804, year = {2014}, author = {Zao, JK and Gan, TT and You, CK and Chung, CE and Wang, YT and Rodríguez Méndez, SJ and Mullen, T and Yu, C and Kothe, C and Hsiao, CT and Chu, SL and Shieh, CK and Jung, TP}, title = {Pervasive brain monitoring and data sharing based on multi-tier distributed computing and linked data technology.}, journal = {Frontiers in human neuroscience}, volume = {8}, number = {}, pages = {370}, pmid = {24917804}, issn = {1662-5161}, abstract = {EEG-based Brain-computer interfaces (BCI) are facing basic challenges in real-world applications. The technical difficulties in developing truly wearable BCI systems that are capable of making reliable real-time prediction of users' cognitive states in dynamic real-life situations may seem almost insurmountable at times. Fortunately, recent advances in miniature sensors, wireless communication and distributed computing technologies offered promising ways to bridge these chasms. In this paper, we report an attempt to develop a pervasive on-line EEG-BCI system using state-of-art technologies including multi-tier Fog and Cloud Computing, semantic Linked Data search, and adaptive prediction/classification models. To verify our approach, we implement a pilot system by employing wireless dry-electrode EEG headsets and MEMS motion sensors as the front-end devices, Android mobile phones as the personal user interfaces, compact personal computers as the near-end Fog Servers and the computer clusters hosted by the Taiwan National Center for High-performance Computing (NCHC) as the far-end Cloud Servers. 
We succeeded in conducting synchronous multi-modal global data streaming in March and then running a multi-player on-line EEG-BCI game in September, 2013. We are currently working with the ARL Translational Neuroscience Branch to use our system in real-life personal stress monitoring and the UCSD Movement Disorder Center to conduct in-home Parkinson's disease patient monitoring experiments. We shall proceed to develop the necessary BCI ontology and introduce automatic semantic annotation and progressive model refinement capability to our system.}, } @article {pmid24913605, year = {2014}, author = {Wilkinson, SR and Almeida, JS}, title = {QMachine: commodity supercomputing in web browsers.}, journal = {BMC bioinformatics}, volume = {15}, number = {}, pages = {176}, pmid = {24913605}, issn = {1471-2105}, support = {5T32CA096520-05/CA/NCI NIH HHS/United States ; 5UL1RR025777-03/RR/NCRR NIH HHS/United States ; }, mesh = {Computational Biology/methods ; Genome ; Genomics/methods ; Humans ; *Software Design ; Streptococcus pneumoniae/genetics ; *Web Browser ; }, abstract = {BACKGROUND: Ongoing advancements in cloud computing provide novel opportunities in scientific computing, especially for distributed workflows. Modern web browsers can now be used as high-performance workstations for querying, processing, and visualizing genomics' "Big Data" from sources like The Cancer Genome Atlas (TCGA) and the International Cancer Genome Consortium (ICGC) without local software installation or configuration. The design of QMachine (QM) was driven by the opportunity to use this pervasive computing model in the context of the Web of Linked Data in Biomedicine.

RESULTS: QM is an open-sourced, publicly available web service that acts as a messaging system for posting tasks and retrieving results over HTTP. The illustrative application described here distributes the analyses of 20 Streptococcus pneumoniae genomes for shared suffixes. Because all analytical and data retrieval tasks are executed by volunteer machines, few server resources are required. Any modern web browser can submit those tasks and/or volunteer to execute them without installing any extra plugins or programs. A client library provides high-level distribution templates including MapReduce. This stark departure from the current reliance on expensive server hardware running "download and install" software has already gathered substantial community interest, as QM received more than 2.2 million API calls from 87 countries in 12 months.

CONCLUSIONS: QM was found adequate to deliver the sort of scalable bioinformatics solutions that computation- and data-intensive workflows require. Paradoxically, the sandboxed execution of code by web browsers was also found to enable them, as compute nodes, to address critical privacy concerns that characterize biomedical environments.}, } @article {pmid24913249, year = {2014}, author = {Britton, D and Lloyd, SL}, title = {How to deal with petabytes of data: the LHC Grid project.}, journal = {Reports on progress in physics. Physical Society (Great Britain)}, volume = {77}, number = {6}, pages = {065902}, doi = {10.1088/0034-4885/77/6/065902}, pmid = {24913249}, issn = {1361-6633}, abstract = {We review the Grid computing system developed by the international community to deal with the petabytes of data coming from the Large Hadron Collider at CERN in Geneva with particular emphasis on the ATLAS experiment and the UK Grid project, GridPP. Although these developments were started over a decade ago, this article explains their continued relevance as part of the 'Big Data' problem and how the Grid has been forerunner of today's cloud computing.}, } @article {pmid24910506, year = {2014}, author = {Yoshida, H and Wu, Y and Cai, W}, title = {Analysis of scalability of high-performance 3D image processing platform for virtual colonoscopy.}, journal = {Proceedings of SPIE--the International Society for Optical Engineering}, volume = {9039}, number = {}, pages = {90390U}, pmid = {24910506}, issn = {0277-786X}, support = {R01 CA131718/CA/NCI NIH HHS/United States ; R01 CA166816/CA/NCI NIH HHS/United States ; }, abstract = {One of the key challenges in three-dimensional (3D) medical imaging is to enable the fast turn-around time, which is often required for interactive or real-time response. This inevitably requires not only high computational power but also high memory bandwidth due to the massive amount of data that need to be processed. 
For this purpose, we previously developed a software platform for high-performance 3D medical image processing, called HPC 3D-MIP platform, which employs increasingly available and affordable commodity computing systems such as the multicore, cluster, and cloud computing systems. To achieve scalable high-performance computing, the platform employed size-adaptive, distributable block volumes as a core data structure for efficient parallelization of a wide range of 3D-MIP algorithms, supported task scheduling for efficient load distribution and balancing, and consisted of a layered parallel software libraries that allow image processing applications to share the common functionalities. We evaluated the performance of the HPC 3D-MIP platform by applying it to computationally intensive processes in virtual colonoscopy. Experimental results showed a 12-fold performance improvement on a workstation with 12-core CPUs over the original sequential implementation of the processes, indicating the efficiency of the platform. 
Analysis of performance scalability based on the Amdahl's law for symmetric multicore chips showed the potential of a high performance scalability of the HPC 3D-MIP platform when a larger number of cores is available.}, } @article {pmid24909817, year = {2014}, author = {Jiménez, J and López, AM and Cruz, J and Esteban, FJ and Navas, J and Villoslada, P and Ruiz de Miras, J}, title = {A Web platform for the interactive visualization and analysis of the 3D fractal dimension of MRI data.}, journal = {Journal of biomedical informatics}, volume = {51}, number = {}, pages = {176-190}, doi = {10.1016/j.jbi.2014.05.011}, pmid = {24909817}, issn = {1532-0480}, mesh = {Algorithms ; Brain/*pathology ; Brain Diseases/*pathology ; Fractals ; Humans ; Image Interpretation, Computer-Assisted/methods ; Imaging, Three-Dimensional/*methods ; *Internet ; Pattern Recognition, Automated/*methods ; Reproducibility of Results ; Sensitivity and Specificity ; *Software ; *User-Computer Interface ; }, abstract = {This study presents a Web platform (http://3dfd.ujaen.es) for computing and analyzing the 3D fractal dimension (3DFD) from volumetric data in an efficient, visual and interactive way. The Web platform is specially designed for working with magnetic resonance images (MRIs) of the brain. The program estimates the 3DFD by calculating the 3D box-counting of the entire volume of the brain, and also of its 3D skeleton. All of this is done in a graphical, fast and optimized way by using novel technologies like CUDA and WebGL. The usefulness of the Web platform presented is demonstrated by its application in a case study where an analysis and characterization of groups of 3D MR images is performed for three neurodegenerative diseases: Multiple Sclerosis, Intrauterine Growth Restriction and Alzheimer's disease. 
To the best of our knowledge, this is the first Web platform that allows the users to calculate, visualize, analyze and compare the 3DFD from MRI images in the cloud.}, } @article {pmid24904400, year = {2014}, author = {Sherif, T and Rioux, P and Rousseau, ME and Kassis, N and Beck, N and Adalat, R and Das, S and Glatard, T and Evans, AC}, title = {CBRAIN: a web-based, distributed computing platform for collaborative neuroimaging research.}, journal = {Frontiers in neuroinformatics}, volume = {8}, number = {}, pages = {54}, pmid = {24904400}, issn = {1662-5196}, abstract = {The Canadian Brain Imaging Research Platform (CBRAIN) is a web-based collaborative research platform developed in response to the challenges raised by data-heavy, compute-intensive neuroimaging research. CBRAIN offers transparent access to remote data sources, distributed computing sites, and an array of processing and visualization tools within a controlled, secure environment. Its web interface is accessible through any modern browser and uses graphical interface idioms to reduce the technical expertise required to perform large-scale computational analyses. CBRAIN's flexible meta-scheduling has allowed the incorporation of a wide range of heterogeneous computing sites, currently including nine national research High Performance Computing (HPC) centers in Canada, one in Korea, one in Germany, and several local research servers. CBRAIN leverages remote computing cycles and facilitates resource-interoperability in a transparent manner for the end-user. Compared with typical grid solutions available, our architecture was designed to be easily extendable and deployed on existing remote computing sites with no tool modification, administrative intervention, or special software/hardware configuration. As of October 2013, CBRAIN serves over 200 users spread across 53 cities in 17 countries. The platform is built as a generic framework that can accept data and analysis tools from any discipline. 
However, its current focus is primarily on neuroimaging research and studies of neurological diseases such as Autism, Parkinson's and Alzheimer's diseases, Multiple Sclerosis as well as on normal brain structure and development. This technical report presents the CBRAIN Platform, its current deployment and usage and future direction.}, } @article {pmid24897343, year = {2014}, author = {Chung, WC and Chen, CC and Ho, JM and Lin, CY and Hsu, WL and Wang, YC and Lee, DT and Lai, F and Huang, CW and Chang, YJ}, title = {CloudDOE: a user-friendly tool for deploying Hadoop clouds and analyzing high-throughput sequencing data with MapReduce.}, journal = {PloS one}, volume = {9}, number = {6}, pages = {e98146}, pmid = {24897343}, issn = {1932-6203}, mesh = {Algorithms ; Computational Biology/*methods ; High-Throughput Nucleotide Sequencing/*methods ; Sequence Analysis, DNA/*methods ; *Software ; }, abstract = {BACKGROUND: Explosive growth of next-generation sequencing data has resulted in ultra-large-scale data sets and ensuing computational problems. Cloud computing provides an on-demand and scalable environment for large-scale data analysis. Using a MapReduce framework, data and workload can be distributed via a network to computers in the cloud to substantially reduce computational latency. Hadoop/MapReduce has been successfully adopted in bioinformatics for genome assembly, mapping reads to genomes, and finding single nucleotide polymorphisms. Major cloud providers offer Hadoop cloud services to their users. However, it remains technically challenging to deploy a Hadoop cloud for those who prefer to run MapReduce programs in a cluster without built-in Hadoop/MapReduce.

RESULTS: We present CloudDOE, a platform-independent software package implemented in Java. CloudDOE encapsulates technical details behind a user-friendly graphical interface, thus liberating scientists from having to perform complicated operational procedures. Users are guided through the user interface to deploy a Hadoop cloud within in-house computing environments and to run applications specifically targeted for bioinformatics, including CloudBurst, CloudBrush, and CloudRS. One may also use CloudDOE on top of a public cloud. CloudDOE consists of three wizards, i.e., Deploy, Operate, and Extend wizards. Deploy wizard is designed to aid the system administrator to deploy a Hadoop cloud. It installs Java runtime environment version 1.6 and Hadoop version 0.20.203, and initiates the service automatically. Operate wizard allows the user to run a MapReduce application on the dashboard list. To extend the dashboard list, the administrator may install a new MapReduce application using Extend wizard.

CONCLUSIONS: CloudDOE is a user-friendly tool for deploying a Hadoop cloud. Its smart wizards substantially reduce the complexity and costs of deployment, execution, enhancement, and management. Interested users may collaborate to improve the source code of CloudDOE to further incorporate more MapReduce bioinformatics tools into CloudDOE and support next-generation big data open source tools, e.g., Hadoop BigTop and Spark.

AVAILABILITY: CloudDOE is distributed under Apache License 2.0 and is freely available at http://clouddoe.iis.sinica.edu.tw/.}, } @article {pmid24896333, year = {2014}, author = {Brown, DV and Mantamadiotis, T}, title = {Insights into the next generation of cancer stem cell research.}, journal = {Frontiers in bioscience (Landmark edition)}, volume = {19}, number = {7}, pages = {1015-1027}, doi = {10.2741/4264}, pmid = {24896333}, issn = {2768-6698}, mesh = {Biomedical Research/methods/trends ; Computational Biology/methods ; Gene Expression Profiling/*methods ; Genomics/*methods ; High-Throughput Nucleotide Sequencing/*methods ; Humans ; Neoplasms/*genetics/pathology/therapy ; Neoplastic Stem Cells/*metabolism/pathology ; }, abstract = {The understanding of how cancer stem cells (CSCs) or tumor-initiating cells (TICs) behave is important in understanding how tumors are initiated and how they recur following initial treatment. More specifically to understand how CSCs behave, the different signaling mechanisms orchestrating their growth, cell cycle dynamics, differentiation, trans-differentiation and survival following cytotoxic challenges need to be deciphered. Ultimately this will advance the ability to predict how these cells will behave in individual patients and under different therapeutic conditions. Second or next-generation sequencing (NGS) capabilities have provided researchers a window into the molecular and genetic clockwork of CSCs at an unprecedented resolution and depth, with throughput capabilities allowing sequencing of hundreds of samples in relatively short timeframes and at relatively modest costs. More specifically, NGS gives us the ability to accurately determine the genomic and transcriptomic nature of CSCs. 
These technologies and the publicly available cancer genome databases, together with the ever increasing computing power available to researchers locally or via cloud-based servers are changing the way biomedical cancer research is approached.}, } @article {pmid24894600, year = {2014}, author = {Maji, RK and Sarkar, A and Khatua, S and Dasgupta, S and Ghosh, Z}, title = {PVT: an efficient computational procedure to speed up next-generation sequence analysis.}, journal = {BMC bioinformatics}, volume = {15}, number = {}, pages = {167}, pmid = {24894600}, issn = {1471-2105}, mesh = {Genomics/methods ; High-Throughput Nucleotide Sequencing/*methods ; Humans ; Software ; Time Factors ; }, abstract = {BACKGROUND: High-throughput Next-Generation Sequencing (NGS) techniques are advancing genomics and molecular biology research. This technology generates substantially large data which puts up a major challenge to the scientists for an efficient, cost and time effective solution to analyse such data. Further, for the different types of NGS data, there are certain common challenging steps involved in analysing those data. Spliced alignment is one such fundamental step in NGS data analysis which is extremely computational intensive as well as time consuming. There exists serious problem even with the most widely used spliced alignment tools. TopHat is one such widely used spliced alignment tools which although supports multithreading, does not efficiently utilize computational resources in terms of CPU utilization and memory. Here we have introduced PVT (Pipelined Version of TopHat) where we take up a modular approach by breaking TopHat's serial execution into a pipeline of multiple stages, thereby increasing the degree of parallelization and computational resource utilization. Thus we address the discrepancies in TopHat so as to analyze large NGS data efficiently.

RESULTS: We analysed the SRA dataset (SRX026839 and SRX026838) consisting of single end reads and SRA data SRR1027730 consisting of paired-end reads. We used TopHat v2.0.8 to analyse these datasets and noted the CPU usage, memory footprint and execution time during spliced alignment. With this basic information, we designed PVT, a pipelined version of TopHat that removes the redundant computational steps during 'spliced alignment' and breaks the job into a pipeline of multiple stages (each comprising of different step(s)) to improve its resource utilization, thus reducing the execution time.

CONCLUSIONS: PVT provides an improvement over TopHat for spliced alignment of NGS data analysis. PVT thus resulted in the reduction of the execution time to ~23% for the single end read dataset. Further, PVT designed for paired end reads showed an improved performance of ~41% over TopHat (for the chosen data) with respect to execution time. Moreover we propose PVT-Cloud which implements PVT pipeline in cloud computing system.}, } @article {pmid24892070, year = {2014}, author = {Stantchev, V and Colomo-Palacios, R and Niedermayer, M}, title = {Cloud computing based systems for healthcare.}, journal = {TheScientificWorldJournal}, volume = {2014}, number = {}, pages = {692619}, doi = {10.1155/2014/692619}, pmid = {24892070}, issn = {1537-744X}, mesh = {*Health Information Systems ; *Information Storage and Retrieval ; }, } @article {pmid24885806, year = {2014}, author = {Onsongo, G and Erdmann, J and Spears, MD and Chilton, J and Beckman, KB and Hauge, A and Yohe, S and Schomaker, M and Bower, M and Silverstein, KA and Thyagarajan, B}, title = {Implementation of Cloud based next generation sequencing data analysis in a clinical laboratory.}, journal = {BMC research notes}, volume = {7}, number = {}, pages = {314}, pmid = {24885806}, issn = {1756-0500}, mesh = {*Clinical Laboratory Techniques/economics ; High-Throughput Nucleotide Sequencing/economics/*methods ; Humans ; *Internet/economics ; Reproducibility of Results ; Sequence Analysis, DNA/economics/*methods ; *Statistics as Topic ; }, abstract = {BACKGROUND: The introduction of next generation sequencing (NGS) has revolutionized molecular diagnostics, though several challenges remain limiting the widespread adoption of NGS testing into clinical practice. One such difficulty includes the development of a robust bioinformatics pipeline that can handle the volume of data generated by high-throughput sequencing in a cost-effective manner. 
Analysis of sequencing data typically requires a substantial level of computing power that is often cost-prohibitive to most clinical diagnostics laboratories.

FINDINGS: To address this challenge, our institution has developed a Galaxy-based data analysis pipeline which relies on a web-based, cloud-computing infrastructure to process NGS data and identify genetic variants. It provides additional flexibility, needed to control storage costs, resulting in a pipeline that is cost-effective on a per-sample basis. It does not require the usage of EBS disk to run a sample.

CONCLUSIONS: We demonstrate the validation and feasibility of implementing this bioinformatics pipeline in a molecular diagnostics laboratory. Four samples were analyzed in duplicate pairs and showed 100% concordance in mutations identified. This pipeline is currently being used in the clinic and all identified pathogenic variants confirmed using Sanger sequencing further validating the software.}, } @article {pmid24870522, year = {2014}, author = {Drake, N}, title = {Cloud computing beckons scientists.}, journal = {Nature}, volume = {509}, number = {7502}, pages = {543-544}, doi = {10.1038/509543a}, pmid = {24870522}, issn = {1476-4687}, mesh = {Brain ; Confidentiality ; Databases, Factual/economics ; Humans ; Information Dissemination ; Internet/economics/*statistics & numerical data ; Research/economics/*trends ; Research Personnel/education ; }, } @article {pmid24853034, year = {2014}, author = {Bellazzi, R}, title = {Big data and biomedical informatics: a challenging opportunity.}, journal = {Yearbook of medical informatics}, volume = {9}, number = {1}, pages = {8-13}, pmid = {24853034}, issn = {2364-0502}, mesh = {*Computational Biology ; *Data Mining ; *Databases, Factual ; Medical Informatics ; Reproducibility of Results ; }, abstract = {Big data are receiving an increasing attention in biomedicine and healthcare. It is therefore important to understand the reason why big data are assuming a crucial role for the biomedical informatics community. The capability of handling big data is becoming an enabler to carry out unprecedented research studies and to implement new models of healthcare delivery. Therefore, it is first necessary to deeply understand the four elements that constitute big data, namely Volume, Variety, Velocity, and Veracity, and their meaning in practice. Then, it is mandatory to understand where big data are present, and where they can be beneficially collected. 
There are research fields, such as translational bioinformatics, which need to rely on big data technologies to withstand the shock wave of data that is generated every day. Other areas, ranging from epidemiology to clinical care, can benefit from the exploitation of the large amounts of data that are nowadays available, from personal monitoring to primary care. However, building big data-enabled systems carries on relevant implications in terms of reproducibility of research studies and management of privacy and data access; proper actions should be taken to deal with these issues. An interesting consequence of the big data scenario is the availability of new software, methods, and tools, such as map-reduce, cloud computing, and concept drift machine learning algorithms, which will not only contribute to big data research, but may be beneficial in many biomedical informatics applications. The way forward with the big data opportunity will require properly applied engineering principles to design studies and applications, to avoid preconceptions or over-enthusiasms, to fully exploit the available technologies, and to improve data processing and data management regulations.}, } @article {pmid24845651, year = {2014}, author = {Wiewiórka, MS and Messina, A and Pacholewska, A and Maffioletti, S and Gawrysiak, P and Okoniewski, MJ}, title = {SparkSeq: fast, scalable and cloud-ready tool for the interactive genomic data analysis with nucleotide precision.}, journal = {Bioinformatics (Oxford, England)}, volume = {30}, number = {18}, pages = {2652-2653}, doi = {10.1093/bioinformatics/btu343}, pmid = {24845651}, issn = {1367-4811}, mesh = {Algorithms ; Genomics/*methods ; High-Throughput Nucleotide Sequencing/*methods ; *Internet ; Nucleotides/*genetics ; *Software ; Statistics as Topic/*methods ; Time Factors ; }, abstract = {UNLABELLED: Many time-consuming analyses of next-generation sequencing data can be addressed with modern cloud computing. 
The Apache Hadoop-based solutions have become popular in genomics because of their scalability in a cloud infrastructure. So far, most of these tools have been used for batch data processing rather than interactive data querying. The SparkSeq software has been created to take advantage of a new MapReduce framework, Apache Spark, for next-generation sequencing data. SparkSeq is a general-purpose, flexible and easily extendable library for genomic cloud computing. It can be used to build genomic analysis pipelines in Scala and run them in an interactive way. SparkSeq opens up the possibility of customized ad hoc secondary analyses and iterative machine learning algorithms. This article demonstrates its scalability and overall fast performance by running the analyses of sequencing datasets. Tests of SparkSeq also prove that the use of cache and HDFS block size can be tuned for the optimal performance on multiple worker nodes.

Available under open source Apache 2.0 license: https://bitbucket.org/mwiewiorka/sparkseq/.}, } @article {pmid24842038, year = {2014}, author = {Palmer, TN}, title = {More reliable forecasts with less precise computations: a fast-track route to cloud-resolved weather and climate simulators?.}, journal = {Philosophical transactions. Series A, Mathematical, physical, and engineering sciences}, volume = {372}, number = {2018}, pages = {20130391}, pmid = {24842038}, issn = {1364-503X}, abstract = {This paper sets out a new methodological approach to solving the equations for simulating and predicting weather and climate. In this approach, the conventionally hard boundary between the dynamical core and the sub-grid parametrizations is blurred. This approach is motivated by the relatively shallow power-law spectrum for atmospheric energy on scales of hundreds of kilometres and less. It is first argued that, because of this, the closure schemes for weather and climate simulators should be based on stochastic-dynamic systems rather than deterministic formulae. Second, as high-wavenumber elements of the dynamical core will necessarily inherit this stochasticity during time integration, it is argued that the dynamical core will be significantly over-engineered if all computations, regardless of scale, are performed completely deterministically and if all variables are represented with maximum numerical precision (in practice using double-precision floating-point numbers). 
As the era of exascale computing is approached, an energy- and computationally efficient approach to cloud-resolved weather and climate simulation is described where determinism and numerical precision are focused on the largest scales only.}, } @article {pmid24825705, year = {2014}, author = {Nitzlnader, M and Schreier, G}, title = {Patient identity management for secondary use of biomedical research data in a distributed computing environment.}, journal = {Studies in health technology and informatics}, volume = {198}, number = {}, pages = {211-218}, pmid = {24825705}, issn = {1879-8365}, mesh = {Austria ; Biomedical Research/*organization & administration ; *Computer Security ; *Confidentiality ; Electronic Health Records/*organization & administration ; Information Storage and Retrieval/*methods ; Medical Record Linkage/*methods ; Patient Identification Systems/*organization & administration ; }, abstract = {Dealing with data from different source domains is of increasing importance in today's large scale biomedical research endeavours. Within the European Network for Cancer research in Children and Adolescents (ENCCA) a solution to share such data for secondary use will be established. In this paper the solution arising from the aims of the ENCCA project and regulatory requirements concerning data protection and privacy is presented. Since the details of secondary biomedical dataset utilisation are often not known in advance, data protection regulations are met with an identity management concept that facilitates context-specific pseudonymisation and a way of data aggregation using a hidden reference table later on. Phonetic hashing is proposed to prevent duplicated patient registration and re-identification of patients is possible via a trusted third party only. 
Finally, the solution architecture allows for implementation in a distributed computing environment, including cloud-based elements.}, } @article {pmid24796822, year = {2014}, author = {Lin, CY and Peng, KL and Chen, J and Tsai, JY and Tseng, YC and Yang, JR and Chen, MH}, title = {Improvements in dental care using a new mobile app with cloud services.}, journal = {Journal of the Formosan Medical Association = Taiwan yi zhi}, volume = {113}, number = {10}, pages = {742-749}, doi = {10.1016/j.jfma.2014.02.009}, pmid = {24796822}, issn = {0929-6646}, mesh = {Appointments and Schedules ; Dental Care/*methods ; Dental Prosthesis Repair/statistics & numerical data ; *Dentist-Patient Relations ; Dentists/statistics & numerical data ; Health Communication/methods ; Health Services Accessibility/*statistics & numerical data ; Humans ; *Mobile Applications ; Quality Improvement/*statistics & numerical data ; Surveys and Questionnaires ; }, abstract = {BACKGROUND/PURPOSE: Traditional dental care, which includes long-term oral hygiene maintenance and scheduled dental appointments, requires effective communication between dentists and patients. In this study, a new system was designed to provide a platform for direct communication between dentists and patients.

METHODS: A new mobile app, Dental Calendar, combined with cloud services specific for dental care was created by a team constituted by dentists, computer scientists, and service scientists. This new system would remind patients about every scheduled appointment, and help them take pictures of their own oral cavity parts that require dental treatment and send them to dentists along with a symptom description. Dentists, by contrast, could confirm or change appointments easily and provide professional advice to their patients immediately. In this study, 26 dentists and 32 patients were evaluated by a questionnaire containing eight dental-service items before and after using this system. Paired sample t test was used for statistical analysis.

RESULTS: After using the Dental Calendar combined with cloud services, dentists were able to improve appointment arrangements significantly, taking care of the patients with sudden worse prosthesis (p < 0.05). Patients also achieved significant improvement in appointment reminder systems, rearrangement of appointments in case of sudden worse prosthesis, and establishment of a direct relationship with dentists (p < 0.05).

CONCLUSION: Our new mobile app, Dental Calendar, in combination with cloud services, provides efficient service to both dentists and patients, and helps establish a better relationship between them. It also helps dentists to arrange appointments for patients with sudden worsening of prosthesis function.}, } @article {pmid24782753, year = {2014}, author = {Da Mota, B and Tudoran, R and Costan, A and Varoquaux, G and Brasche, G and Conrod, P and Lemaitre, H and Paus, T and Rietschel, M and Frouin, V and Poline, JB and Antoniu, G and Thirion, B and , }, title = {Machine learning patterns for neuroimaging-genetic studies in the cloud.}, journal = {Frontiers in neuroinformatics}, volume = {8}, number = {}, pages = {31}, pmid = {24782753}, issn = {1662-5196}, abstract = {Brain imaging is a natural intermediate phenotype to understand the link between genetic information and behavior or brain pathologies risk factors. Massive efforts have been made in the last few years to acquire high-dimensional neuroimaging and genetic data on large cohorts of subjects. The statistical analysis of such data is carried out with increasingly sophisticated techniques and represents a great computational challenge. Fortunately, increasing computational power in distributed architectures can be harnessed, if new neuroinformatics infrastructures are designed and training to use these new tools is provided. Combining a MapReduce framework (TomusBLOB) with machine learning algorithms (Scikit-learn library), we design a scalable analysis tool that can deal with non-parametric statistics on high-dimensional data. End-users describe the statistical procedure to perform and can then test the model on their own computers before running the very same code in the cloud at a larger scale. We illustrate the potential of our approach on real data with an experiment showing how the functional signal in subcortical brain regions can be significantly fit with genome-wide genotypes. 
This experiment demonstrates the scalability and the reliability of our framework in the cloud with a 2 weeks deployment on hundreds of virtual machines.}, } @article {pmid24752294, year = {2014}, author = {Li, J and Doyle, MA and Saeed, I and Wong, SQ and Mar, V and Goode, DL and Caramia, F and Doig, K and Ryland, GL and Thompson, ER and Hunter, SM and Halgamuge, SK and Ellul, J and Dobrovic, A and Campbell, IG and Papenfuss, AT and McArthur, GA and Tothill, RW}, title = {Bioinformatics pipelines for targeted resequencing and whole-exome sequencing of human and mouse genomes: a virtual appliance approach for instant deployment.}, journal = {PloS one}, volume = {9}, number = {4}, pages = {e95217}, pmid = {24752294}, issn = {1932-6203}, support = {//Medical Research Council/United Kingdom ; }, mesh = {Animals ; Computational Biology/*methods ; Exome/*genetics ; Genome, Human/*genetics ; Humans ; Melanoma/genetics ; Mice ; Mutation/genetics ; *Sequence Analysis, DNA ; *User-Computer Interface ; }, abstract = {Targeted resequencing by massively parallel sequencing has become an effective and affordable way to survey small to large portions of the genome for genetic variation. Despite the rapid development in open source software for analysis of such data, the practical implementation of these tools through construction of sequencing analysis pipelines still remains a challenging and laborious activity, and a major hurdle for many small research and clinical laboratories. We developed TREVA (Targeted REsequencing Virtual Appliance), making pre-built pipelines immediately available as a virtual appliance. Based on virtual machine technologies, TREVA is a solution for rapid and efficient deployment of complex bioinformatics pipelines to laboratories of all sizes, enabling reproducible results. 
The analyses that are supported in TREVA include: somatic and germline single-nucleotide and insertion/deletion variant calling, copy number analysis, and cohort-based analyses such as pathway and significantly mutated genes analyses. TREVA is flexible and easy to use, and can be customised by Linux-based extensions if required. TREVA can also be deployed on the cloud (cloud computing), enabling instant access without investment overheads for additional hardware. TREVA is available at http://bioinformatics.petermac.org/treva/.}, } @article {pmid24743989, year = {2014}, author = {Blankenberg, D and Hillman-Jackson, J}, title = {Analysis of next-generation sequencing data using Galaxy.}, journal = {Methods in molecular biology (Clifton, N.J.)}, volume = {1150}, number = {}, pages = {21-43}, doi = {10.1007/978-1-4939-0512-6_2}, pmid = {24743989}, issn = {1940-6029}, mesh = {Biostatistics/*methods ; Chromatin Immunoprecipitation ; Computational Biology/*methods ; High-Throughput Nucleotide Sequencing/*methods ; *Internet ; Sequence Analysis, RNA ; *Software ; User-Computer Interface ; }, abstract = {The extraordinary throughput of next-generation sequencing (NGS) technology is outpacing our ability to analyze and interpret the data. This chapter will focus on practical informatics methods, strategies, and software tools for transforming NGS data into usable information through the use of a web-based platform, Galaxy. The Galaxy interface is explored through several different types of example analyses. Instructions for running one's own Galaxy server on local hardware or on cloud computing resources are provided. 
Installing new tools into a personal Galaxy instance is also demonstrated.}, } @article {pmid24737962, year = {2014}, author = {Kinger, S and Kumar, R and Sharma, A}, title = {Prediction based proactive thermal virtual machine scheduling in green clouds.}, journal = {TheScientificWorldJournal}, volume = {2014}, number = {}, pages = {208983}, doi = {10.1155/2014/208983}, pmid = {24737962}, issn = {1537-744X}, mesh = {Air Conditioning/*instrumentation/methods ; *Algorithms ; Conservation of Energy Resources/*methods ; *Energy Transfer ; Equipment Design ; Equipment Failure Analysis ; Feedback ; Hot Temperature ; Internet/*instrumentation ; Pattern Recognition, Automated/*methods ; *Support Vector Machine ; }, abstract = {Cloud computing has rapidly emerged as a widely accepted computing paradigm, but the research on Cloud computing is still at an early stage. Cloud computing provides many advanced features but it still has some shortcomings such as relatively high operating cost and environmental hazards like increasing carbon footprints. These hazards can be reduced up to some extent by efficient scheduling of Cloud resources. Working temperature on which a machine is currently running can be taken as a criterion for Virtual Machine (VM) scheduling. This paper proposes a new proactive technique that considers current and maximum threshold temperature of Server Machines (SMs) before making scheduling decisions with the help of a temperature predictor, so that maximum temperature is never reached. Different workload scenarios have been taken into consideration. The results obtained show that the proposed system is better than existing systems of VM scheduling, which does not consider current temperature of nodes before making scheduling decisions. 
Thus, a reduction in need of cooling systems for a Cloud environment has been obtained and validated.}, } @article {pmid24737958, year = {2014}, author = {Ji, Z and Ganchev, I and O'Droma, M and Zhang, X and Zhang, X}, title = {A cloud-based X73 ubiquitous mobile healthcare system: design and implementation.}, journal = {TheScientificWorldJournal}, volume = {2014}, number = {}, pages = {145803}, pmid = {24737958}, issn = {1537-744X}, mesh = {*Guidelines as Topic ; Internationality ; Internet/*standards ; Mobile Applications/*standards ; Monitoring, Ambulatory/*standards ; Software/*standards ; Software Design ; Telemedicine/*standards ; }, abstract = {Based on the user-centric paradigm for next generation networks, this paper describes a ubiquitous mobile healthcare (uHealth) system based on the ISO/IEEE 11073 personal health data (PHD) standards (X73) and cloud computing techniques. A number of design issues associated with the system implementation are outlined. The system includes a middleware on the user side, providing a plug-and-play environment for heterogeneous wireless sensors and mobile terminals utilizing different communication protocols and a distributed "big data" processing subsystem in the cloud. The design and implementation of this system are envisaged as an efficient solution for the next generation of uHealth systems.}, } @article {pmid24735269, year = {2014}, author = {Brigham, TJ}, title = {Taking advantage of Google's Web-based applications and services.}, journal = {Medical reference services quarterly}, volume = {33}, number = {2}, pages = {202-210}, doi = {10.1080/02763869.2014.897521}, pmid = {24735269}, issn = {1540-9597}, mesh = {Biomedical Research/*methods ; Data Mining ; Databases, Factual ; Humans ; *Internet ; *Search Engine ; }, abstract = {Google is a company that is constantly expanding and growing its services and products. 
While most librarians possess a "love/hate" relationship with Google, there are a number of reasons you should consider exploring some of the tools Google has created and made freely available. Applications and services such as Google Docs, Slides, and Google+ are functional and dynamic without the cost of comparable products. This column will address some of the issues users should be aware of before signing up to use Google's tools, and a description of some of Google's Web applications and services, plus how they can be useful to librarians in health care.}, } @article {pmid24734388, year = {2014}, author = {Free, J}, title = {Unlocking cloud computing: an examination of Seattle Children's Hospital's use of cloud computing.}, journal = {Health management technology}, volume = {35}, number = {3}, pages = {10-12}, pmid = {24734388}, issn = {1074-4770}, mesh = {*Hospitals, Pediatric ; Information Storage and Retrieval/*methods ; *Internet ; Organizational Case Studies ; Washington ; }, } @article {pmid24734019, year = {2014}, author = {Goscinski, WJ and McIntosh, P and Felzmann, U and Maksimenko, A and Hall, CJ and Gureyev, T and Thompson, D and Janke, A and Galloway, G and Killeen, NE and Raniga, P and Kaluza, O and Ng, A and Poudel, G and Barnes, DG and Nguyen, T and Bonnington, P and Egan, GF}, title = {The multi-modal Australian ScienceS Imaging and Visualization Environment (MASSIVE) high performance computing infrastructure: applications in neuroscience and neuroinformatics research.}, journal = {Frontiers in neuroinformatics}, volume = {8}, number = {}, pages = {30}, pmid = {24734019}, issn = {1662-5196}, abstract = {The Multi-modal Australian ScienceS Imaging and Visualization Environment (MASSIVE) is a national imaging and visualization facility established by Monash University, the Australian Synchrotron, the Commonwealth Scientific Industrial Research Organization (CSIRO), and the Victorian Partnership for Advanced Computing (VPAC), with funding from the 
National Computational Infrastructure and the Victorian Government. The MASSIVE facility provides hardware, software, and expertise to drive research in the biomedical sciences, particularly advanced brain imaging research using synchrotron x-ray and infrared imaging, functional and structural magnetic resonance imaging (MRI), x-ray computer tomography (CT), electron microscopy and optical microscopy. The development of MASSIVE has been based on best practice in system integration methodologies, frameworks, and architectures. The facility has: (i) integrated multiple different neuroimaging analysis software components, (ii) enabled cross-platform and cross-modality integration of neuroinformatics tools, and (iii) brought together neuroimaging databases and analysis workflows. MASSIVE is now operational as a nationally distributed and integrated facility for neuroinfomatics and brain imaging research.}, } @article {pmid24727042, year = {2014}, author = {Lenzen, M and Geschke, A and Wiedmann, T and Lane, J and Anderson, N and Baynes, T and Boland, J and Daniels, P and Dey, C and Fry, J and Hadjikakou, M and Kenway, S and Malik, A and Moran, D and Murray, J and Nettleton, S and Poruschi, L and Reynolds, C and Rowley, H and Ugon, J and Webb, D and West, J}, title = {Compiling and using input-output frameworks through collaborative virtual laboratories.}, journal = {The Science of the total environment}, volume = {485-486}, number = {}, pages = {241-251}, doi = {10.1016/j.scitotenv.2014.03.062}, pmid = {24727042}, issn = {1879-1026}, mesh = {Australia ; *Cooperative Behavior ; Databases, Factual ; Environment ; *Laboratories ; *Software ; *User-Computer Interface ; *Workflow ; }, abstract = {Compiling, deploying and utilising large-scale databases that integrate environmental and economic data have traditionally been labour- and cost-intensive processes, hindered by the large amount of disparate and misaligned data that must be collected and harmonised. 
The Australian Industrial Ecology Virtual Laboratory (IELab) is a novel, collaborative approach to compiling large-scale environmentally extended multi-region input-output (MRIO) models. The utility of the IELab product is greatly enhanced by avoiding the need to lock in an MRIO structure at the time the MRIO system is developed. The IELab advances the idea of the "mother-daughter" construction principle, whereby a regionally and sectorally very detailed "mother" table is set up, from which "daughter" tables are derived to suit specific research questions. By introducing a third tier - the "root classification" - IELab users are able to define their own mother-MRIO configuration, at no additional cost in terms of data handling. Customised mother-MRIOs can then be built, which maximise disaggregation in aspects that are useful to a family of research questions. The second innovation in the IELab system is to provide a highly automated collaborative research platform in a cloud-computing environment, greatly expediting workflows and making these computational benefits accessible to all users. Combining these two aspects realises many benefits. The collaborative nature of the IELab development project allows significant savings in resources. Timely deployment is possible by coupling automation procedures with the comprehensive input from multiple teams. User-defined MRIO tables, coupled with high performance computing, mean that MRIO analysis will be useful and accessible for a great many more research applications than would otherwise be possible. 
By ensuring that a common set of analytical tools such as for hybrid life-cycle assessment is adopted, the IELab will facilitate the harmonisation of fragmented, dispersed and misaligned raw data for the benefit of all interested parties.}, } @article {pmid24723842, year = {2014}, author = {Tseng, KC and Wu, CC}, title = {An expert fitness diagnosis system based on elastic cloud computing.}, journal = {TheScientificWorldJournal}, volume = {2014}, number = {}, pages = {981207}, pmid = {24723842}, issn = {1537-744X}, mesh = {Algorithms ; Bayes Theorem ; Computational Biology ; Information Storage and Retrieval ; *Software ; }, abstract = {This paper presents an expert diagnosis system based on cloud computing. It classifies a user's fitness level based on supervised machine learning techniques. This system is able to learn and make customized diagnoses according to the user's physiological data, such as age, gender, and body mass index (BMI). In addition, an elastic algorithm based on Poisson distribution is presented to allocate computation resources dynamically. It predicts the required resources in the future according to the exponential moving average of past observations. 
The experimental results show that Naïve Bayes is the best classifier with the highest accuracy (90.8%) and that the elastic algorithm is able to capture tightly the trend of requests generated from the Internet and thus assign corresponding computation resources to ensure the quality of service.}, } @article {pmid24723529, year = {2014}, author = {Qia Wang, and Wenjun Zeng, and Jun Tian, }, title = {A compressive sensing based secure watermark detection and privacy preserving storage framework.}, journal = {IEEE transactions on image processing : a publication of the IEEE Signal Processing Society}, volume = {23}, number = {3}, pages = {1317-1328}, doi = {10.1109/TIP.2014.2298980}, pmid = {24723529}, issn = {1941-0042}, mesh = {*Algorithms ; *Computer Security ; Data Compression/*methods ; Image Enhancement/methods ; Image Interpretation, Computer-Assisted/*methods ; Pattern Recognition, Automated/*methods ; Reproducibility of Results ; Sensitivity and Specificity ; }, abstract = {Privacy is a critical issue when the data owners outsource data storage or processing to a third party computing service, such as the cloud. In this paper, we identify a cloud computing application scenario that requires simultaneously performing secure watermark detection and privacy preserving multimedia data storage. We then propose a compressive sensing (CS)-based framework using secure multiparty computation (MPC) protocols to address such a requirement. In our framework, the multimedia data and secret watermark pattern are presented to the cloud for secure watermark detection in a CS domain to protect the privacy. During CS transformation, the privacy of the CS matrix and the watermark pattern is protected by the MPC protocols under the semi-honest security model. We derive the expected watermark detection performance in the CS domain, given the target image, watermark pattern, and the size of the CS matrix (but without the CS matrix itself). 
The correctness of the derived performance has been validated by our experiments. Our theoretical analysis and experimental results show that secure watermark detection in the CS domain is feasible. Our framework can also be extended to other collaborative secure signal processing and data-mining applications in the cloud.}, } @article {pmid24720494, year = {2014}, author = {Spinczyk, D and Karwan, A and Copik, M}, title = {Methods for abdominal respiratory motion tracking.}, journal = {Computer aided surgery : official journal of the International Society for Computer Aided Surgery}, volume = {19}, number = {1-3}, pages = {34-47}, pmid = {24720494}, issn = {1097-0150}, mesh = {Abdominal Wall/*physiology ; Algorithms ; Female ; Humans ; Image Processing, Computer-Assisted/instrumentation/*methods ; Imaging, Three-Dimensional ; Male ; *Movement ; *Respiration ; }, abstract = {Non-invasive surface registration methods have been developed to register and track breathing motions in a patient's abdomen and thorax. We evaluated several different registration methods, including marker tracking using a stereo camera, chessboard image projection, and abdominal point clouds. Our point cloud approach was based on a time-of-flight (ToF) sensor that tracked the abdominal surface. We tested different respiratory phases using additional markers as landmarks for the extension of the non-rigid Iterative Closest Point (ICP) algorithm to improve the matching of irregular meshes. Four variants for retrieving the correspondence data were implemented and compared. Our evaluation involved 9 healthy individuals (3 females and 6 males) with point clouds captured in opposite breathing phases (i.e., inhalation and exhalation). We measured three factors: surface distance, correspondence distance, and marker error. 
To evaluate different methods for computing the correspondence measurements, we defined the number of correspondences for every target point and the average correspondence assignment error of the points nearest the markers.}, } @article {pmid24717145, year = {2014}, author = {Guo, X and Meng, Y and Yu, N and Pan, Y}, title = {Cloud computing for detecting high-order genome-wide epistatic interaction via dynamic clustering.}, journal = {BMC bioinformatics}, volume = {15}, number = {}, pages = {102}, pmid = {24717145}, issn = {1471-2105}, support = {N01-AR-2-2263/AR/NIAMS NIH HHS/United States ; R01-AR-44422/AR/NIAMS NIH HHS/United States ; }, mesh = {Algorithms ; Cluster Analysis ; *Epistasis, Genetic ; Genetic Loci ; Genome ; Genome-Wide Association Study/*methods ; Genotype ; Phenotype ; Polymorphism, Single Nucleotide ; }, abstract = {BACKGROUND: Taking the advantage of high-throughput single nucleotide polymorphism (SNP) genotyping technology, large genome-wide association studies (GWASs) have been considered to hold promise for unravelling complex relationships between genotype and phenotype. At present, traditional single-locus-based methods are insufficient to detect interactions consisting of multiple-locus, which are broadly existing in complex traits. In addition, statistic tests for high order epistatic interactions with more than 2 SNPs propose computational and analytical challenges because the computation increases exponentially as the cardinality of SNPs combinations gets larger.

RESULTS: In this paper, we provide a simple, fast and powerful method using dynamic clustering and cloud computing to detect genome-wide multi-locus epistatic interactions. We have constructed systematic experiments to compare the power performance against some recently proposed algorithms, including TEAM, SNPRuler, EDCF and BOOST. Furthermore, we have applied our method on two real GWAS datasets, Age-related macular degeneration (AMD) and Rheumatoid arthritis (RA) datasets, where we find some novel potential disease-related genetic factors which do not show up in detections of 2-loci epistatic interactions.

CONCLUSIONS: Experimental results on simulated data demonstrate that our method is more powerful than some recently proposed methods on both two- and three-locus disease models. Our method has discovered many novel high-order associations that are significantly enriched in cases from two real GWAS datasets. Moreover, the running time of the cloud implementation for our method on AMD dataset and RA dataset are roughly 2 hours and 50 hours on a cluster with forty small virtual machines for detecting two-locus interactions, respectively. Therefore, we believe that our method is suitable and effective for the full-scale analysis of multiple-locus epistatic interactions in GWAS.}, } @article {pmid24712413, year = {2014}, author = {Clark, JL and Swanepoel, de W}, title = {Technology for hearing loss--as We Know it, and as We Dream it.}, journal = {Disability and rehabilitation. Assistive technology}, volume = {9}, number = {5}, pages = {408-413}, doi = {10.3109/17483107.2014.905642}, pmid = {24712413}, issn = {1748-3115}, mesh = {Audiology/*trends ; Equipment Design/*trends ; Global Health ; *Hearing Aids ; Hearing Loss/*rehabilitation ; Humans ; *Technology Transfer ; Telemedicine/trends ; United States ; }, abstract = {PURPOSE: Worldwide demand for accessible hearing health technology continues to increase while the numbers of hearing healthcare specialists are grossly inadequate to meet this demand. Proliferation of innovative technology and the advent of greater access to global connectivity are providing an opportunity to identify and harness new resources that may change current audiological service delivery methods to maximize access, efficiency and impact.

METHODS: By searching through the most current literature and engaging in discussions with industry experts, it is possible to identify avenues that could increase services to those who have hearing loss with innovative healthcare technology. This article aims to review the current state as well as future trends of hearing health technology by addressing: Technology as We Know it; and Technology as We Dream it.

RESULTS: Some of the newer technologies we have recently witnessed include: microprocessors; personalized computing devices (e.g. smartphones); web-based applications; an expanding clinical repertoire with integrated test equipment; and globalization of telecommunications that opens the door to telehealth; and self-fitting of hearing aids. Yet, innovation continues scaffolding on recent successes with innovations for hearing healthcare expected to increase into the future.

CONCLUSION: As technology and connectivity continue to evolve so should the practice of audiology adapt to the global needs by capitalizing on these advances to optimize service delivery access and sustainability.

Capital investment in equipment will be dramatically reduced with smaller, lighter, less costly and more portable equipment. Individuals who live in remote regions with little or no hearing healthcare access can undergo valid assessments by a professional via telehealth. Web-based applications allow clinicians to expand their repertoire and reach of services.}, } @article {pmid24707207, year = {2014}, author = {Glasberg, R and Hartmann, M and Draheim, M and Tamm, G and Hessel, F}, title = {Risks and crises for healthcare providers: the impact of cloud computing.}, journal = {TheScientificWorldJournal}, volume = {2014}, number = {}, pages = {524659}, pmid = {24707207}, issn = {1537-744X}, mesh = {Health Personnel ; *Hospitals ; *Information Storage and Retrieval ; *Internet ; *Risk Assessment ; Risk Management ; }, abstract = {We analyze risks and crises for healthcare providers and discuss the impact of cloud computing in such scenarios. The analysis is conducted in a holistic way, taking into account organizational and human aspects, clinical, IT-related, and utilities-related risks as well as incorporating the view of the overall risk management.}, } @article {pmid24701160, year = {2014}, author = {Hu, R and Jiang, J and Liu, G and Wang, L}, title = {Efficient resources provisioning based on load forecasting in cloud.}, journal = {TheScientificWorldJournal}, volume = {2014}, number = {}, pages = {321231}, pmid = {24701160}, issn = {1537-744X}, mesh = {Forecasting ; Information Storage and Retrieval/standards/*trends ; Internet/standards/*trends ; *Neural Networks, Computer ; }, abstract = {Cloud providers should ensure QoS while maximizing resources utilization. One optimal strategy is to timely allocate resources in a fine-grained mode according to application's actual resources demand. The necessary precondition of this strategy is obtaining future load information in advance. 
We propose a multi-step-ahead load forecasting method, KSwSVR, based on statistical learning theory which is suitable for the complex and dynamic characteristics of the cloud computing environment. It integrates an improved support vector regression algorithm and Kalman smoother. Public trace data taken from multitypes of resources were used to verify its prediction accuracy, stability, and adaptability, comparing with AR, BPNN, and standard SVR. Subsequently, based on the predicted results, a simple and efficient strategy is proposed for resource provisioning. CPU allocation experiment indicated it can effectively reduce resources consumption while meeting service level agreements requirements.}, } @article {pmid24701149, year = {2014}, author = {Ghazizadeh, E and Zamani, M and Ab Manan, JL and Alizadeh, M}, title = {Trusted computing strengthens cloud authentication.}, journal = {TheScientificWorldJournal}, volume = {2014}, number = {}, pages = {260187}, doi = {10.1155/2014/260187}, pmid = {24701149}, issn = {1537-744X}, mesh = {Biometric Identification/standards/trends ; Computer Security/*standards/trends ; Humans ; Information Storage and Retrieval/standards/trends ; Internet/*standards/trends ; *Trust ; }, abstract = {Cloud computing is a new generation of technology which is designed to provide the commercial necessities, solve the IT management issues, and run the appropriate applications. Another entry on the list of cloud functions which has been handled internally is Identity Access Management (IAM). Companies encounter IAM as security challenges while adopting more technologies became apparent. Trust Multi-tenancy and trusted computing based on a Trusted Platform Module (TPM) are great technologies for solving the trust and security concerns in the cloud identity environment. Single sign-on (SSO) and OpenID have been released to solve security and privacy problems for cloud identity. 
This paper proposes the use of trusted computing, Federated Identity Management, and OpenID Web SSO to solve identity theft in the cloud. Besides, this proposed model has been simulated in .Net environment. Security analyzing, simulation, and BLP confidential model are three ways to evaluate and analyze our proposed model.}, } @article {pmid24701137, year = {2014}, author = {Haufe, K and Dzombeta, S and Brandis, K}, title = {Proposal for a security management in cloud computing for health care.}, journal = {TheScientificWorldJournal}, volume = {2014}, number = {}, pages = {146970}, pmid = {24701137}, issn = {1537-744X}, mesh = {*Computer Security ; *Electronic Health Records ; *Internet ; }, abstract = {Cloud computing is actually one of the most popular themes of information systems research. Considering the nature of the processed information especially health care organizations need to assess and treat specific risks according to cloud computing in their information security management system. Therefore, in this paper we propose a framework that includes the most important security processes regarding cloud computing in the health care sector. Starting with a framework of general information security management processes derived from standards of the ISO 27000 family the most important information security processes for health care organizations using cloud computing will be identified considering the main risks regarding cloud computing and the type of information processed. 
The identified processes will help a health care organization using cloud computing to focus on the most important ISMS processes and establish and operate them at an appropriate level of maturity considering limited resources.}, } @article {pmid24696645, year = {2014}, author = {Whaiduzzaman, M and Gani, A and Anuar, NB and Shiraz, M and Haque, MN and Haque, IT}, title = {Cloud service selection using multicriteria decision analysis.}, journal = {TheScientificWorldJournal}, volume = {2014}, number = {}, pages = {459375}, pmid = {24696645}, issn = {1537-744X}, mesh = {*Algorithms ; *Computing Methodologies ; *Decision Making, Computer-Assisted ; *Decision Support Techniques ; Information Storage and Retrieval/*methods ; *Internet ; }, abstract = {Cloud computing (CC) has recently been receiving tremendous attention from the IT industry and academic researchers. CC leverages its unique services to cloud customers in a pay-as-you-go, anytime, anywhere manner. Cloud services provide dynamically scalable services through the Internet on demand. Therefore, service provisioning plays a key role in CC. The cloud customer must be able to select appropriate services according to his or her needs. Several approaches have been proposed to solve the service selection problem, including multicriteria decision analysis (MCDA). MCDA enables the user to choose from among a number of available choices. In this paper, we analyze the application of MCDA to service selection in CC. We identify and synthesize several MCDA techniques and provide a comprehensive analysis of this technology for general readers. In addition, we present a taxonomy derived from a survey of the current literature. Finally, we highlight several state-of-the-art practical aspects of MCDA implementation in cloud computing service selection. 
The contributions of this study are four-fold: (a) focusing on the state-of-the-art MCDA techniques, (b) highlighting the comparative analysis and suitability of several MCDA methods, (c) presenting a taxonomy through extensive literature review, and (d) analyzing and summarizing the cloud computing service selections in different scenarios.}, } @article {pmid24694135, year = {2014}, author = {Castillo, E and Castillo, R and Fuentes, D and Guerrero, T}, title = {Computing global minimizers to a constrained B-spline image registration problem from optimal l1 perturbations to block match data.}, journal = {Medical physics}, volume = {41}, number = {4}, pages = {041904}, pmid = {24694135}, issn = {2473-4209}, support = {DP2 OD007044/OD/NIH HHS/United States ; DP2OD007044/OD/NIH HHS/United States ; 3DP2OD007044-01S1/OD/NIH HHS/United States ; K01 CA181292/CA/NCI NIH HHS/United States ; K01CA181292/CA/NCI NIH HHS/United States ; }, mesh = {Algorithms ; Exhalation ; Humans ; Image Processing, Computer-Assisted/*methods ; Inhalation ; Radiography, Thoracic/*methods ; Tomography, X-Ray Computed/*methods ; }, abstract = {PURPOSE: Block matching is a well-known strategy for estimating corresponding voxel locations between a pair of images according to an image similarity metric. Though robust to issues such as image noise and large magnitude voxel displacements, the estimated point matches are not guaranteed to be spatially accurate. However, the underlying optimization problem solved by the block matching procedure is similar in structure to the class of optimization problem associated with B-spline based registration methods. By exploiting this relationship, the authors derive a numerical method for computing a global minimizer to a constrained B-spline registration problem that incorporates the robustness of block matching with the global smoothness properties inherent to B-spline parameterization.

METHODS: The method reformulates the traditional B-spline registration problem as a basis pursuit problem describing the minimal l1-perturbation to block match pairs required to produce a B-spline fitting error within a given tolerance. The sparsity pattern of the optimal perturbation then defines a voxel point cloud subset on which the B-spline fit is a global minimizer to a constrained variant of the B-spline registration problem. As opposed to traditional B-spline algorithms, the optimization step involving the actual image data is addressed by block matching.

RESULTS: The performance of the method is measured in terms of spatial accuracy using ten inhale/exhale thoracic CT image pairs (available for download at www.dir-lab.com) obtained from the COPDgene dataset and corresponding sets of expert-determined landmark point pairs. The results of the validation procedure demonstrate that the method can achieve a high spatial accuracy on a significantly complex image set.

CONCLUSIONS: The proposed methodology is demonstrated to achieve a high spatial accuracy and is generalizable in that it can employ any displacement field parameterization described as a least squares fit to block match generated estimates. Thus, the framework allows for a wide range of image similarity block match metric and physical modeling combinations.}, } @article {pmid24693240, year = {2014}, author = {Lee, SH and Lee, IY}, title = {A study of practical proxy reencryption with a keyword search scheme considering cloud storage structure.}, journal = {TheScientificWorldJournal}, volume = {2014}, number = {}, pages = {615679}, doi = {10.1155/2014/615679}, pmid = {24693240}, issn = {1537-744X}, mesh = {*Algorithms ; *Computer Security ; Information Storage and Retrieval/*methods ; *Internet ; *Signal Processing, Computer-Assisted ; }, abstract = {Data outsourcing services have emerged with the increasing use of digital information. They can be used to store data from various devices via networks that are easy to access. Unlike existing removable storage systems, storage outsourcing is available to many users because it has no storage limit and does not require a local storage medium. However, the reliability of storage outsourcing has become an important topic because many users employ it to store large volumes of data. To protect against unethical administrators and attackers, a variety of cryptography systems are used, such as searchable encryption and proxy reencryption. However, existing searchable encryption technology is inconvenient for use in storage outsourcing environments where users upload their data to be shared with others as necessary. In addition, some existing schemes are vulnerable to collusion attacks and have computing cost inefficiencies. 
In this paper, we analyze existing proxy re-encryption with keyword search.}, } @article {pmid24688434, year = {2014}, author = {Yu, S and Gui, X and Lin, J and Tian, F and Zhao, J and Dai, M}, title = {A security-awareness virtual machine management scheme based on Chinese wall policy in cloud computing.}, journal = {TheScientificWorldJournal}, volume = {2014}, number = {}, pages = {805923}, doi = {10.1155/2014/805923}, pmid = {24688434}, issn = {1537-744X}, mesh = {*Computer Security ; Information Storage and Retrieval/*methods ; }, abstract = {Cloud computing gets increasing attention for its capacity to leverage developers from infrastructure management tasks. However, recent works reveal that side channel attacks can lead to privacy leakage in the cloud. Enhancing isolation between users is an effective solution to eliminate the attack. In this paper, to eliminate side channel attacks, we investigate the isolation enhancement scheme from the aspect of virtual machine (VM) management. The security-awareness VMs management scheme (SVMS), a VMs isolation enhancement scheme to defend against side channel attacks, is proposed. First, we use the aggressive conflict of interest relation (ACIR) and aggressive in ally with relation (AIAR) to describe user constraint relations. Second, based on the Chinese wall policy, we put forward four isolation rules. Third, the VMs placement and migration algorithms are designed to enforce VMs isolation between the conflict users. Finally, based on the normal distribution, we conduct a series of experiments to evaluate SVMS. 
The experimental results show that SVMS is efficient in guaranteeing isolation between VMs owned by conflict users, while the resource utilization rate decreases but not by much.}, } @article {pmid24687458, year = {2015}, author = {Xue, H and Inati, S and Sørensen, TS and Kellman, P and Hansen, MS}, title = {Distributed MRI reconstruction using Gadgetron-based cloud computing.}, journal = {Magnetic resonance in medicine}, volume = {73}, number = {3}, pages = {1015-1025}, pmid = {24687458}, issn = {1522-2594}, support = {Z99 HL999999//Intramural NIH HHS/United States ; }, mesh = {*Algorithms ; Data Compression/*methods ; Image Enhancement/methods ; Image Interpretation, Computer-Assisted/*methods ; *Internet ; Magnetic Resonance Imaging/*methods ; Programming Languages ; Reproducibility of Results ; Sensitivity and Specificity ; *Software ; }, abstract = {PURPOSE: To expand the open source Gadgetron reconstruction framework to support distributed computing and to demonstrate that a multinode version of the Gadgetron can be used to provide nonlinear reconstruction with clinically acceptable latency.

METHODS: The Gadgetron framework was extended with new software components that enable an arbitrary number of Gadgetron instances to collaborate on a reconstruction task. This cloud-enabled version of the Gadgetron was deployed on three different distributed computing platforms ranging from a heterogeneous collection of commodity computers to the commercial Amazon Elastic Compute Cloud. The Gadgetron cloud was used to provide nonlinear, compressed sensing reconstruction on a clinical scanner with low reconstruction latency (eg, cardiac and neuroimaging applications).

RESULTS: The proposed setup was able to handle acquisition and l1-SPIRiT reconstruction of nine high temporal resolution real-time, cardiac short axis cine acquisitions, covering the ventricles for functional evaluation, in under 1 min. A three-dimensional high-resolution brain acquisition with 1 mm(3) isotropic pixel size was acquired and reconstructed with nonlinear reconstruction in less than 5 min.

CONCLUSION: A distributed computing enabled Gadgetron provides a scalable way to improve reconstruction performance using commodity cluster computing. Nonlinear, compressed sensing reconstruction can be deployed clinically with low image reconstruction latency.}, } @article {pmid24686728, year = {2014}, author = {Chen, SL and Chen, YY and Hsu, C}, title = {A new approach to integrate Internet-of-things and software-as-a-service model for logistic systems: a case study.}, journal = {Sensors (Basel, Switzerland)}, volume = {14}, number = {4}, pages = {6144-6164}, pmid = {24686728}, issn = {1424-8220}, abstract = {Cloud computing is changing the ways software is developed and managed in enterprises, which is changing the way of doing business in that dynamically scalable and virtualized resources are regarded as services over the Internet. Traditional manufacturing systems such as supply chain management (SCM), customer relationship management (CRM), and enterprise resource planning (ERP) are often developed case by case. However, effective collaboration between different systems, platforms, programming languages, and interfaces has been suggested by researchers. In cloud-computing-based systems, distributed resources are encapsulated into cloud services and centrally managed, which allows high automation, flexibility, fast provision, and ease of integration at low cost. The integration between physical resources and cloud services can be improved by combining Internet of things (IoT) technology and Software-as-a-Service (SaaS) technology. This study proposes a new approach for developing cloud-based manufacturing systems based on a four-layer SaaS model. 
There are three main contributions of this paper: (1) enterprises can develop their own cloud-based logistic management information systems based on the approach proposed in this paper; (2) a case study based on literature reviews with experimental results is proposed to verify that the system performance is remarkable; (3) challenges encountered and feedback collected from T Company in the case study are discussed in this paper for the purpose of enterprise deployment.}, } @article {pmid24678909, year = {2014}, author = {Krstajic, D and Buturovic, LJ and Leahy, DE and Thomas, S}, title = {Cross-validation pitfalls when selecting and assessing regression and classification models.}, journal = {Journal of cheminformatics}, volume = {6}, number = {1}, pages = {10}, pmid = {24678909}, issn = {1758-2946}, abstract = {BACKGROUND: We address the problem of selecting and assessing classification and regression models using cross-validation. Current state-of-the-art methods can yield models with high variance, rendering them unsuitable for a number of practical applications including QSAR. In this paper we describe and evaluate best practices which improve reliability and increase confidence in selected models. A key operational component of the proposed methods is cloud computing which enables routine use of previously infeasible approaches.

METHODS: We describe in detail an algorithm for repeated grid-search V-fold cross-validation for parameter tuning in classification and regression, and we define a repeated nested cross-validation algorithm for model assessment. As regards variable selection and parameter tuning we define two algorithms (repeated grid-search cross-validation and double cross-validation), and provide arguments for using the repeated grid-search in the general case.

RESULTS: We show results of our algorithms on seven QSAR datasets. The variation of the prediction performance, which is the result of choosing different splits of the dataset in V-fold cross-validation, needs to be taken into account when selecting and assessing classification and regression models.

CONCLUSIONS: We demonstrate the importance of repeating cross-validation when selecting an optimal model, as well as the importance of repeating nested cross-validation when assessing a prediction error.}, } @article {pmid24678297, year = {2014}, author = {Mahmud, M and Pulizzi, R and Vasilaki, E and Giugliano, M}, title = {QSpike tools: a generic framework for parallel batch preprocessing of extracellular neuronal signals recorded by substrate microelectrode arrays.}, journal = {Frontiers in neuroinformatics}, volume = {8}, number = {}, pages = {26}, pmid = {24678297}, issn = {1662-5196}, abstract = {Micro-Electrode Arrays (MEAs) have emerged as a mature technique to investigate brain (dys)functions in vivo and in in vitro animal models. Often referred to as "smart" Petri dishes, MEAs have demonstrated a great potential particularly for medium-throughput studies in vitro, both in academic and pharmaceutical industrial contexts. Enabling rapid comparison of ionic/pharmacological/genetic manipulations with control conditions, MEAs are employed to screen compounds by monitoring non-invasively the spontaneous and evoked neuronal electrical activity in longitudinal studies, with relatively inexpensive equipment. However, in order to acquire sufficient statistical significance, recordings last up to tens of minutes and generate large amount of raw data (e.g., 60 channels/MEA, 16 bits A/D conversion, 20 kHz sampling rate: approximately 8 GB/MEA,h uncompressed). Thus, when the experimental conditions to be tested are numerous, the availability of fast, standardized, and automated signal preprocessing becomes pivotal for any subsequent analysis and data archiving. 
To this aim, we developed an in-house cloud-computing system, named QSpike Tools, where CPU-intensive operations, required for preprocessing of each recorded channel (e.g., filtering, multi-unit activity detection, spike-sorting, etc.), are decomposed and batch-queued to a multi-core architecture or to a computers cluster. With the commercial availability of new and inexpensive high-density MEAs, we believe that disseminating QSpike Tools might facilitate its wide adoption and customization, and inspire the creation of community-supported cloud-computing facilities for MEAs users.}, } @article {pmid24676572, year = {2014}, author = {Haller, S and Lovblad, KO and Giannakopoulos, P and Van De Ville, D}, title = {Multivariate pattern recognition for diagnosis and prognosis in clinical neuroimaging: state of the art, current challenges and future trends.}, journal = {Brain topography}, volume = {27}, number = {3}, pages = {329-337}, doi = {10.1007/s10548-014-0360-z}, pmid = {24676572}, issn = {1573-6792}, mesh = {Brain/*pathology ; Brain Diseases/*diagnosis/*pathology ; Diagnosis, Computer-Assisted/*methods/trends ; Humans ; Information Dissemination/methods ; Multivariate Analysis ; Neuroimaging/*methods/trends ; Pattern Recognition, Automated/*methods/trends ; Prognosis ; }, abstract = {Many diseases are associated with systematic modifications in brain morphometry and function. These alterations may be subtle, in particular at early stages of the disease progress, and thus not evident by visual inspection alone. Group-level statistical comparisons have dominated neuroimaging studies for many years, proving fascinating insight into brain regions involved in various diseases. However, such group-level results do not warrant diagnostic value for individual patients. Recently, pattern recognition approaches have led to a fundamental shift in paradigm, bringing multivariate analysis and predictive results, notably for the early diagnosis of individual patients. 
We review the state-of-the-art fundamentals of pattern recognition including feature selection, cross-validation and classification techniques, as well as limitations including inter-individual variation in normal brain anatomy and neurocognitive reserve. We conclude with the discussion of future trends including multi-modal pattern recognition, multi-center approaches with data-sharing and cloud-computing.}, } @article {pmid24647123, year = {2014}, author = {Riliskis, L and Osipov, E}, title = {Maestro: an orchestration framework for large-scale WSN simulations.}, journal = {Sensors (Basel, Switzerland)}, volume = {14}, number = {3}, pages = {5392-5414}, pmid = {24647123}, issn = {1424-8220}, abstract = {Contemporary wireless sensor networks (WSNs) have evolved into large and complex systems and are one of the main technologies used in cyber-physical systems and the Internet of Things. Extensive research on WSNs has led to the development of diverse solutions at all levels of software architecture, including protocol stacks for communications. This multitude of solutions is due to the limited computational power and restrictions on energy consumption that must be accounted for when designing typical WSN systems. It is therefore challenging to develop, test and validate even small WSN applications, and this process can easily consume significant resources. Simulations are inexpensive tools for testing, verifying and generally experimenting with new technologies in a repeatable fashion. Consequently, as the size of the systems to be tested increases, so does the need for large-scale simulations. This article describes a tool called Maestro for the automation of large-scale simulation and investigates the feasibility of using cloud computing facilities for such task. 
Using tools that are built into Maestro, we demonstrate a feasible approach for benchmarking cloud infrastructure in order to identify cloud Virtual Machine (VM)instances that provide an optimal balance of performance and cost for a given simulation.}, } @article {pmid24630831, year = {2014}, author = {Bolouri, H}, title = {Modeling genomic regulatory networks with big data.}, journal = {Trends in genetics : TIG}, volume = {30}, number = {5}, pages = {182-191}, doi = {10.1016/j.tig.2014.02.005}, pmid = {24630831}, issn = {0168-9525}, mesh = {Animals ; *Databases, Genetic ; Disease/genetics ; Gene Regulatory Networks/*genetics ; Genome/*genetics ; High-Throughput Nucleotide Sequencing ; Humans ; *Models, Genetic ; }, abstract = {High-throughput sequencing, large-scale data generation projects, and web-based cloud computing are changing how computational biology is performed, who performs it, and what biological insights it can deliver. I review here the latest developments in available data, methods, and software, focusing on the modeling and analysis of the gene regulatory interactions in cells. Three key findings are: (i) although sophisticated computational resources are increasingly available to bench biologists, tailored ongoing education is necessary to avoid the erroneous use of these resources. (ii) Current models of the regulation of gene expression are far too simplistic and need updating. (iii) Integrative computational analysis of large-scale datasets is becoming a fundamental component of molecular biology. 
I discuss current and near-term opportunities and challenges related to these three points.}, } @article {pmid24621177, year = {2014}, author = {Melicher, D and Torson, AS and Dworkin, I and Bowsher, JH}, title = {A pipeline for the de novo assembly of the Themira biloba (Sepsidae: Diptera) transcriptome using a multiple k-mer length approach.}, journal = {BMC genomics}, volume = {15}, number = {1}, pages = {188}, pmid = {24621177}, issn = {1471-2164}, mesh = {Animals ; Computational Biology/*methods ; Diptera/*genetics ; Female ; Gene Expression Profiling/*methods ; Genomics/methods ; Male ; Molecular Sequence Annotation ; *Transcriptome ; }, abstract = {BACKGROUND: The Sepsidae family of flies is a model for investigating how sexual selection shapes courtship and sexual dimorphism in a comparative framework. However, like many non-model systems, there are few molecular resources available. Large-scale sequencing and assembly have not been performed in any sepsid, and the lack of a closely related genome makes investigation of gene expression challenging. Our goal was to develop an automated pipeline for de novo transcriptome assembly, and to use that pipeline to assemble and analyze the transcriptome of the sepsid Themira biloba.

RESULTS: Our bioinformatics pipeline uses cloud computing services to assemble and analyze the transcriptome with off-site data management, processing, and backup. It uses a multiple k-mer length approach combined with a second meta-assembly to extend transcripts and recover more bases of transcript sequences than standard single k-mer assembly. We used 454 sequencing to generate 1.48 million reads from cDNA generated from embryo, larva, and pupae of T. biloba and assembled a transcriptome consisting of 24,495 contigs. Annotation identified 16,705 transcripts, including those involved in embryogenesis and limb patterning. We assembled transcriptomes from an additional three non-model organisms to demonstrate that our pipeline assembled a higher-quality transcriptome than single k-mer approaches across multiple species.

CONCLUSIONS: The pipeline we have developed for assembly and analysis increases contig length, recovers unique transcripts, and assembles more base pairs than other methods through the use of a meta-assembly. The T. biloba transcriptome is a critical resource for performing large-scale RNA-Seq investigations of gene expression patterns, and is the first transcriptome sequenced in this Dipteran family.}, } @article {pmid24608051, year = {2014}, author = {Lai, CF and Chen, M and Pan, JS and Youn, CH and Chao, HC}, title = {A collaborative computing framework of cloud network and WBSN applied to fall detection and 3-D motion reconstruction.}, journal = {IEEE journal of biomedical and health informatics}, volume = {18}, number = {2}, pages = {457-466}, doi = {10.1109/JBHI.2014.2298467}, pmid = {24608051}, issn = {2168-2208}, mesh = {*Accidental Falls ; Algorithms ; *Computer Communication Networks ; Humans ; Imaging, Three-Dimensional/*methods ; Internet ; Remote Sensing Technology/*methods ; *Wireless Technology ; }, abstract = {As cloud computing and wireless body sensor network technologies become gradually developed, ubiquitous healthcare services prevent accidents instantly and effectively, as well as provides relevant information to reduce related processing time and cost. This study proposes a co-processing intermediary framework integrated cloud and wireless body sensor networks, which is mainly applied to fall detection and 3-D motion reconstruction. In this study, the main focuses includes distributed computing and resource allocation of processing sensing data over the computing architecture, network conditions and performance evaluation. 
Through this framework, the transmissions and computing time of sensing data are reduced to enhance overall performance for the services of fall events detection and 3-D motion reconstruction.}, } @article {pmid24608002, year = {2014}, author = {Su, CJ and Chiang, CY and Chih, MC}, title = {Ontological knowledge engine and health screening data enabled ubiquitous personalized physical fitness (UFIT).}, journal = {Sensors (Basel, Switzerland)}, volume = {14}, number = {3}, pages = {4560-4584}, pmid = {24608002}, issn = {1424-8220}, mesh = {Exercise ; *Health ; Humans ; *Knowledge ; Male ; *Mass Screening ; Physical Fitness/*physiology ; Search Engine ; Software ; Surveys and Questionnaires ; }, abstract = {Good physical fitness generally makes the body less prone to common diseases. A personalized exercise plan that promotes a balanced approach to fitness helps promotes fitness, while inappropriate forms of exercise can have adverse consequences for health. This paper aims to develop an ontology-driven knowledge-based system for generating custom-designed exercise plans based on a user's profile and health status, incorporating international standard Health Level Seven International (HL7) data on physical fitness and health screening. The generated plan exposing Representational State Transfer (REST) style web services which can be accessed from any Internet-enabled device and deployed in cloud computing environments. To ensure the practicality of the generated exercise plans, encapsulated knowledge used as a basis for inference in the system is acquired from domain experts. 
The proposed Ubiquitous Exercise Plan Generation for Personalized Physical Fitness (UFIT) will not only improve health-related fitness through generating personalized exercise plans, but also aid users in avoiding inappropriate work outs.}, } @article {pmid24597646, year = {2014}, author = {Liu, H and Wang, L and Lv, M and Pei, R and Li, P and Pei, Z and Wang, Y and Su, W and Xie, XQ}, title = {AlzPlatform: an Alzheimer's disease domain-specific chemogenomics knowledgebase for polypharmacology and target identification research.}, journal = {Journal of chemical information and modeling}, volume = {54}, number = {4}, pages = {1050-1060}, pmid = {24597646}, issn = {1549-960X}, support = {R01 DA025612/DA/NIDA NIH HHS/United States ; R21 HL109654/HL/NHLBI NIH HHS/United States ; P30 DA035778/DA/NIDA NIH HHS/United States ; DA025612/DA/NIDA NIH HHS/United States ; HL109654/HL/NHLBI NIH HHS/United States ; }, mesh = {Alzheimer Disease/drug therapy/genetics/*metabolism ; *Databases, Chemical ; *Databases, Genetic ; Drug Discovery ; Hep G2 Cells ; Humans ; Neuroprotective Agents/pharmacology/therapeutic use ; }, abstract = {Alzheimer's disease (AD) is one of the most complicated progressive neurodegeneration diseases that involve many genes, proteins, and their complex interactions. No effective medicines or treatments are available yet to stop or reverse the progression of the disease due to its polygenic nature. To facilitate discovery of new AD drugs and better understand the AD neurosignaling pathways involved, we have constructed an Alzheimer's disease domain-specific chemogenomics knowledgebase, AlzPlatform (www.cbligand.org/AD/) with cloud computing and sourcing functions. AlzPlatform is implemented with powerful computational algorithms, including our established TargetHunter, HTDocking, and BBB Predictor for target identification and polypharmacology analysis for AD research. 
The platform has assembled various AD-related chemogenomics data records, including 928 genes and 320 proteins related to AD, 194 AD drugs approved or in clinical trials, and 405,188 chemicals associated with 1,023,137 records of reported bioactivities from 38,284 corresponding bioassays and 10,050 references. Furthermore, we have demonstrated the application of the AlzPlatform in three case studies for identification of multitargets and polypharmacology analysis of FDA-approved drugs and also for screening and prediction of new AD active small chemical molecules and potential novel AD drug targets by our established TargetHunter and/or HTDocking programs. The predictions were confirmed by reported bioactivity data and our in vitro experimental validation. Overall, AlzPlatform will enrich our knowledge for AD target identification, drug discovery, and polypharmacology analyses and, also, facilitate the chemogenomics data sharing and information exchange/communications in aid of new anti-AD drug discovery and development.}, } @article {pmid24590675, year = {2014}, author = {Chen, TJ and Kotecha, N}, title = {Cytobank: providing an analytics platform for community cytometry data analysis and collaboration.}, journal = {Current topics in microbiology and immunology}, volume = {377}, number = {}, pages = {127-157}, doi = {10.1007/82_2014_364}, pmid = {24590675}, issn = {0070-217X}, support = {AI094929/AI/NIAID NIH HHS/United States ; GM096579/GM/NIGMS NIH HHS/United States ; HHSN268201300037C//PHS HHS/United States ; }, mesh = {Animals ; Computational Biology ; Cooperative Behavior ; *Database Management Systems ; *Databases, Factual ; Flow Cytometry/*instrumentation/standards ; Humans ; }, abstract = {Cytometry is used extensively in clinical and laboratory settings to diagnose and track cell subsets in blood and tissue. 
High-throughput, single-cell approaches leveraging cytometry are developed and applied in the computational and systems biology communities by researchers, who seek to improve the diagnosis of human diseases, map the structures of cell signaling networks, and identify new cell types. Data analysis and management present a bottleneck in the flow of knowledge from bench to clinic. Multi-parameter flow and mass cytometry enable identification of signaling profiles of patient cell samples. Currently, this process is manual, requiring hours of work to summarize multi-dimensional data and translate these data for input into other analysis programs. In addition, the increase in the number and size of collaborative cytometry studies as well as the computational complexity of analytical tools require the ability to assemble sufficient and appropriately configured computing capacity on demand. There is a critical need for platforms that can be used by both clinical and basic researchers who routinely rely on cytometry. Recent advances provide a unique opportunity to facilitate collaboration and analysis and management of cytometry data. Specifically, advances in cloud computing and virtualization are enabling efficient use of large computing resources for analysis and backup. 
An example is Cytobank, a platform that allows researchers to annotate, analyze, and share results along with the underlying single-cell data.}, } @article {pmid24579087, year = {2014}, author = {Chang, TH and Wu, SL and Wang, WJ and Horng, JT and Chang, CW}, title = {A novel approach for discovering condition-specific correlations of gene expressions within biological pathways by using cloud computing technology.}, journal = {BioMed research international}, volume = {2014}, number = {}, pages = {763237}, pmid = {24579087}, issn = {2314-6141}, mesh = {Algorithms ; *Computational Biology ; *Gene Expression Profiling ; Humans ; *Oligonucleotide Array Sequence Analysis ; Software ; }, abstract = {Microarrays are widely used to assess gene expressions. Most microarray studies focus primarily on identifying differential gene expressions between conditions (e.g., cancer versus normal cells), for discovering the major factors that cause diseases. Because previous studies have not identified the correlations of differential gene expression between conditions, crucial but abnormal regulations that cause diseases might have been disregarded. This paper proposes an approach for discovering the condition-specific correlations of gene expressions within biological pathways. Because analyzing gene expression correlations is time consuming, an Apache Hadoop cloud computing platform was implemented. Three microarray data sets of breast cancer were collected from the Gene Expression Omnibus, and pathway information from the Kyoto Encyclopedia of Genes and Genomes was applied for discovering meaningful biological correlations. The results showed that adopting the Hadoop platform considerably decreased the computation time. Several correlations of differential gene expressions were discovered between the relapse and nonrelapse breast cancer samples, and most of them were involved in cancer regulation and cancer-related pathways. 
The results showed that breast cancer recurrence might be highly associated with the abnormal regulations of these gene pairs, rather than with their individual expression levels. The proposed method was computationally efficient and reliable, and stable results were obtained when different data sets were used. The proposed method is effective in identifying meaningful biological regulation patterns between conditions.}, } @article {pmid24564380, year = {2013}, author = {Tan, TW and Xie, C and De Silva, M and Lim, KS and Patro, CP and Lim, SJ and Govindarajan, KR and Tong, JC and Choo, KH and Ranganathan, S and Khan, AM}, title = {Simple re-instantiation of small databases using cloud computing.}, journal = {BMC genomics}, volume = {14 Suppl 5}, number = {Suppl 5}, pages = {S13}, pmid = {24564380}, issn = {1471-2164}, mesh = {Archives ; Computational Biology/*methods ; *Databases, Factual ; *Internet ; Software ; User-Computer Interface ; }, abstract = {BACKGROUND: Small bioinformatics databases, unlike institutionally funded large databases, are vulnerable to discontinuation and many reported in publications are no longer accessible. This leads to irreproducible scientific work and redundant effort, impeding the pace of scientific progress.

RESULTS: We describe a Web-accessible system, available online at http://biodb100.apbionet.org, for archival and future on demand re-instantiation of small databases within minutes. Depositors can rebuild their databases by downloading a Linux live operating system (http://www.bioslax.com), preinstalled with bioinformatics and UNIX tools. The database and its dependencies can be compressed into an ".lzm" file for deposition. End-users can search for archived databases and activate them on dynamically re-instantiated BioSlax instances, run as virtual machines over the two popular full virtualization standard cloud-computing platforms, Xen Hypervisor or vSphere. The system is adaptable to increasing demand for disk storage or computational load and allows database developers to use the re-instantiated databases for integration and development of new databases.

CONCLUSIONS: Herein, we demonstrate that a relatively inexpensive solution can be implemented for archival of bioinformatics databases and their rapid re-instantiation should the live databases disappear.}, } @article {pmid24560680, year = {2014}, author = {Wang, H and Wu, Q and Qin, B and Domingo-Ferrer, J}, title = {FRR: fair remote retrieval of outsourced private medical records in electronic health networks.}, journal = {Journal of biomedical informatics}, volume = {50}, number = {}, pages = {226-233}, doi = {10.1016/j.jbi.2014.02.008}, pmid = {24560680}, issn = {1532-0480}, mesh = {Computer Simulation ; *Contract Services ; *Electronic Health Records ; *Privacy ; }, abstract = {Cloud computing is emerging as the next-generation IT architecture. However, cloud computing also raises security and privacy concerns since the users have no physical control over the outsourced data. This paper focuses on fairly retrieving encrypted private medical records outsourced to remote untrusted cloud servers in the case of medical accidents and disputes. Our goal is to enable an independent committee to fairly recover the original private medical records so that medical investigation can be carried out in a convincing way. We achieve this goal with a fair remote retrieval (FRR) model in which either t investigation committee members cooperatively retrieve the original medical data or none of them can get any information on the medical records. We realize the first FRR scheme by exploiting fair multi-member key exchange and homomorphic privately verifiable tags. Based on the standard computational Diffie-Hellman (CDH) assumption, our scheme is provably secure in the random oracle model (ROM). 
A detailed performance analysis and experimental results show that our scheme is efficient in terms of communication and computation.}, } @article {pmid24535887, year = {2014}, author = {McCall, J and Hardcastle, K}, title = {Communication and computing technology in biocontainment laboratories using the NEIDL as a model.}, journal = {Pathogens and disease}, volume = {71}, number = {2}, pages = {96-101}, doi = {10.1111/2049-632X.12159}, pmid = {24535887}, issn = {2049-632X}, mesh = {Biomedical Research/methods ; Communicable Diseases, Emerging/diagnosis/drug therapy/etiology ; *Computing Methodologies ; *Containment of Biohazards ; Humans ; *Laboratories ; Occupational Health ; *Telecommunications ; }, abstract = {The National Emerging Infectious Diseases Laboratories (NEIDL), Boston University, is a globally unique biocontainment research facility housing biosafety level 2 (BSL-2), BSL-3, and BSL-4 laboratories. Located in the BioSquare area at the University's Medical Campus, it is part of a national network of secure facilities constructed to study infectious diseases of major public health concern. The NEIDL allows for basic, translational, and clinical phases of research to be carried out in a single facility with the overall goal of accelerating understanding, treatment, and prevention of infectious diseases. The NEIDL will also act as a center of excellence providing training and education in all aspects of biocontainment research. Within every detail of NEIDL operations is a primary emphasis on safety and security. The ultramodern NEIDL has required a new approach to communications technology solutions in order to ensure safety and security and meet the needs of investigators working in this complex building. This article discusses the implementation of secure wireless networks and private cloud computing to promote operational efficiency, biosecurity, and biosafety with additional energy-saving advantages. 
The utilization of a dedicated data center, virtualized servers, virtualized desktop integration, multichannel secure wireless networks, and a NEIDL-dedicated Voice over Internet Protocol (VoIP) network are all discussed.}, } @article {pmid24529695, year = {2014}, author = {Tzou, CH and Artner, NM and Pona, I and Hold, A and Placheta, E and Kropatsch, WG and Frey, M}, title = {Comparison of three-dimensional surface-imaging systems.}, journal = {Journal of plastic, reconstructive & aesthetic surgery : JPRAS}, volume = {67}, number = {4}, pages = {489-497}, doi = {10.1016/j.bjps.2014.01.003}, pmid = {24529695}, issn = {1878-0539}, mesh = {Humans ; *Image Processing, Computer-Assisted ; *Imaging, Three-Dimensional ; Photogrammetry/*methods ; Software ; }, abstract = {BACKGROUND: In recent decades, three-dimensional (3D) surface-imaging technologies have gained popularity worldwide, but because most published articles that mention them are technical, clinicians often have difficulties gaining a proper understanding of them. This article aims to provide the reader with relevant information on 3D surface-imaging systems. In it, we compare the most recent technologies to reveal their differences.

METHODS: We have accessed five international companies with the latest technologies in 3D surface-imaging systems: 3dMD, Axisthree, Canfield, Crisalix and Dimensional Imaging (Di3D; in alphabetical order). We evaluated their technical equipment, independent validation studies and corporate backgrounds.

RESULTS: The fastest capturing devices are the 3dMD and Di3D systems, capable of capturing images within 1.5 and 1 ms, respectively. All companies provide software for tissue modifications. Additionally, 3dMD, Canfield and Di3D can fuse computed tomography (CT)/cone-beam computed tomography (CBCT) images into their 3D surface-imaging data. 3dMD and Di3D provide 4D capture systems, which allow capturing the movement of a 3D surface over time. Crisalix greatly differs from the other four systems as it is purely web based and realised via cloud computing.

CONCLUSION: 3D surface-imaging systems are becoming important in today's plastic surgical set-ups, taking surgeons to a new level of communication with patients, surgical planning and outcome evaluation. Technologies used in 3D surface-imaging systems and their intended field of application vary within the companies evaluated. Potential users should define their requirements and assignment of 3D surface-imaging systems in their clinical as research environment before making the final decision for purchase.}, } @article {pmid24517889, year = {2014}, author = {Qadir, MA and Zhan, SH and Kwok, B and Bruestle, J and Drees, B and Popescu, OE and Sorensen, PH}, title = {ChildSeq-RNA: A next-generation sequencing-based diagnostic assay to identify known fusion transcripts in childhood sarcomas.}, journal = {The Journal of molecular diagnostics : JMD}, volume = {16}, number = {3}, pages = {361-370}, doi = {10.1016/j.jmoldx.2014.01.002}, pmid = {24517889}, issn = {1943-7811}, mesh = {Cell Line, Tumor ; Child ; High-Throughput Nucleotide Sequencing/*methods ; Humans ; Oncogene Fusion/genetics ; Oncogene Proteins, Fusion/*genetics ; Paired Box Transcription Factors/genetics ; RNA/genetics ; Sarcoma/*genetics ; Sensitivity and Specificity ; Sequence Analysis, RNA/*methods ; }, abstract = {Childhood sarcomas can be extremely difficult to accurately diagnose on the basis of morphological characteristics alone. Ancillary methods, such as RT-PCR or fluorescence in situ hybridization, to detect pathognomonic gene fusions can help to distinguish these tumors. Two major deficiencies of these assays are their inability to identify gene fusions at nucleotide resolution or to detect multiple gene fusions simultaneously. 
We developed a next-generation sequencing-based assay designated ChildSeq-RNA that uses the Ion Torrent platform to screen for EWSR1-FLI1 and EWSR1-ERG, PAX3-FOXO1 and PAX7-FOXO1, EWSR1-WT1, and ETV6-NTRK3 fusions of Ewing sarcoma (ES), alveolar rhabdomyosarcoma, desmoplastic small round cell tumor, and congenital fibrosarcoma, respectively. To rapidly analyze resulting data, we codeveloped a bioinformatics tool, termed ChildDecode, that operates on a scalable, cloud-computing platform. Total RNA from four ES cell lines plus 33 clinical samples representing ES, alveolar rhabdomyosarcoma, desmoplastic small round cell tumor, and congenital fibrosarcoma tumors was subjected to ChildSeq-RNA. This accurately identified corresponding gene fusions in each tumor type, with no examples of false positive fusion detection in this proof-of-concept study. Comparison with previous RT-PCR findings demonstrated high sensitivity (96.4%; 95% CI, 82.3%-99.4%) and specificity (100%; 95% CI, 56.6%-100%) of ChildSeq-RNA to detect gene fusions. 
Herein, we propose ChildSeq-RNA as a novel tool to detect gene fusions in childhood sarcomas at single-nucleotide resolution.}, } @article {pmid24516936, year = {2014}, author = {Baillie, J}, title = {Wireless world widens nurse call options.}, journal = {Health estate}, volume = {68}, number = {1}, pages = {45-49}, pmid = {24516936}, mesh = {*Hospital Communication Systems ; *Nursing Staff, Hospital ; State Medicine ; United Kingdom ; *Wireless Technology ; }, abstract = {With wireless technology now an integral part of all our lives, and miniaturisation of computing power having made even hand-held portable devices such as mobile phones powerful tools in their own right, HEJ editor, Jonathan Baillie, spoke to specialist in wireless nurse call systems, Courtney-Thorne, to discuss some of its key recent product innovations, and see what impact it feels developments such as 'cloud' technology will have on the bringing of more technology into the nurse call sector as 'nurse and carer tools'.}, } @article {pmid24516326, year = {2014}, author = {Piotto, S and Biasi, LD and Concilio, S and Castiglione, A and Cattaneo, G}, title = {GRIMD: distributed computing for chemists and biologists.}, journal = {Bioinformation}, volume = {10}, number = {1}, pages = {43-47}, pmid = {24516326}, issn = {0973-2063}, abstract = {MOTIVATION: Biologists and chemists are facing problems of high computational complexity that require the use of several computers organized in clusters or in specialized grids. Examples of such problems can be found in molecular dynamics (MD), in silico screening, and genome analysis. Grid Computing and Cloud Computing are becoming prevalent mainly because of their competitive performance/cost ratio. Regrettably, the diffusion of Grid Computing is strongly limited because two main limitations: it is confined to scientists with strong Computer Science background and the analyses of the large amount of data produced can be cumbersome it. 
We have developed a package named GRIMD to provide an easy and flexible implementation of distributed computing for the Bioinformatics community. GRIMD is very easy to install and maintain, and it does not require any specific Computer Science skill. Moreover, permits preliminary analysis on the distributed machines to reduce the amount of data to transfer. GRIMD is very flexible because it shields the typical computational biologist from the need to write specific code for tasks such as molecular dynamics or docking calculations. Furthermore, it permits an efficient use of GPU cards whenever is possible. GRIMD calculations scale almost linearly and, therefore, permits to exploit efficiently each machine in the network. Here, we provide few examples of grid computing in computational biology (MD and docking) and bioinformatics (proteome analysis).

AVAILABILITY: GRIMD is available for free for noncommercial research at www.yadamp.unisa.it/grimd.

SUPPLEMENTARY INFORMATION: www.yadamp.unisa.it/grimd/howto.aspx.}, } @article {pmid24475911, year = {2014}, author = {Reid, JG and Carroll, A and Veeraraghavan, N and Dahdouli, M and Sundquist, A and English, A and Bainbridge, M and White, S and Salerno, W and Buhay, C and Yu, F and Muzny, D and Daly, R and Duyk, G and Gibbs, RA and Boerwinkle, E}, title = {Launching genomics into the cloud: deployment of Mercury, a next generation sequence analysis pipeline.}, journal = {BMC bioinformatics}, volume = {15}, number = {}, pages = {30}, pmid = {24475911}, issn = {1471-2105}, support = {U54 HG003273/HG/NHGRI NIH HHS/United States ; U54 HG006542/HG/NHGRI NIH HHS/United States ; U54HG006542/HG/NHGRI NIH HHS/United States ; U54HG003273/HG/NHGRI NIH HHS/United States ; }, mesh = {Genome/genetics ; Genomics/*methods ; High-Throughput Nucleotide Sequencing/*methods ; Humans ; *Internet ; *Software ; }, abstract = {BACKGROUND: Massively parallel DNA sequencing generates staggering amounts of data. Decreasing cost, increasing throughput, and improved annotation have expanded the diversity of genomics applications in research and clinical practice. This expanding scale creates analytical challenges: accommodating peak compute demand, coordinating secure access for multiple analysts, and sharing validated tools and results.

RESULTS: To address these challenges, we have developed the Mercury analysis pipeline and deployed it in local hardware and the Amazon Web Services cloud via the DNAnexus platform. Mercury is an automated, flexible, and extensible analysis workflow that provides accurate and reproducible genomic results at scales ranging from individuals to large cohorts.

CONCLUSIONS: By taking advantage of cloud computing and with Mercury implemented on the DNAnexus platform, we have demonstrated a powerful combination of a robust and fully validated software pipeline and a scalable computational resource that, to date, we have applied to more than 10,000 whole genome and whole exome samples.}, } @article {pmid24465714, year = {2014}, author = {Dong, J and Xiao, X and Sheldon, S and Biradar, C and Zhang, G and Duong, ND and Hazarika, M and Wikantika, K and Takeuhci, W and Moore, B}, title = {A 50-m forest cover map in Southeast Asia from ALOS/PALSAR and its application on forest fragmentation assessment.}, journal = {PloS one}, volume = {9}, number = {1}, pages = {e85801}, pmid = {24465714}, issn = {1932-6203}, mesh = {Algorithms ; Asia, Southeastern ; *Biodiversity ; Biomass ; Conservation of Natural Resources/*methods/statistics & numerical data ; Crops, Agricultural/growth & development ; *Forests ; Geographic Information Systems/statistics & numerical data ; Geography ; Models, Theoretical ; Radar ; Remote Sensing Technology/*methods/statistics & numerical data ; Reproducibility of Results ; Tropical Climate ; }, abstract = {Southeast Asia experienced higher rates of deforestation than other continents in the 1990s and still was a hotspot of forest change in the 2000s. Biodiversity conservation planning and accurate estimation of forest carbon fluxes and pools need more accurate information about forest area, spatial distribution and fragmentation. However, the recent forest maps of Southeast Asia were generated from optical images at spatial resolutions of several hundreds of meters, and they do not capture well the exceptionally complex and dynamic environments in Southeast Asia. The forest area estimates from those maps vary substantially, ranging from 1.73×10(6) km(2) (GlobCover) to 2.69×10(6) km(2) (MCD12Q1) in 2009; and their uncertainty is constrained by frequent cloud cover and coarse spatial resolution. 
Recently, cloud-free imagery from the Phased Array Type L-band Synthetic Aperture Radar (PALSAR) onboard the Advanced Land Observing Satellite (ALOS) became available. We used the PALSAR 50-m orthorectified mosaic imagery in 2009 to generate a forest cover map of Southeast Asia at 50-m spatial resolution. The validation, using ground-reference data collected from the Geo-Referenced Field Photo Library and high-resolution images in Google Earth, showed that our forest map has a reasonably high accuracy (producer's accuracy 86% and user's accuracy 93%). The PALSAR-based forest area estimates in 2009 are significantly correlated with those from GlobCover and MCD12Q1 at national and subnational scales but differ in some regions at the pixel scale due to different spatial resolutions, forest definitions, and algorithms. The resultant 50-m forest map was used to quantify forest fragmentation and it revealed substantial details of forest fragmentation. This new 50-m map of tropical forests could serve as a baseline map for forest resource inventory, deforestation monitoring, reducing emissions from deforestation and forest degradation (REDD+) implementation, and biodiversity.}, } @article {pmid24464852, year = {2014}, author = {Heath, AP and Greenway, M and Powell, R and Spring, J and Suarez, R and Hanley, D and Bandlamudi, C and McNerney, ME and White, KP and Grossman, RL}, title = {Bionimbus: a cloud for managing, analyzing and sharing large genomics datasets.}, journal = {Journal of the American Medical Informatics Association : JAMIA}, volume = {21}, number = {6}, pages = {969-975}, pmid = {24464852}, issn = {1527-974X}, support = {UL1 TR000430/TR/NCATS NIH HHS/United States ; P50 GM081892/GM/NIGMS NIH HHS/United States ; P50 MH094267/MH/NIMH NIH HHS/United States ; P50MH094267/MH/NIMH NIH HHS/United States ; P50GM081892-03A1/GM/NIGMS NIH HHS/United States ; K08 CA181254/CA/NCI NIH HHS/United States ; HHSN261200800001C/RC/CCR NIH HHS/United States ; 
HHSN261200800001E/CA/NCI NIH HHS/United States ; 3XS021/HHSN261200800001E//PHS HHS/United States ; }, mesh = {*Computer Systems ; *Datasets as Topic ; *Genomics ; Humans ; Internet ; Phenotype ; *Software ; Systems Integration ; }, abstract = {BACKGROUND: As large genomics and phenotypic datasets are becoming more common, it is increasingly difficult for most researchers to access, manage, and analyze them. One possible approach is to provide the research community with several petabyte-scale cloud-based computing platforms containing these data, along with tools and resources to analyze it.

METHODS: Bionimbus is an open source cloud-computing platform that is based primarily upon OpenStack, which manages on-demand virtual machines that provide the required computational resources, and GlusterFS, which is a high-performance clustered file system. Bionimbus also includes Tukey, which is a portal, and associated middleware that provides a single entry point and a single sign on for the various Bionimbus resources; and Yates, which automates the installation, configuration, and maintenance of the software infrastructure required.

RESULTS: Bionimbus is used by a variety of projects to process genomics and phenotypic data. For example, it is used by an acute myeloid leukemia resequencing project at the University of Chicago. The project requires several computational pipelines, including pipelines for quality control, alignment, variant calling, and annotation. For each sample, the alignment step requires eight CPUs for about 12 h. BAM file sizes ranged from 5 GB to 10 GB for each sample.

CONCLUSIONS: Most members of the research community have difficulty downloading large genomics datasets and obtaining sufficient storage and computer resources to manage and analyze the data. Cloud computing platforms, such as Bionimbus, with data commons that contain large genomics datasets, are one choice for broadening access to research data in genomics.}, } @article {pmid24462600, year = {2014}, author = {Liu, B and Madduri, RK and Sotomayor, B and Chard, K and Lacinski, L and Dave, UJ and Li, J and Liu, C and Foster, IT}, title = {Cloud-based bioinformatics workflow platform for large-scale next-generation sequencing analyses.}, journal = {Journal of biomedical informatics}, volume = {49}, number = {}, pages = {119-133}, pmid = {24462600}, issn = {1532-0480}, support = {R24 HL085343/HL/NHLBI NIH HHS/United States ; R24HL085343/HL/NHLBI NIH HHS/United States ; }, mesh = {*Computational Biology ; *Information Storage and Retrieval ; Sequence Analysis/*instrumentation ; }, abstract = {Due to the upcoming data deluge of genome data, the need for storing and processing large-scale genome data, easy access to biomedical analyses tools, efficient data sharing and retrieval has presented significant challenges. The variability in data volume results in variable computing and storage requirements, therefore biomedical researchers are pursuing more reliable, dynamic and convenient methods for conducting sequencing analyses. This paper proposes a Cloud-based bioinformatics workflow platform for large-scale next-generation sequencing analyses, which enables reliable and highly scalable execution of sequencing analyses workflows in a fully automated manner. 
Our platform extends the existing Galaxy workflow system by adding data management capabilities for transferring large quantities of data efficiently and reliably (via Globus Transfer), domain-specific analyses tools preconfigured for immediate use by researchers (via user-specific tools integration), automatic deployment on Cloud for on-demand resource allocation and pay-as-you-go pricing (via Globus Provision), a Cloud provisioning tool for auto-scaling (via HTCondor scheduler), and the support for validating the correctness of workflows (via semantic verification tools). Two bioinformatics workflow use cases as well as performance evaluation are presented to validate the feasibility of the proposed approach.}, } @article {pmid24454756, year = {2014}, author = {Karczewski, KJ and Fernald, GH and Martin, AR and Snyder, M and Tatonetti, NP and Dudley, JT}, title = {STORMSeq: an open-source, user-friendly pipeline for processing personal genomics data in the cloud.}, journal = {PloS one}, volume = {9}, number = {1}, pages = {e84860}, pmid = {24454756}, issn = {1932-6203}, support = {R01 DK098242/DK/NIDDK NIH HHS/United States ; GM007790/GM/NIGMS NIH HHS/United States ; T15 LM007033/LM/NLM NIH HHS/United States ; T32 GM007790/GM/NIGMS NIH HHS/United States ; LM007033/LM/NLM NIH HHS/United States ; }, mesh = {*Genome, Human ; Humans ; *User-Computer Interface ; }, abstract = {The increasing public availability of personal complete genome sequencing data has ushered in an era of democratized genomics. However, read mapping and variant calling software is constantly improving and individuals with personal genomic data may prefer to customize and update their variant calls. Here, we describe STORMSeq (Scalable Tools for Open-Source Read Mapping), a graphical interface cloud computing solution that does not require a parallel computing environment or extensive technical experience. 
This customizable and modular system performs read mapping, read cleaning, and variant calling and annotation. At present, STORMSeq costs approximately \$2 and 5-10 hours to process a full exome sequence and \$30 and 3-8 days to process a whole genome sequence. We provide this open-access and open-source resource as a user-friendly interface in Amazon EC2.}, } @article {pmid24451621, year = {2014}, author = {Santorum, JM and Darriba, D and Taboada, GL and Posada, D}, title = {jmodeltest.org: selection of nucleotide substitution models on the cloud.}, journal = {Bioinformatics (Oxford, England)}, volume = {30}, number = {9}, pages = {1310-1311}, pmid = {24451621}, issn = {1367-4811}, support = {203161/ERC_/European Research Council/International ; }, mesh = {Cluster Analysis ; Internet ; Models, Genetic ; Nucleotides/*genetics ; Phylogeny ; Software ; }, abstract = {The selection of models of nucleotide substitution is one of the major steps of modern phylogenetic analysis. Different tools exist to accomplish this task, among which jModelTest 2 (jMT2) is one of the most popular. Still, to deal with large DNA alignments with hundreds or thousands of loci, users of jMT2 need to have access to High Performance Computing clusters, including installation and configuration capabilities, conditions not always met. Here we present jmodeltest.org, a novel web server for the transparent execution of jMT2 across different platforms and for a wide range of users. 
Its main benefit is straightforward execution, avoiding any configuration/execution issues, and reducing significantly in most cases the time required to complete the analysis.}, } @article {pmid24451106, year = {2014}, author = {Takeuchi, F and Sekizuka, T and Yamashita, A and Ogasawara, Y and Mizuta, K and Kuroda, M}, title = {MePIC, metagenomic pathogen identification for clinical specimens.}, journal = {Japanese journal of infectious diseases}, volume = {67}, number = {1}, pages = {62-65}, doi = {10.7883/yoken.67.62}, pmid = {24451106}, issn = {1884-2836}, mesh = {Communicable Diseases/*diagnosis ; Computational Biology/methods ; Humans ; Internet ; Metagenomics/*methods ; Molecular Diagnostic Techniques/*methods ; }, abstract = {Next-generation DNA sequencing technologies have led to a new method of identifying the causative agents of infectious diseases. The analysis comprises three steps. First, DNA/RNA is extracted and extensively sequenced from a specimen that includes the pathogen, human tissue and commensal microorganisms. Second, the sequenced reads are matched with a database of known sequences, and the organisms from which the individual reads were derived are inferred. Last, the percentages of the organisms' genomic sequences in the specimen (i.e., the metagenome) are estimated, and the pathogen is identified. The first and last steps have become easy due to the development of benchtop sequencers and metagenomic software. To facilitate the middle step, which requires computational resources and skill, we developed a cloud-computing pipeline, MePIC: "Metagenomic Pathogen Identification for Clinical specimens." In the pipeline, unnecessary bases are trimmed off the reads, and human reads are removed. For the remaining reads, similar sequences are searched in the database of known nucleotide sequences. The search is drastically sped up by using a cloud-computing system. The webpage interface can be used easily by clinicians and epidemiologists. 
We believe that the use of the MePIC pipeline will promote metagenomic pathogen identification and improve the understanding of infectious diseases.}, } @article {pmid24445861, year = {2014}, author = {Wu, TT and Sung, TW}, title = {Public health practice course using Google Plus.}, journal = {Computers, informatics, nursing : CIN}, volume = {32}, number = {3}, pages = {144-152}, doi = {10.1097/CIN.0000000000000040}, pmid = {24445861}, issn = {1538-9774}, mesh = {*Information Storage and Retrieval ; *Internet ; *Public Health Practice ; }, abstract = {In recent years, mobile device-assisted clinical education has become popular among nursing school students. The introduction of mobile devices saves manpower and reduces errors while enhancing nursing students' professional knowledge and skills. To respond to the demands of various learning strategies and to maintain existing systems of education, the concept of Cloud Learning is gradually being introduced to instructional environments. Cloud computing facilitates learning that is personalized, diverse, and virtual. This study involved assessing the advantages of mobile devices and Cloud Learning in a public health practice course, in which Google+ was used as the learning platform, integrating various application tools. Users could save and access data by using any wireless Internet device. The platform was student centered and based on resource sharing and collaborative learning. With the assistance of highly flexible and convenient technology, certain obstacles in traditional practice training can be resolved. Our findings showed that the students who adopted Google+ learned more effectively compared with those who were limited to traditional learning systems. 
Most students and the nurse educator expressed a positive attitude toward and were satisfied with the innovative learning method.}, } @article {pmid24433564, year = {2014}, author = {Pratas, D and Pinho, AJ and Rodrigues, JM}, title = {XS: a FASTQ read simulator.}, journal = {BMC research notes}, volume = {7}, number = {}, pages = {40}, pmid = {24433564}, issn = {1756-0500}, mesh = {Algorithms ; Computational Biology/*methods ; High-Throughput Nucleotide Sequencing/*statistics & numerical data ; Reproducibility of Results ; Sequence Analysis, DNA/*methods ; Software ; }, abstract = {BACKGROUND: The emerging next-generation sequencing (NGS) is bringing, besides the natural huge amounts of data, an avalanche of new specialized tools (for analysis, compression, alignment, among others) and large public and private network infrastructures. Therefore, a direct necessity of specific simulation tools for testing and benchmarking is rising, such as a flexible and portable FASTQ read simulator, without the need of a reference sequence, yet correctly prepared for producing approximately the same characteristics as real data.

FINDINGS: We present XS, a skilled FASTQ read simulation tool, flexible, portable (does not need a reference sequence) and tunable in terms of sequence complexity. It has several running modes, depending on the time and memory available, and is aimed at testing computing infrastructures, namely cloud computing of large-scale projects, and testing FASTQ compression algorithms. Moreover, XS offers the possibility of simulating the three main FASTQ components individually (headers, DNA sequences and quality-scores).

CONCLUSIONS: XS provides an efficient and convenient method for fast simulation of FASTQ files, such as those from Ion Torrent (currently uncovered by other simulators), Roche-454, Illumina and ABI-SOLiD sequencing machines. This tool is publicly available at http://bioinformatics.ua.pt/software/xs/.}, } @article {pmid24428926, year = {2014}, author = {Lee, WP and Hsiao, YT and Hwang, WC}, title = {Designing a parallel evolutionary algorithm for inferring gene networks on the cloud computing environment.}, journal = {BMC systems biology}, volume = {8}, number = {}, pages = {5}, pmid = {24428926}, issn = {1752-0509}, mesh = {*Algorithms ; Computational Biology/*methods ; *Computers ; *Gene Regulatory Networks ; *Internet ; Saccharomyces cerevisiae/genetics ; *Software ; }, abstract = {BACKGROUND: To improve the tedious task of reconstructing gene networks through testing experimentally the possible interactions between genes, it becomes a trend to adopt the automated reverse engineering procedure instead. Some evolutionary algorithms have been suggested for deriving network parameters. However, to infer large networks by the evolutionary algorithm, it is necessary to address two important issues: premature convergence and high computational cost. To tackle the former problem and to enhance the performance of traditional evolutionary algorithms, it is advisable to use parallel model evolutionary algorithms. To overcome the latter and to speed up the computation, it is advocated to adopt the mechanism of cloud computing as a promising solution: most popular is the method of MapReduce programming model, a fault-tolerant framework to implement parallel algorithms for inferring large gene networks.

RESULTS: This work presents a practical framework to infer large gene networks, by developing and parallelizing a hybrid GA-PSO optimization method. Our parallel method is extended to work with the Hadoop MapReduce programming model and is executed in different cloud computing environments. To evaluate the proposed approach, we use a well-known open-source software GeneNetWeaver to create several yeast S. cerevisiae sub-networks and use them to produce gene profiles. Experiments have been conducted and the results have been analyzed. They show that our parallel approach can be successfully used to infer networks with desired behaviors and the computation time can be largely reduced.

CONCLUSIONS: Parallel population-based algorithms can effectively determine network parameters and they perform better than the widely-used sequential algorithms in gene network inference. These parallel algorithms can be distributed to the cloud computing environment to speed up the computation. By coupling the parallel model population-based optimization method and the parallel computational framework, high quality solutions can be obtained within relatively short time. This integrated approach is a promising way for inferring large networks.}, } @article {pmid24415903, year = {2013}, author = {Rayhan, RU and Zheng, Y and Uddin, E and Timbol, C and Adewuyi, O and Baraniuk, JN}, title = {Administer and collect medical questionnaires with Google documents: a simple, safe, and free system.}, journal = {Applied medical informatics}, volume = {33}, number = {3}, pages = {12-21}, pmid = {24415903}, issn = {1224-5593}, support = {R01 ES015382/ES/NIEHS NIH HHS/United States ; UL1 RR031975/RR/NCRR NIH HHS/United States ; UL1 TR000101/TR/NCATS NIH HHS/United States ; }, abstract = {AIM: Questionnaires are an invaluable resource for clinical trials. They serve to estimate disease burden and clinical parameters associated with a particular study. However, current researchers are tackling budget constraints, loss of funding opportunities, and rise of research associated fees. We aimed at exploring alternative avenues taking advantage of the free Google docs software for questionnaire administration. This presents an opportunity to reduce costs while simultaneously increasing efficiency and data fidelity.

MATERIAL AND METHODS: Google documents were used as a platform to create online questionnaires that were automatically hosted via a unique URL. Password protected access to the URL link and a unique study ID gave patients around the clock access from anywhere in the world. Unique study ID ensured confidentiality of all self-reported data. Patient responses were secured using a "Cloud" database where the data was automatically sorted, scaled and scored by custom Excel formulas. Researchers downloaded real-time questionnaire responses in multiple formats (e.g. excel) which were then analyzed with a statistical software of choice.

RESULTS: This simple workflow provided instant questionnaire scores that eliminated the use for paper-based responses and subsequent manual entry of data. Ease of access to online questionnaires provided convenience to patients leading to better response rates and increase in data fidelity. The system also allowed for real time monitoring of patient's progress on completing questionnaires. Online questionnaires had 100\% completion rate compared to paper-based questionnaires.

CONCLUSIONS: Google docs can serve as an efficient and free platform to administer questionnaires to a clinical population without sacrificing quality, security, and fidelity of data.}, } @article {pmid24387497, year = {2014}, author = {Méndez, I and Peterlin, P and Hudej, R and Strojnik, A and Casar, B}, title = {On multichannel film dosimetry with channel-independent perturbations.}, journal = {Medical physics}, volume = {41}, number = {1}, pages = {011705}, doi = {10.1118/1.4845095}, pmid = {24387497}, issn = {2473-4209}, mesh = {Calibration ; Film Dosimetry/*methods ; Radiation Dosage ; }, abstract = {PURPOSE: Different multichannel methods for film dosimetry have been proposed in the literature. Two of them are the weighted mean method and the method put forth by Micke et al. ["Multichannel film dosimetry with nonuniformity correction," Med. Phys. 38, 2523-2534 (2011)] and Mayer et al. ["Enhanced dosimetry procedures and assessment for EBT2 radiochromic film," Med. Phys. 39, 2147-2155 (2012)]. The purpose of this work was to compare their results and to develop a generalized channel-independent perturbations framework in which both methods enter as special cases.

METHODS: Four models of channel-independent perturbations were compared: weighted mean, Micke-Mayer method, uniform distribution, and truncated normal distribution. A closed-form formula to calculate film doses and the associated type B uncertainty for all four models was deduced. To evaluate the models, film dose distributions were compared with planned and measured dose distributions. At the same time, several elements of the dosimetry process were compared: film type EBT2 versus EBT3, different waiting-time windows, reflection mode versus transmission mode scanning, and planned versus measured dose distribution for film calibration and for γ-index analysis. The methods and the models described in this study are publicly accessible through IRISEU. Alpha 1.1 (http://www.iriseu.com). IRISEU. is a cloud computing web application for calibration and dosimetry of radiochromic films.

RESULTS: The truncated normal distribution model provided the best agreement between film and reference doses, both for calibration and γ-index verification, and proved itself superior to both the weighted mean model, which neglects correlations between the channels, and the Micke-Mayer model, whose accuracy depends on the properties of the sensitometric curves. With respect to the selection of dosimetry protocol, no significant differences were found between transmission and reflection mode scanning, between 75 ± 5 min and 20 ± 1 h waiting-time windows or between employing EBT2 or EBT3 films. Significantly better results were obtained when a measured dose distribution was used instead of a planned one as reference for the calibration, and when a planned dose distribution was used instead of a measured one as evaluation for the γ-analysis.

CONCLUSIONS: The truncated normal distribution model of channel-independent perturbations was found superior to the other three models under comparison and the authors propose its use for multichannel dosimetry.}, } @article {pmid24387492, year = {2014}, author = {Moore, KL and Kagadis, GC and McNutt, TR and Moiseenko, V and Mutic, S}, title = {Vision 20/20: Automation and advanced computing in clinical radiation oncology.}, journal = {Medical physics}, volume = {41}, number = {1}, pages = {010901}, doi = {10.1118/1.4842515}, pmid = {24387492}, issn = {2473-4209}, mesh = {Automation ; *Computers ; Humans ; Internet ; Radiation Oncology/*methods ; Safety ; Statistics as Topic ; }, abstract = {This Vision 20/20 paper considers what computational advances are likely to be implemented in clinical radiation oncology in the coming years and how the adoption of these changes might alter the practice of radiotherapy. Four main areas of likely advancement are explored: cloud computing, aggregate data analyses, parallel computation, and automation. As these developments promise both new opportunities and new risks to clinicians and patients alike, the potential benefits are weighed against the hazards associated with each advance, with special considerations regarding patient safety under new computational platforms and methodologies. 
While the concerns of patient safety are legitimate, the authors contend that progress toward next-generation clinical informatics systems will bring about extremely valuable developments in quality improvement initiatives, clinical efficiency, outcomes analyses, data sharing, and adaptive radiotherapy.}, } @article {pmid24385877, year = {2013}, author = {Xu, G and Ding, Y and Zhao, J and Hu, L and Fu, X}, title = {A novel artificial bee colony approach of live virtual machine migration policy using Bayes theorem.}, journal = {TheScientificWorldJournal}, volume = {2013}, number = {}, pages = {369209}, pmid = {24385877}, issn = {1537-744X}, mesh = {Animals ; *Artificial Intelligence ; *Bayes Theorem ; Bees/*physiology ; Biomimetics/*methods ; Information Storage and Retrieval/*methods ; *Internet ; Pattern Recognition, Automated/*methods ; User-Computer Interface ; }, abstract = {Green cloud data center has become a research hotspot of virtualized cloud computing architecture. Since live virtual machine (VM) migration technology is widely used and studied in cloud computing, we have focused on the VM placement selection of live migration for power saving. We present a novel heuristic approach which is called PS-ABC. Its algorithm includes two parts. One is that it combines the artificial bee colony (ABC) idea with the uniform random initialization idea, the binary search idea, and Boltzmann selection policy to achieve an improved ABC-based approach with better global exploration's ability and local exploitation's ability. The other one is that it uses the Bayes theorem to further optimize the improved ABC-based process to faster get the final optimal solution. As a result, the whole approach achieves a longer-term efficient optimization for power saving. The experimental results demonstrate that PS-ABC evidently reduces the total incremental power consumption and better protects the performance of VM running and migrating compared with the existing research. 
It makes the result of live VM migration more high-effective and meaningful.}, } @article {pmid25983539, year = {2014}, author = {Agarwal, P and Owzar, K}, title = {Next generation distributed computing for cancer research.}, journal = {Cancer informatics}, volume = {13}, number = {Suppl 7}, pages = {97-109}, pmid = {25983539}, issn = {1176-9351}, abstract = {Advances in next generation sequencing (NGS) and mass spectrometry (MS) technologies have provided many new opportunities and angles for extending the scope of translational cancer research while creating tremendous challenges in data management and analysis. The resulting informatics challenge is invariably not amenable to the use of traditional computing models. Recent advances in scalable computing and associated infrastructure, particularly distributed computing for Big Data, can provide solutions for addressing these challenges. In this review, the next generation of distributed computing technologies that can address these informatics problems is described from the perspective of three key components of a computational platform, namely computing, data storage and management, and networking. A broad overview of scalable computing is provided to set the context for a detailed description of Hadoop, a technology that is being rapidly adopted for large-scale distributed computing. A proof-of-concept Hadoop cluster, set up for performance benchmarking of NGS read alignment, is described as an example of how to work with Hadoop. 
Finally, Hadoop is compared with a number of other current technologies for distributed computing.}, } @article {pmid25848590, year = {2014}, author = {Nichols, DA and DeSalvo, S and Miller, RA and Jónsson, D and Griffin, KS and Hyde, PR and Walsh, JK and Kushida, CA}, title = {The COMET Sleep Research Platform.}, journal = {EGEMS (Washington, DC)}, volume = {2}, number = {1}, pages = {1059}, pmid = {25848590}, issn = {2327-9214}, support = {R01 HS019738/HS/AHRQ HHS/United States ; }, abstract = {INTRODUCTION: The Comparative Outcomes Management with Electronic Data Technology (COMET) platform is extensible and designed for facilitating multicenter electronic clinical research.

BACKGROUND: Our research goals were the following: (1) to conduct a comparative effectiveness trial (CET) for two obstructive sleep apnea treatments-positive airway pressure versus oral appliance therapy; and (2) to establish a new electronic network infrastructure that would support this study and other clinical research studies.

DISCUSSION: The COMET platform was created to satisfy the needs of CET with a focus on creating a platform that provides comprehensive toolsets, multisite collaboration, and end-to-end data management. The platform also provides medical researchers the ability to visualize and interpret data using business intelligence (BI) tools.

CONCLUSION: COMET is a research platform that is scalable and extensible, and which, in a future version, can accommodate big data sets and enable efficient and effective research across multiple studies and medical specialties. The COMET platform components were designed for an eventual move to a cloud computing infrastructure that enhances sustainability, overall cost effectiveness, and return on investment.}, } @article {pmid24348165, year = {2013}, author = {Zhao, J and Ding, Y and Xu, G and Hu, L and Dong, Y and Fu, X}, title = {A location selection policy of live virtual machine migration for power saving and load balancing.}, journal = {TheScientificWorldJournal}, volume = {2013}, number = {}, pages = {492615}, pmid = {24348165}, issn = {1537-744X}, mesh = {*Meteorological Concepts ; }, abstract = {Green cloud data center has become a research hotspot of virtualized cloud computing architecture. And load balancing has also been one of the most important goals in cloud data centers. Since live virtual machine (VM) migration technology is widely used and studied in cloud computing, we have focused on location selection (migration policy) of live VM migration for power saving and load balancing. We propose a novel approach MOGA-LS, which is a heuristic and self-adaptive multiobjective optimization algorithm based on the improved genetic algorithm (GA). This paper has presented the specific design and implementation of MOGA-LS such as the design of the genetic operators, fitness values, and elitism. We have introduced the Pareto dominance theory and the simulated annealing (SA) idea into MOGA-LS and have presented the specific process to get the final solution, and thus, the whole approach achieves a long-term efficient optimization for power saving and load balancing. 
The experimental results demonstrate that MOGA-LS evidently reduces the total incremental power consumption and better protects the performance of VM migration and achieves the balancing of system load compared with the existing research. It makes the result of live VM migration more high-effective and meaningful.}, } @article {pmid24346931, year = {2014}, author = {Siddiqui, Z and Abdullah, AH and Khan, MK and Alghamdi, AS}, title = {Smart environment as a service: three factor cloud based user authentication for telecare medical information system.}, journal = {Journal of medical systems}, volume = {38}, number = {1}, pages = {9997}, pmid = {24346931}, issn = {1573-689X}, mesh = {Algorithms ; Computer Security/*instrumentation ; *Confidentiality ; *Health Information Exchange ; Humans ; Internet ; Telemedicine ; }, abstract = {The Telecare Medical Information System (TMIS) provides a set of different medical services to the patient and medical practitioner. The patients and medical practitioners can easily connect to the services remotely from their own premises. There are several studies carried out to enhance and authenticate smartcard-based remote user authentication protocols for TMIS system. In this article, we propose a set of enhanced and authentic Three Factor (3FA) remote user authentication protocols utilizing a smartphone capability over a dynamic Cloud Computing (CC) environment. A user can access the TMIS services presented in the form of CC services using his smart device e.g. smartphone. Our framework transforms a smartphone to act as a unique and only identity required to access the TMIS system remotely. 
Methods, Protocols and Authentication techniques are proposed followed by security analysis and a performance analysis with the two recent authentication protocols proposed for the healthcare TMIS system.}, } @article {pmid24345941, year = {2014}, author = {Kohlhoff, KJ and Shukla, D and Lawrenz, M and Bowman, GR and Konerding, DE and Belov, D and Altman, RB and Pande, VS}, title = {Cloud-based simulations on Google Exacycle reveal ligand modulation of GPCR activation pathways.}, journal = {Nature chemistry}, volume = {6}, number = {1}, pages = {15-21}, pmid = {24345941}, issn = {1755-4349}, support = {R01 LM005652/LM/NLM NIH HHS/United States ; U54 GM072970/GM/NIGMS NIH HHS/United States ; U54 GM07297/GM/NIGMS NIH HHS/United States ; }, mesh = {*Internet ; Ligands ; Markov Chains ; Receptors, G-Protein-Coupled/*metabolism ; }, abstract = {Simulations can provide tremendous insight into the atomistic details of biological mechanisms, but micro- to millisecond timescales are historically only accessible on dedicated supercomputers. We demonstrate that cloud computing is a viable alternative that brings long-timescale processes within reach of a broader community. We used Google's Exacycle cloud-computing platform to simulate two milliseconds of dynamics of a major drug target, the G-protein-coupled receptor β2AR. Markov state models aggregate independent simulations into a single statistical model that is validated by previous computational and experimental results. Moreover, our models provide an atomistic description of the activation of a G-protein-coupled receptor and reveal multiple activation pathways. 
Agonists and inverse agonists interact differentially with these pathways, with profound implications for drug design.}, } @article {pmid24330342, year = {2014}, author = {McIntyre, RS and Cha, DS and Jerrell, JM and Swardfager, W and Kim, RD and Costa, LG and Baskaran, A and Soczynska, JK and Woldeyohannes, HO and Mansur, RB and Brietzke, E and Powell, AM and Gallaugher, A and Kudlow, P and Kaidanovich-Beilin, O and Alsuwaidan, M}, title = {Advancing biomarker research: utilizing 'Big Data' approaches for the characterization and prevention of bipolar disorder.}, journal = {Bipolar disorders}, volume = {16}, number = {5}, pages = {531-547}, doi = {10.1111/bdi.12162}, pmid = {24330342}, issn = {1399-5618}, mesh = {*Biomarkers ; *Biomedical Research ; *Bipolar Disorder/diagnosis/prevention & control/psychology ; Databases, Factual/*statistics & numerical data ; Humans ; }, abstract = {OBJECTIVE: To provide a strategic framework for the prevention of bipolar disorder (BD) that incorporates a 'Big Data' approach to risk assessment for BD.

METHODS: Computerized databases (e.g., Pubmed, PsychInfo, and MedlinePlus) were used to access English-language articles published between 1966 and 2012 with the search terms bipolar disorder, prodrome, 'Big Data', and biomarkers cross-referenced with genomics/genetics, transcriptomics, proteomics, metabolomics, inflammation, oxidative stress, neurotrophic factors, cytokines, cognition, neurocognition, and neuroimaging. Papers were selected from the initial search if the primary outcome(s) of interest was (were) categorized in any of the following domains: (i) 'omics' (e.g., genomics), (ii) molecular, (iii) neuroimaging, and (iv) neurocognitive.

RESULTS: The current strategic approach to identifying individuals at risk for BD, with an emphasis on phenotypic information and family history, has insufficient predictive validity and is clinically inadequate. The heterogeneous clinical presentation of BD, as well as its pathoetiological complexity, suggests that it is unlikely that a single biomarker (or an exclusive biomarker approach) will sufficiently augment currently inadequate phenotypic-centric prediction models. We propose a 'Big Data'- bioinformatics approach that integrates vast and complex phenotypic, anamnestic, behavioral, family, and personal 'omics' profiling. Bioinformatic processing approaches, utilizing cloud- and grid-enabled computing, are now capable of analyzing data on the order of tera-, peta-, and exabytes, providing hitherto unheard of opportunities to fundamentally revolutionize how psychiatric disorders are predicted, prevented, and treated. High-throughput networks dedicated to research on, and the treatment of, BD, integrating both adult and younger populations, will be essential to sufficiently enroll adequate samples of individuals across the neurodevelopmental trajectory in studies to enable the characterization and prevention of this heterogeneous disorder.

CONCLUSIONS: Advances in bioinformatics using a 'Big Data' approach provide an opportunity for novel insights regarding the pathoetiology of BD. The coordinated integration of research centers, inclusive of mixed-age populations, is a promising strategic direction for advancing this line of neuropsychiatric research.}, } @article {pmid25663956, year = {2013}, author = {Edwards, NJ}, title = {PepArML: A Meta-Search Peptide Identification Platform for Tandem Mass Spectra.}, journal = {Current protocols in bioinformatics}, volume = {44}, number = {1323}, pages = {13.23.1-23}, pmid = {25663956}, issn = {1934-340X}, support = {R01 CA126189/CA/NCI NIH HHS/United States ; }, mesh = {Humans ; Peptides/*analysis ; *Search Engine ; *Software ; Tandem Mass Spectrometry/*methods ; }, abstract = {The PepArML meta-search peptide identification platform for tandem mass spectra provides a unified search interface to seven search engines; a robust cluster, grid, and cloud computing scheduler for large-scale searches; and an unsupervised, model-free, machine-learning-based result combiner, which selects the best peptide identification for each spectrum, estimates false-discovery rates, and outputs pepXML format identifications. The meta-search platform supports Mascot; Tandem with native, k-score and s-score scoring; OMSSA; MyriMatch; and InsPecT with MS-GF spectral probability scores—reformatting spectral data and constructing search configurations for each search engine on the fly. The combiner selects the best peptide identification for each spectrum based on search engine results and features that model enzymatic digestion, retention time, precursor isotope clusters, mass accuracy, and proteotypic peptide properties, requiring no prior knowledge of feature utility or weighting. 
The PepArML meta-search peptide identification platform often identifies two to three times more spectra than individual search engines at 10% FDR.}, } @article {pmid24326538, year = {2014}, author = {Sahoo, SS and Jayapandian, C and Garg, G and Kaffashi, F and Chung, S and Bozorgi, A and Chen, CH and Loparo, K and Lhatoo, SD and Zhang, GQ}, title = {Heart beats in the cloud: distributed analysis of electrophysiological 'Big Data' using cloud computing for epilepsy clinical research.}, journal = {Journal of the American Medical Informatics Association : JAMIA}, volume = {21}, number = {2}, pages = {263-271}, pmid = {24326538}, issn = {1527-974X}, support = {P20 NS076965/NS/NINDS NIH HHS/United States ; UL1 TR000439/TR/NCATS NIH HHS/United States ; UL1TR000439/TR/NCATS NIH HHS/United States ; 1-P20-NS076965-01/NS/NINDS NIH HHS/United States ; }, mesh = {*Algorithms ; Arrhythmias, Cardiac/complications/diagnosis ; Biomedical Research ; *Computer Communication Networks/economics ; Confidentiality ; Cost-Benefit Analysis ; *Databases, Factual ; Death, Sudden ; *Electrocardiography ; Electrophysiologic Techniques, Cardiac ; Epilepsy/complications/*physiopathology ; Health Insurance Portability and Accountability Act ; Humans ; Internet ; *Signal Processing, Computer-Assisted ; United States ; }, abstract = {OBJECTIVE: The rapidly growing volume of multimodal electrophysiological signal data is playing a critical role in patient care and clinical research across multiple disease domains, such as epilepsy and sleep medicine. To facilitate secondary use of these data, there is an urgent need to develop novel algorithms and informatics approaches using new cloud computing technologies as well as ontologies for collaborative multicenter studies.

MATERIALS AND METHODS: We present the Cloudwave platform, which (a) defines parallelized algorithms for computing cardiac measures using the MapReduce parallel programming framework, (b) supports real-time interaction with large volumes of electrophysiological signals, and (c) features signal visualization and querying functionalities using an ontology-driven web-based interface. Cloudwave is currently used in the multicenter National Institute of Neurological Diseases and Stroke (NINDS)-funded Prevention and Risk Identification of SUDEP (sudden unexplained death in epilepsy) Mortality (PRISM) project to identify risk factors for sudden death in epilepsy.

RESULTS: Comparative evaluations of Cloudwave with traditional desktop approaches to compute cardiac measures (eg, QRS complexes, RR intervals, and instantaneous heart rate) on epilepsy patient data show one order of magnitude improvement for single-channel ECG data and 20 times improvement for four-channel ECG data. This enables Cloudwave to support real-time user interaction with signal data, which is semantically annotated with a novel epilepsy and seizure ontology.

DISCUSSION: Data privacy is a critical issue in using cloud infrastructure, and cloud platforms, such as Amazon Web Services, offer features to support Health Insurance Portability and Accountability Act standards.

CONCLUSION: The Cloudwave platform is a new approach to leverage of large-scale electrophysiological data for advancing multicenter clinical research.}, } @article {pmid24319361, year = {2013}, author = {Yassa, S and Chelouah, R and Kadima, H and Granado, B}, title = {Multi-objective approach for energy-aware workflow scheduling in cloud computing environments.}, journal = {TheScientificWorldJournal}, volume = {2013}, number = {}, pages = {350934}, doi = {10.1155/2013/350934}, pmid = {24319361}, issn = {1537-744X}, mesh = {*Algorithms ; Energy Transfer ; Information Storage and Retrieval/*methods ; *Internet ; *Signal Processing, Computer-Assisted ; *Software ; *Workflow ; }, abstract = {We address the problem of scheduling workflow applications on heterogeneous computing systems like cloud computing infrastructures. In general, the cloud workflow scheduling is a complex optimization problem which requires considering different criteria so as to meet a large number of QoS (Quality of Service) requirements. Traditional research in workflow scheduling mainly focuses on the optimization constrained by time or cost without paying attention to energy consumption. The main contribution of this study is to propose a new approach for multi-objective workflow scheduling in clouds, and present the hybrid PSO algorithm to optimize the scheduling performance. Our method is based on the Dynamic Voltage and Frequency Scaling (DVFS) technique to minimize energy consumption. This technique allows processors to operate in different voltage supply levels by sacrificing clock frequencies. This multiple voltage involves a compromise between the quality of schedules and energy. 
Simulation results on synthetic and real-world scientific applications highlight the robust performance of the proposed approach.}, } @article {pmid24316562, year = {2013}, author = {Fong, EM and Chung, WY}, title = {Mobile cloud-computing-based healthcare service by noncontact ECG monitoring.}, journal = {Sensors (Basel, Switzerland)}, volume = {13}, number = {12}, pages = {16451-16473}, pmid = {24316562}, issn = {1424-8220}, mesh = {Cell Phone/*instrumentation ; Computer Systems ; Delivery of Health Care/*methods ; Electrocardiography/*instrumentation/*methods ; Humans ; Internet/*instrumentation ; Telemedicine/*instrumentation/*methods ; }, abstract = {Noncontact electrocardiogram (ECG) measurement technique has gained popularity these days owing to its noninvasive features and convenience in daily life use. This paper presents mobile cloud computing for a healthcare system where a noncontact ECG measurement method is employed to capture biomedical signals from users. Healthcare service is provided to continuously collect biomedical signals from multiple locations. To observe and analyze the ECG signals in real time, a mobile device is used as a mobile monitoring terminal. In addition, a personalized healthcare assistant is installed on the mobile device; several healthcare features such as health status summaries, medication QR code scanning, and reminders are integrated into the mobile application. Health data are being synchronized into the healthcare cloud computing service (Web server system and Web server dataset) to ensure a seamless healthcare monitoring system and anytime and anywhere coverage of network connection is available. Together with a Web page application, medical data are easily accessed by medical professionals or family members. Web page performance evaluation was conducted to ensure minimal Web server latency. 
The system demonstrates better availability of off-site and up-to-the-minute patient data, which can help detect health problems early and keep elderly patients out of the emergency room, thus providing a better and more comprehensive healthcare cloud computing service.}, } @article {pmid24303320, year = {2013}, author = {Ohno-Machado, L and Farcas, C and Kim, J and Wang, S and Jiang, X}, title = {Genomes in the cloud: balancing privacy rights and the public good.}, journal = {AMIA Joint Summits on Translational Science proceedings. AMIA Joint Summits on Translational Science}, volume = {2013}, number = {}, pages = {128}, pmid = {24303320}, issn = {2153-4063}, support = {R00 LM011392/LM/NLM NIH HHS/United States ; }, abstract = {The NIH-funded iDASH1 National Center for Biomedical Computing was created in 2010 with the goal of developing infrastructure, algorithms, and tools to integrate Data for Analysis, 'anonymization,' and SHaring. iDASH is based on the premise that, while a strong case for not sharing information to preserve individual privacy can be made, an equally compelling case for sharing genome information for the public good (i.e., to support new discoveries that promote health or alleviate the burden of disease) should also be made. In fact, these cases do not need to be mutually exclusive: genome data sharing on a cloud does not necessarily have to compromise individual privacy, although current practices need significant improvement. So far, protection of subject data from re-identification and misuse has been relying primarily on regulations such as HIPAA, the Common Rule, and GINA. 
However, protection of biometrics such as a genome requires specialized infrastructure and tools.}, } @article {pmid24298441, year = {2013}, author = {Rajkumar, R and Sriman Narayana Iyengar, NC}, title = {Dynamic Integration of Mobile JXTA with Cloud Computing for Emergency Rural Public Health Care.}, journal = {Osong public health and research perspectives}, volume = {4}, number = {5}, pages = {255-264}, pmid = {24298441}, issn = {2210-9099}, abstract = {OBJECTIVES: The existing processes of health care systems where data collection requires a great deal of labor with high-end tasks to retrieve and analyze information, are usually slow, tedious, and error prone, which restrains their clinical diagnostic and monitoring capabilities. Research is now focused on integrating cloud services with P2P JXTA to identify systematic dynamic process for emergency health care systems. The proposal is based on the concepts of a community cloud for preventative medicine, to help promote a healthy rural community. We investigate the approaches of patient health monitoring, emergency care, and an ambulance alert alarm (AAA) under mobile cloud-based telecare or community cloud controller systems.

METHODS: Considering permanent mobile users, an efficient health promotion method is proposed. Experiments were conducted to verify the effectiveness of the method. The performance was evaluated from September 2011 to July 2012. A total of 1,856,454 cases were transported and referred to hospital, identified with health problems, and were monitored. We selected all the peer groups and the control server N0 which controls N1, N2, and N3 proxied peer groups. The hospital cloud controller maintains the database of the patients through a JXTA network.

RESULTS: Among 1,856,454 transported cases, with 1,712,877 beneficiary cases, there were 1,662,834 lives saved; 8,500 cases were transported per day, and 104,530 transported cases were found to be registered in a JXTA network.}, } @article {pmid24296075, year = {2014}, author = {Lin, CW and Abdul, SS and Clinciu, DL and Scholl, J and Jin, X and Lu, H and Chen, SS and Iqbal, U and Heineck, MJ and Li, YC}, title = {Empowering village doctors and enhancing rural healthcare using cloud computing in a rural area of mainland China.}, journal = {Computer methods and programs in biomedicine}, volume = {113}, number = {2}, pages = {585-592}, doi = {10.1016/j.cmpb.2013.10.005}, pmid = {24296075}, issn = {1872-7565}, mesh = {China ; Computer Security ; *Electronic Health Records ; Humans ; *Medical Informatics ; Physicians/*psychology ; *Power, Psychological ; *Quality of Health Care ; *Rural Health Services/standards ; User-Computer Interface ; Workforce ; }, abstract = {BACKGROUND: China's healthcare system often struggles to meet the needs of its 900 million people living in rural areas due to major challenges in preventive medicine and management of chronic diseases. Here we address some of these challenges by equipping village doctors (ViDs) with Health Information Technology and developing an electronic health record (EHR) system which collects individual patient information electronically to aid with implementation of chronic disease management programs.

CONCLUSION: The registered case histories were referred from the Hospital community cloud (HCC). SMS messages were sent from node N0 to the relay peers which connected to the N1, N2, and N3 nodes, controlled by the cloud controller through a JXTA network.}, } @article {pmid24296075, year = {2014}, author = {Lin, CW and Abdul, SS and Clinciu, DL and Scholl, J and Jin, X and Lu, H and Chen, SS and Iqbal, U and Heineck, MJ and Li, YC}, title = {Empowering village doctors and enhancing rural healthcare using cloud computing in a rural area of mainland China.}, journal = {Computer methods and programs in biomedicine}, volume = {113}, number = {2}, pages = {585-592}, doi = {10.1016/j.cmpb.2013.10.005}, pmid = {24296075}, issn = {1872-7565}, mesh = {China ; Computer Security ; *Electronic Health Records ; Humans ; *Medical Informatics ; Physicians/*psychology ; *Power, Psychological ; *Quality of Health Care ; *Rural Health Services/standards ; User-Computer Interface ; Workforce ; }, abstract = {BACKGROUND: China's healthcare system often struggles to meet the needs of its 900 million people living in rural areas due to major challenges in preventive medicine and management of chronic diseases. Here we address some of these challenges by equipping village doctors (ViDs) with Health Information Technology and developing an electronic health record (EHR) system which collects individual patient information electronically to aid with implementation of chronic disease management programs.

METHODS: An EHR system based on a cloud-computing architecture was developed and deployed in Xilingol county of Inner Mongolia using various computing resources (hardware and software) to deliver services over the health network using Internet when available. The system supports the work at all levels of the healthcare system, including the work of ViDs in rural areas. An analysis done on 291,087 EHRs created from November 2008 to June 2011 evaluated the impact the EHR system has on preventive medicine and chronic disease management programs in rural China.

RESULTS: From 2008 to 2011 health records were created for 291,087 (26.25%) of 1,108,951 total Xilingol residents, with 10,240 cases of hypertension and 1152 cases of diabetes diagnosed and registered. Furthermore, 2945 hypertensive and 305 diabetic patients enrolled in follow-up. Implementing the EHR system revealed a high rate of cholecystectomies leading to investigations and findings of drinking water contaminated with metals. Measures were taken to inform the population and clean drinking water was supplied.

CONCLUSIONS: The cloud-based EHR approach improved the care provision for ViDs in rural China and increased the efficiency of the healthcare system to monitor the health status of the population and to manage preventive care efforts. It also helped discover contaminated water in one of the project areas revealing further benefits if the system is expanded and improved.}, } @article {pmid24288665, year = {2013}, author = {Lin, YC and Yu, CS and Lin, YJ}, title = {Enabling large-scale biomedical analysis in the cloud.}, journal = {BioMed research international}, volume = {2013}, number = {}, pages = {185679}, pmid = {24288665}, issn = {2314-6141}, mesh = {*Biomedical Research ; *Computational Biology ; Genomics ; Software ; Translational Research, Biomedical ; }, abstract = {Recent progress in high-throughput instrumentations has led to an astonishing growth in both volume and complexity of biomedical data collected from various sources. The planet-size data brings serious challenges to the storage and computing technologies. Cloud computing is an alternative to crack the nut because it gives concurrent consideration to enable storage and high-performance computing on large-scale data. This work briefly introduces the data intensive computing system and summarizes existing cloud-based resources in bioinformatics. 
These developments and applications would facilitate biomedical research to make the vast amount of diversification data meaningful and usable.}, } @article {pmid24285552, year = {2014}, author = {Verheggen, K and Barsnes, H and Martens, L}, title = {Distributed computing and data storage in proteomics: many hands make light work, and a stronger memory.}, journal = {Proteomics}, volume = {14}, number = {4-5}, pages = {367-377}, doi = {10.1002/pmic.201300288}, pmid = {24285552}, issn = {1615-9861}, mesh = {Computational Biology/*methods ; Computers ; *Information Storage and Retrieval ; Internet ; Proteomics/*methods ; Software ; }, abstract = {Modern day proteomics generates ever more complex data, causing the requirements on the storage and processing of such data to outgrow the capacity of most desktop computers. To cope with the increased computational demands, distributed architectures have gained substantial popularity in the recent years. In this review, we provide an overview of the current techniques for distributed computing, along with examples of how the techniques are currently being employed in the field of proteomics. We thus underline the benefits of distributed computing in proteomics, while also pointing out the potential issues and pitfalls involved.}, } @article {pmid24282750, year = {2013}, author = {Schoenhagen, P and Zimmermann, M and Falkner, J}, title = {Advanced 3-D analysis, client-server systems, and cloud computing-Integration of cardiovascular imaging data into clinical workflows of transcatheter aortic valve replacement.}, journal = {Cardiovascular diagnosis and therapy}, volume = {3}, number = {2}, pages = {80-92}, pmid = {24282750}, issn = {2223-3652}, abstract = {Degenerative aortic stenosis is highly prevalent in the aging populations of industrialized countries and is associated with poor prognosis. Surgical valve replacement has been the only established treatment with documented improvement of long-term outcome. 
However, many of the older patients with aortic stenosis (AS) are high-risk or ineligible for surgery. For these patients, transcatheter aortic valve replacement (TAVR) has emerged as a treatment alternative. The TAVR procedure is characterized by a lack of visualization of the operative field. Therefore, pre- and intra-procedural imaging is critical for patient selection, pre-procedural planning, and intra-operative decision-making. Incremental to conventional angiography and 2-D echocardiography, multidetector computed tomography (CT) has assumed an important role before TAVR. The analysis of 3-D CT data requires extensive post-processing during direct interaction with the dataset, using advance analysis software. Organization and storage of the data according to complex clinical workflows and sharing of image information have become a critical part of these novel treatment approaches. Optimally, the data are integrated into a comprehensive image data file accessible to multiple groups of practitioners across the hospital. This creates new challenges for data management requiring a complex IT infrastructure, spanning across multiple locations, but is increasingly achieved with client-server solutions and private cloud technology. 
This article describes the challenges and opportunities created by the increased amount of patient-specific imaging data in the context of TAVR.}, } @article {pmid24278007, year = {2013}, author = {Hase, T and Ghosh, S and Yamanaka, R and Kitano, H}, title = {Harnessing diversity towards the reconstructing of large scale gene regulatory networks.}, journal = {PLoS computational biology}, volume = {9}, number = {11}, pages = {e1003361}, pmid = {24278007}, issn = {1553-7358}, mesh = {Algorithms ; Computational Biology/*methods ; Databases, Genetic ; Gene Expression/*genetics ; Gene Expression Profiling ; Gene Regulatory Networks/*genetics ; }, abstract = {Elucidating gene regulatory network (GRN) from large scale experimental data remains a central challenge in systems biology. Recently, numerous techniques, particularly consensus driven approaches combining different algorithms, have become a potentially promising strategy to infer accurate GRNs. Here, we develop a novel consensus inference algorithm, TopkNet that can integrate multiple algorithms to infer GRNs. Comprehensive performance benchmarking on a cloud computing framework demonstrated that (i) a simple strategy to combine many algorithms does not always lead to performance improvement compared to the cost of consensus and (ii) TopkNet integrating only high-performance algorithms provide significant performance improvement compared to the best individual algorithms and community prediction. These results suggest that a priori determination of high-performance algorithms is a key to reconstruct an unknown regulatory network. Similarity among gene-expression datasets can be useful to determine potential optimal algorithms for reconstruction of unknown regulatory networks, i.e., if expression-data associated with known regulatory network is similar to that with unknown regulatory network, optimal algorithms determined for the known regulatory network can be repurposed to infer the unknown regulatory network. 
Based on this observation, we developed a quantitative measure of similarity among gene-expression datasets and demonstrated that, if similarity between the two expression datasets is high, TopkNet integrating algorithms that are optimal for known dataset perform well on the unknown dataset. The consensus framework, TopkNet, together with the similarity measure proposed in this study provides a powerful strategy towards harnessing the wisdom of the crowds in reconstruction of unknown regulatory networks.}, } @article {pmid24261387, year = {2013}, author = {Gerard, P and Kapadia, N and Chang, PT and Acharya, J and Seiler, M and Lefkovitz, Z}, title = {Extended outlook: description, utilization, and daily applications of cloud technology in radiology.}, journal = {AJR. American journal of roentgenology}, volume = {201}, number = {6}, pages = {W809-11}, doi = {10.2214/AJR.12.9673}, pmid = {24261387}, issn = {1546-3141}, mesh = {*Access to Information ; *Computer Security ; Health Insurance Portability and Accountability Act ; Humans ; *Information Storage and Retrieval ; *Internet ; *Medical Records Systems, Computerized ; Privacy ; *Radiology ; Radiology Information Systems ; Software ; United States ; }, abstract = {OBJECTIVE: The purpose of this article is to discuss the concept of cloud technology, its role in medical applications and radiology, the role of the radiologist in using and accessing these vast resources of information, and privacy concerns and HIPAA compliance strategies.

CONCLUSION: Cloud computing is the delivery of shared resources, software, and information to computers and other devices as a metered service. This technology has a promising role in the sharing of patient medical information and appears to be particularly suited for application in radiology, given the field's inherent need for storage and access to large amounts of data. The radiology cloud has significant strengths, such as providing centralized storage and access, reducing unnecessary repeat radiologic studies, and potentially allowing radiologic second opinions more easily. There are significant cost advantages to cloud computing because of a decreased need for infrastructure and equipment by the institution. Private clouds may be used to ensure secure storage of data and compliance with HIPAA. In choosing a cloud service, there are important aspects, such as disaster recovery plans, uptime, and security audits, that must be considered. Given that the field of radiology has become almost exclusively digital in recent years, the future of secure storage and easy access to imaging studies lies within cloud computing technology.}, } @article {pmid24232290, year = {2013}, author = {Hsieh, JC and Li, AH and Yang, CC}, title = {Mobile, cloud, and big data computing: contributions, challenges, and new directions in telecardiology.}, journal = {International journal of environmental research and public health}, volume = {10}, number = {11}, pages = {6131-6153}, pmid = {24232290}, issn = {1660-4601}, mesh = {*Cardiology ; Echocardiography ; Electrocardiography ; Humans ; Image Interpretation, Computer-Assisted ; Information Storage and Retrieval/*methods ; *Remote Consultation ; }, abstract = {Many studies have indicated that computing technology can enable off-site cardiologists to read patients' electrocardiograph (ECG), echocardiography (ECHO), and relevant images via smart phones during pre-hospital, in-hospital, and post-hospital teleconsultation, which not only 
identifies emergency cases in need of immediate treatment, but also prevents the unnecessary re-hospitalizations. Meanwhile, several studies have combined cloud computing and mobile computing to facilitate better storage, delivery, retrieval, and management of medical files for telecardiology. In the future, the aggregated ECG and images from hospitals worldwide will become big data, which should be used to develop an e-consultation program helping on-site practitioners deliver appropriate treatment. With information technology, real-time tele-consultation and tele-diagnosis of ECG and images can be practiced via an e-platform for clinical, research, and educational purposes. While being devoted to promote the application of information technology onto telecardiology, we need to resolve several issues: (1) data confidentiality in the cloud, (2) data interoperability among hospitals, and (3) network latency and accessibility. If these challenges are overcome, tele-consultation will be ubiquitous, easy to perform, inexpensive, and beneficial. Most importantly, these services will increase global collaboration and advance clinical practice, education, and scientific research in cardiology.}, } @article {pmid24225647, year = {2013}, author = {Su, CJ and Chiang, CY}, title = {IAServ: an intelligent home care web services platform in a cloud for aging-in-place.}, journal = {International journal of environmental research and public health}, volume = {10}, number = {11}, pages = {6106-6130}, pmid = {24225647}, issn = {1660-4601}, mesh = {Aged ; Delivery of Health Care/*methods ; Health Services Needs and Demand ; *Home Care Services ; Humans ; Information Storage and Retrieval/*methods ; *Internet ; }, abstract = {As the elderly population has been rapidly expanding and the core tax-paying population has been shrinking, the need for adequate elderly health and housing services continues to grow while the resources to provide such services are becoming increasingly scarce. 
Thus, increasing the efficiency of the delivery of healthcare services through the use of modern technology is a pressing issue. The seamless integration of such enabling technologies as ontology, intelligent agents, web services, and cloud computing is transforming healthcare from hospital-based treatments to home-based self-care and preventive care. A ubiquitous healthcare platform based on this technological integration, which synergizes service providers with patients' needs to be developed to provide personalized healthcare services at the right time, in the right place, and the right manner. This paper presents the development and overall architecture of IAServ (the Intelligent Aging-in-place Home care Web Services Platform) to provide personalized healthcare service ubiquitously in a cloud computing setting to support the most desirable and cost-efficient method of care for the aged-aging in place. The IAServ is expected to offer intelligent, pervasive, accurate and contextually-aware personal care services. Architecturally the implemented IAServ leverages web services and cloud computing to provide economic, scalable, and robust healthcare services over the Internet.}, } @article {pmid24222895, year = {2013}, author = {Huang, CC and Lin, CY and Chang, CW and Tang, CY}, title = {Enzyme reaction annotation using cloud techniques.}, journal = {BioMed research international}, volume = {2013}, number = {}, pages = {140237}, pmid = {24222895}, issn = {2314-6141}, mesh = {*Computational Biology ; Enzymes/*chemistry/metabolism ; Kinetics ; Molecular Sequence Annotation ; *Software ; }, abstract = {An understanding of the activities of enzymes could help to elucidate the metabolic pathways of thousands of chemical reactions that are catalyzed by enzymes in living systems. Sophisticated applications such as drug design and metabolic reconstruction could be developed using accurate enzyme reaction annotation. 
Because accurate enzyme reaction annotation methods create potential for enhanced production capacity in these applications, they have received greater attention in the global market. We propose the enzyme reaction prediction (ERP) method as a novel tool to deduce enzyme reactions from domain architecture. We used several frequency relationships between architectures and reactions to enhance the annotation rates for single and multiple catalyzed reactions. The deluge of information which arose from high-throughput techniques in the postgenomic era has improved our understanding of biological data, although it presents obstacles in the data-processing stage. The high computational capacity provided by cloud computing has resulted in an exponential growth in the volume of incoming data. Cloud services also relieve the requirement for large-scale memory space required by this approach to analyze enzyme kinetic data. Our tool is designed as a single execution file; thus, it could be applied to any cloud platform in which multiple queries are supported.}, } @article {pmid24191340, year = {2013}, author = {Puustjärvi, J and Puustjärvi, L}, title = {Practising cloud-based telemedicine in developing countries.}, journal = {International journal of electronic healthcare}, volume = {7}, number = {3}, pages = {181-204}, doi = {10.1504/IJEH.2013.057407}, pmid = {24191340}, issn = {1741-8453}, mesh = {Delivery of Health Care ; *Developing Countries ; Humans ; Internet ; Physicians ; *Telemedicine ; }, abstract = {In industrialised countries, telemedicine has proven to be a valuable tool for enabling access to knowledge and allowing information exchange, and showing that it is possible to provide good quality of healthcare to isolated communities. However, there are many barriers to the widespread implementation of telemedicine in rural areas of developing countries. These include deficient internet connectivity and sophisticated peripheral medical devices. 
Furthermore, developing countries have very high patients-per-doctor ratios. In this paper, we report our work on developing a cloud-based health information system, which promotes telemedicine and patient-centred healthcare by exploiting modern information and communication technologies such as OWL-ontologies and SQL-triggers. The reason for using cloud technology is twofold. First, cloud service models are easily adaptable for sharing patients health information, which is of prime importance in patient-centred healthcare as well as in telemedicine. Second, the cloud and the consulting physicians may locate anywhere in the internet.}, } @article {pmid24187234, year = {2013}, author = {Dogmus, Z and Papantoniou, A and Kilinc, M and Yildirim, SA and Erdem, E and Patoglu, V}, title = {Rehabilitation robotics ontology on the cloud.}, journal = {IEEE ... International Conference on Rehabilitation Robotics : [proceedings]}, volume = {2013}, number = {}, pages = {6650415}, doi = {10.1109/ICORR.2013.6650415}, pmid = {24187234}, issn = {1945-7901}, mesh = {Internet ; *Rehabilitation ; *Robotics ; }, abstract = {We introduce the first formal rehabilitation robotics ontology, called RehabRobo-Onto, to represent information about rehabilitation robots and their properties; and a software system RehabRobo-Query to facilitate access to this ontology. RehabRobo-Query is made available on the cloud, utilizing Amazon Web services, so that 1) rehabilitation robot designers around the world can add/modify information about their robots in RehabRobo-Onto, and 2) rehabilitation robot designers and physical medicine experts around the world can access the knowledge in RehabRobo-Onto by means of questions about robots, in natural language, with the guide of the intelligent userinterface of RehabRobo-Query. The ontology system consisting of RehabRobo-Onto and RehabRobo-Query is of great value to robot designers as well as physical therapists and medical doctors. 
On the one hand, robot designers can access various properties of the existing robots and to the related publications to further improve the state-of-the-art. On the other hand, physical therapists and medical doctors can utilize the ontology to compare rehabilitation robots and to identify the ones that serve best to cover their needs, or to evaluate the effects of various devices for targeted joint exercises on patients with specific disorders.}, } @article {pmid24180186, year = {2013}, author = {Waxer, N and Ninan, D and Ma, A and Dominguez, N}, title = {Continuous quality monitoring through analytics and cloud computing.}, journal = {Physician executive}, volume = {39}, number = {5}, pages = {36-39}, pmid = {24180186}, issn = {0898-2759}, mesh = {Humans ; Information Storage and Retrieval/*methods ; *Internet ; *Total Quality Management ; United States ; }, } @article {pmid24139021, year = {2014}, author = {Kaur, PD and Chana, I}, title = {Cloud based intelligent system for delivering health care as a service.}, journal = {Computer methods and programs in biomedicine}, volume = {113}, number = {1}, pages = {346-359}, doi = {10.1016/j.cmpb.2013.09.013}, pmid = {24139021}, issn = {1872-7565}, mesh = {Algorithms ; Delivery of Health Care/*organization & administration ; Health Status ; *Internet ; }, abstract = {The promising potential of cloud computing and its convergence with technologies such as mobile computing, wireless networks, sensor technologies allows for creation and delivery of newer type of cloud services. In this paper, we advocate the use of cloud computing for the creation and management of cloud based health care services. As a representative case study, we design a Cloud Based Intelligent Health Care Service (CBIHCS) that performs real time monitoring of user health data for diagnosis of chronic illness such as diabetes. 
Advanced body sensor components are utilized to gather user specific health data and store it in cloud based storage repositories
Annual International Conference}, volume = {2013}, number = {}, pages = {7257-7261}, doi = {10.1109/EMBC.2013.6611233}, pmid = {24111420}, issn = {2694-0604}, mesh = {Algorithms ; Cell Phone ; Computer Communication Networks ; Computer Simulation ; *Computer Systems ; Conservation of Energy Resources ; Equipment Design ; Humans ; Stochastic Processes ; Telemedicine/*instrumentation/methods ; Wireless Technology ; }, abstract = {Although mobile health monitoring where mobile sensors continuously gather, process, and update sensor readings (e.g. vital signals) from patient's sensors is emerging, little effort has been investigated in an energy-efficient management of sensor information gathering and processing. Mobile health monitoring with the focus of energy consumption may instead be holistically analyzed and systematically designed as a global solution to optimization subproblems. We propose a distributed and energy-saving mobile health platform, called mHealthMon where mobile users publish/access sensor data via a cloud computing-based distributed P2P overlay network. The key objective is to satisfy the mobile health monitoring application's quality of service requirements by modeling each subsystem: mobile clients with medical sensors, wireless network medium, and distributed cloud services. By simulations based on experimental data, we present the proposed system can achieve up to 10.1 times more energy-efficient and 20.2 times faster compared to a standalone mobile health monitoring application, in various mobile health monitoring scenarios applying a realistic mobility model.}, } @article {pmid24110652, year = {2013}, author = {Bressan, N and James, A and McGregor, C}, title = {Integration of drug dosing data with physiological data streams using a cloud computing paradigm.}, journal = {Annual International Conference of the IEEE Engineering in Medicine and Biology Society. IEEE Engineering in Medicine and Biology Society. 
Annual International Conference}, volume = {2013}, number = {}, pages = {4175-4178}, doi = {10.1109/EMBC.2013.6610465}, pmid = {24110652}, issn = {2694-0604}, support = {//Canadian Institutes of Health Research/Canada ; }, mesh = {*Database Management Systems ; *Drug Monitoring ; *Electronic Health Records ; Humans ; Intensive Care, Neonatal/*methods ; *Internet ; *Monitoring, Physiologic ; Pharmaceutical Preparations/administration & dosage ; Pharmacokinetics ; Systems Integration ; }, abstract = {Many drugs are used during the provision of intensive care for the preterm newborn infant. Recommendations for drug dosing in newborns depend upon data from population based pharmacokinetic research. There is a need to be able to modify drug dosing in response to the preterm infant's response to the standard dosing recommendations. The real-time integration of physiological data with drug dosing data would facilitate individualised drug dosing for these immature infants. This paper proposes the use of a novel computational framework that employs real-time, temporal data analysis for this task. Deployment of the framework within the cloud computing paradigm will enable widespread distribution of individualized drug dosing for newborn infants.}, } @article {pmid24110238, year = {2013}, author = {Stokes, TH and Venugopalan, J and Hubbard, EN and Wang, MD}, title = {A pilot biomedical engineering course in rapid prototyping for mobile health.}, journal = {Annual International Conference of the IEEE Engineering in Medicine and Biology Society. IEEE Engineering in Medicine and Biology Society. 
Annual International Conference}, volume = {2013}, number = {}, pages = {2515-2518}, pmid = {24110238}, issn = {2694-0604}, support = {R01 CA108468/CA/NCI NIH HHS/United States ; U54CA119338/CA/NCI NIH HHS/United States ; P20 GM072069/GM/NIGMS NIH HHS/United States ; U54 CA119338/CA/NCI NIH HHS/United States ; P20GM072069/GM/NIGMS NIH HHS/United States ; R01CA108468/CA/NCI NIH HHS/United States ; }, mesh = {Asthma/pathology/prevention & control ; Biomedical Engineering/education ; Child ; Computer-Aided Design ; Curriculum ; Humans ; Pain Management/instrumentation/*methods ; Pilot Projects ; Radio Frequency Identification Device ; *Telemedicine ; }, abstract = {Rapid prototyping of medically assistive mobile devices promises to fuel innovation and provides opportunity for hands-on engineering training in biomedical engineering curricula. This paper presents the design and outcomes of a course offered during a 16-week semester in Fall 2011 with 11 students enrolled. The syllabus covered a mobile health design process from end-to-end, including storyboarding, non-functional prototypes, integrated circuit programming, 3D modeling, 3D printing, cloud computing database programming, and developing patient engagement through animated videos describing the benefits of a new device. Most technologies presented in this class are open source and thus provide unlimited "hackability". They are also cost-effective and easily transferrable to other departments.}, } @article {pmid24110214, year = {2013}, author = {Fu, J and Hao, W and White, T and Yan, Y and Jones, M and Jan, YK}, title = {Capturing and analyzing wheelchair maneuvering patterns with mobile cloud computing.}, journal = {Annual International Conference of the IEEE Engineering in Medicine and Biology Society. IEEE Engineering in Medicine and Biology Society. 
Annual International Conference}, volume = {2013}, number = {}, pages = {2419-2422}, doi = {10.1109/EMBC.2013.6610027}, pmid = {24110214}, issn = {2694-0604}, support = {8P20GM103447/GM/NIGMS NIH HHS/United States ; }, mesh = {Cell Phone ; Humans ; Information Storage and Retrieval ; Internet ; Motion ; Signal Processing, Computer-Assisted ; *Wheelchairs ; }, abstract = {Power wheelchairs have been widely used to provide independent mobility to people with disabilities. Despite great advancements in power wheelchair technology, research shows that wheelchair related accidents occur frequently. To ensure safe maneuverability, capturing wheelchair maneuvering patterns is fundamental to enable other research, such as safe robotic assistance for wheelchair users. In this study, we propose to record, store, and analyze wheelchair maneuvering data by means of mobile cloud computing. Specifically, the accelerometer and gyroscope sensors in smart phones are used to record wheelchair maneuvering data in real-time. Then, the recorded data are periodically transmitted to the cloud for storage and analysis. The analyzed results are then made available to various types of users, such as mobile phone users, traditional desktop users, etc. The combination of mobile computing and cloud computing leverages the advantages of both techniques and extends the smart phone's capabilities of computing and data storage via the Internet. We performed a case study to implement the mobile cloud computing framework using Android smart phones and Google App Engine, a popular cloud computing platform. 
Experimental results demonstrated the feasibility of the proposed mobile cloud computing framework.}, } @article {pmid24110179, year = {2013}, author = {Cheng, C and Brown, RC and Cohen, LL and Venugopalan, J and Stokes, TH and Wang, MD}, title = {iACT--an interactive mHealth monitoring system to enhance psychotherapy for adolescents with sickle cell disease.}, journal = {Annual International Conference of the IEEE Engineering in Medicine and Biology Society. IEEE Engineering in Medicine and Biology Society. Annual International Conference}, volume = {2013}, number = {}, pages = {2279-2282}, pmid = {24110179}, issn = {2694-0604}, support = {RC2 CA148265/CA/NCI NIH HHS/United States ; U54 CA119338/CA/NCI NIH HHS/United States ; 1RC2CA148265/CA/NCI NIH HHS/United States ; U54CA119338/CA/NCI NIH HHS/United States ; }, mesh = {*Acceptance and Commitment Therapy ; Adolescent ; Anemia, Sickle Cell/*psychology/*therapy ; *Cell Phone ; Child ; Humans ; *Psychotherapy ; *Telemedicine ; }, abstract = {Sickle cell disease (SCD) is the most common inherited disease, and SCD symptoms impact functioning and well-being. For example, adolescents with SCD have a higher tendency of psychological problems than the general population. Acceptance and Commitment Therapy (ACT), a cognitive-behavioral therapy, is an effective intervention to promote quality of life and functioning in adolescents with chronic illness. However, traditional visit-based therapy sessions are restrained by challenges, such as limited follow-up, insufficient data collection, low treatment adherence, and delayed intervention. In this paper, we present Instant Acceptance and Commitment Therapy (iACT), a system designed to enhance the quality of pediatric ACT. iACT utilizes text messaging technology, which is the most popular cell phone activity among adolescents, to conduct real-time psychotherapy interventions. 
The system is built on cloud computing technologies, which provides a convenient and cost-effective monitoring environment. To evaluate iACT, a trial with 60 adolescents with SCD is being conducted in conjunction with the Georgia Institute of Technology, Children's Healthcare of Atlanta, and Georgia State University.}, } @article {pmid24110019, year = {2013}, author = {Shen, CP and Zhou, W and Lin, FS and Sung, HY and Lam, YY and Chen, W and Lin, JW and Pan, MK and Chiu, MJ and Lai, F}, title = {Epilepsy analytic system with cloud computing.}, journal = {Annual International Conference of the IEEE Engineering in Medicine and Biology Society. IEEE Engineering in Medicine and Biology Society. Annual International Conference}, volume = {2013}, number = {}, pages = {1644-1647}, doi = {10.1109/EMBC.2013.6609832}, pmid = {24110019}, issn = {2694-0604}, mesh = {Algorithms ; *Electroencephalography ; Epilepsy/*diagnosis ; Humans ; Internet ; Signal Processing, Computer-Assisted ; Support Vector Machine ; User-Computer Interface ; Wavelet Analysis ; }, abstract = {Biomedical data analytic system has played an important role in doing the clinical diagnosis for several decades. Today, it is an emerging research area of analyzing these big data to make decision support for physicians. This paper presents a parallelized web-based tool with cloud computing service architecture to analyze the epilepsy. There are many modern analytic functions which are wavelet transform, genetic algorithm (GA), and support vector machine (SVM) cascaded in the system. To demonstrate the effectiveness of the system, it has been verified by two kinds of electroencephalography (EEG) data, which are short term EEG and long term EEG. The results reveal that our approach achieves the total classification accuracy higher than 90%. 
In addition, the entire training time is accelerated about 4.66 times and the prediction time also meets real-time requirements.
The rationale for direct observation is to ascertain that students have acquired the core clinical competencies needed to care for patients. Too often student observation results in highly variable evaluations which are skewed by factors other than the student's actual performance. Among the barriers to effective direct observation and assessment include the lack of effective tools and strategies for assuring that transparent standards are used for judging clinical competency in authentic clinical settings. We developed a web-based content management system under the name, Just in Time Medicine (JIT), to address many of these issues. The goals of JIT were fourfold: First, to create a self-service interface allowing faculty with average computing skills to author customizable content and criterion-based assessment tools displayable on internet enabled devices, including mobile devices; second, to create an assessment and feedback tool capable of capturing learner progress related to hundreds of clinical skills; third, to enable easy access and utilization of these tools by faculty for learner assessment in authentic clinical settings as a means of just in time faculty development; fourth, to create a permanent record of the trainees' observed skills useful for both learner and program evaluation. From July 2010 through October 2012, we implemented a JIT enabled clinical evaluation exercise (CEX) among 367 third year internal medicine students. Observers (attending physicians and residents) performed CEX assessments using JIT to guide and document their observations, record their time observing and providing feedback to the students, and their overall satisfaction. Inter-rater reliability and validity were assessed with 17 observers who viewed six videotaped student-patient encounters and by measuring the correlation between student CEX scores and their scores on subsequent standardized-patient OSCE exams. A total of 3567 CEXs were completed by 516 observers. 
The average number of evaluations per student was 9.7 (±1.8 SD) and the average number of CEXs completed per observer was 6.9 (±15.8 SD). Observers spent less than 10 min on 43-50% of the CEXs and 68.6% on feedback sessions. A majority of observers (92%) reported satisfaction with the CEX. Inter-rater reliability was measured at 0.69 among all observers viewing the videotapes and these ratings adequately discriminated competent from non-competent performance. The measured CEX grades correlated with subsequent student performance on an end-of-year OSCE. We conclude that the use of JIT is feasible in capturing discrete clinical performance data with a high degree of user satisfaction. Our embedded checklists had adequate inter-rater reliability and concurrent and predictive validity.}, } @article {pmid24106693, year = {2013}, author = {D'Agostino, D and Clematis, A and Quarati, A and Cesini, D and Chiappori, F and Milanesi, L and Merelli, I}, title = {Cloud infrastructures for in silico drug discovery: economic and practical aspects.}, journal = {BioMed research international}, volume = {2013}, number = {}, pages = {138012}, pmid = {24106693}, issn = {2314-6141}, mesh = {Biotechnology ; *Computer Simulation ; *Computing Methodologies ; *Drug Discovery ; Humans ; }, abstract = {Cloud computing opens new perspectives for small-medium biotechnology laboratories that need to perform bioinformatics analysis in a flexible and effective way. This seems particularly true for hybrid clouds that couple the scalability offered by general-purpose public clouds with the greater control and ad hoc customizations supplied by the private ones. A hybrid cloud broker, acting as an intermediary between users and public providers, can support customers in the selection of the most suitable offers, optionally adding the provisioning of dedicated services with higher levels of quality. 
This paper analyses some economic and practical aspects of exploiting cloud computing in a real research scenario for the in silico drug discovery in terms of requirements, costs, and computational load based on the number of expected users. In particular, our work is aimed at supporting both the researchers and the cloud broker delivering an IaaS cloud infrastructure for biotechnology laboratories exposing different levels of nonfunctional requirements.}, } @article {pmid24086314, year = {2013}, author = {Dashti, A and Komarov, I and D'Souza, RM}, title = {Efficient computation of k-Nearest Neighbour Graphs for large high-dimensional data sets on GPU clusters.}, journal = {PloS one}, volume = {8}, number = {9}, pages = {e74113}, pmid = {24086314}, issn = {1932-6203}, mesh = {Cluster Analysis ; *Computer Graphics ; }, abstract = {This paper presents an implementation of the brute-force exact k-Nearest Neighbor Graph (k-NNG) construction for ultra-large high-dimensional data cloud. The proposed method uses Graphics Processing Units (GPUs) and is scalable with multi-levels of parallelism (between nodes of a cluster, between different GPUs on a single node, and within a GPU). The method is applicable to homogeneous computing clusters with a varying number of nodes and GPUs per node. 
We achieve a 6-fold speedup in data processing as compared with an optimized method running on a cluster of CPUs and bring a hitherto impossible [Formula: see text]-NNG generation for a dataset of twenty million images with 15 k dimensionality into the realm of practical possibility.}, } @article {pmid24078906, year = {2013}, author = {Zhang, W and Wang, X and Lu, B and Kim, TH}, title = {Secure encapsulation and publication of biological services in the cloud computing environment.}, journal = {BioMed research international}, volume = {2013}, number = {}, pages = {170580}, pmid = {24078906}, issn = {2314-6141}, mesh = {*Computational Biology ; Computer Communication Networks ; *Computer Security ; *Internet ; Search Engine ; *Software ; }, abstract = {Secure encapsulation and publication for bioinformatics software products based on web service are presented, and the basic function of biological information is realized in the cloud computing environment. In the encapsulation phase, the workflow and function of bioinformatics software are conducted, the encapsulation interfaces are designed, and the runtime interaction between users and computers is simulated. In the publication phase, the execution and management mechanisms and principles of the GRAM components are analyzed. The functions such as remote user job submission and job status query are implemented by using the GRAM components. The services of bioinformatics software are published to remote users. 
Finally the basic prototype system of the biological cloud is achieved.}, } @article {pmid24037463, year = {2014}, author = {Huang, HK and Deshpande, R and Documet, J and Le, AH and Lee, J and Ma, K and Liu, BJ}, title = {Medical imaging informatics simulators: a tutorial.}, journal = {International journal of computer assisted radiology and surgery}, volume = {9}, number = {3}, pages = {433-447}, pmid = {24037463}, issn = {1861-6429}, mesh = {Diagnosis, Computer-Assisted/*instrumentation ; Diagnostic Imaging/*methods ; Humans ; *Models, Educational ; Radiology/*education ; *Radiology Information Systems ; }, abstract = {PURPOSE: A medical imaging informatics infrastructure (MIII) platform is an organized method of selecting tools and synthesizing data from HIS/RIS/PACS/ePR systems with the aim of developing an imaging-based diagnosis or treatment system. Evaluation and analysis of these systems can be made more efficient by designing and implementing imaging informatics simulators. This tutorial introduces the MIII platform and provides the definition of treatment/diagnosis systems, while primarily focusing on the development of the related simulators.

METHODS: A medical imaging informatics (MII) simulator in this context is defined as a system integration of many selected imaging and data components from the MIII platform and clinical treatment protocols, which can be used to simulate patient workflow and data flow starting from diagnostic procedures to the completion of treatment. In these processes, DICOM and HL-7 standards, IHE workflow profiles, and Web-based tools are emphasized. From the information collected in the database of a specific simulator, evidence-based medicine can be hypothesized to choose and integrate optimal clinical decision support components. Other relevant, selected clinical resources in addition to data and tools from the HIS/RIS/PACS and ePRs platform may also be tailored to develop the simulator. These resources can include image content indexing, 3D rendering with visualization, data grid and cloud computing, computer-aided diagnosis (CAD) methods, specialized image-assisted surgical, and radiation therapy technologies.

RESULTS: Five simulators will be discussed in this tutorial. The PACS-ePR simulator with image distribution is the cradle of the other simulators. It supplies the necessary PACS-based ingredients and data security for the development of four other simulators: the data grid simulator for molecular imaging, CAD-PACS, radiation therapy simulator, and image-assisted surgery simulator. The purpose and benefits of each simulator with respect to its clinical relevance are presented.

CONCLUSION: The concept, design, and development of these five simulators have been implemented in laboratory settings for education and training. Some of them have been extended to clinical applications in hospital environments.}, } @article {pmid24027578, year = {2013}, author = {Deb, B and Srirama, SN}, title = {Social networks for eHealth solutions on cloud.}, journal = {Frontiers in genetics}, volume = {4}, number = {}, pages = {171}, pmid = {24027578}, issn = {1664-8021}, } @article {pmid24021384, year = {2013}, author = {Nordberg, H and Bhatia, K and Wang, K and Wang, Z}, title = {BioPig: a Hadoop-based analytic toolkit for large-scale sequence data.}, journal = {Bioinformatics (Oxford, England)}, volume = {29}, number = {23}, pages = {3014-3019}, doi = {10.1093/bioinformatics/btt528}, pmid = {24021384}, issn = {1367-4811}, mesh = {Algorithms ; Computational Biology/*methods ; *Data Interpretation, Statistical ; *High-Throughput Nucleotide Sequencing ; *Software ; }, abstract = {MOTIVATION: The recent revolution in sequencing technologies has led to an exponential growth of sequence data. As a result, most of the current bioinformatics tools become obsolete as they fail to scale with data. To tackle this 'data deluge', here we introduce the BioPig sequence analysis toolkit as one of the solutions that scale to data and computation.

RESULTS: We built BioPig on the Apache's Hadoop MapReduce system and the Pig data flow language. Compared with traditional serial and MPI-based algorithms, BioPig has three major advantages: first, BioPig's programmability greatly reduces development time for parallel bioinformatics applications; second, testing BioPig with up to 500 Gb sequences demonstrates that it scales automatically with size of data; and finally, BioPig can be ported without modification on many Hadoop infrastructures, as tested with Magellan system at National Energy Research Scientific Computing Center and the Amazon Elastic Compute Cloud. In summary, BioPig represents a novel program framework with the potential to greatly accelerate data-intensive bioinformatics analysis.}, } @article {pmid24002571, year = {2013}, author = {Na, YH and Suh, TS and Kapp, DS and Xing, L}, title = {Toward a web-based real-time radiation treatment planning system in a cloud computing environment.}, journal = {Physics in medicine and biology}, volume = {58}, number = {18}, pages = {6525-6540}, doi = {10.1088/0031-9155/58/18/6525}, pmid = {24002571}, issn = {1361-6560}, support = {1R01 CA133474/CA/NCI NIH HHS/United States ; }, mesh = {Algorithms ; Computer Systems ; Computers ; Equipment Design ; Head and Neck Neoplasms/radiotherapy ; Humans ; Internet ; Lung Neoplasms/radiotherapy ; Male ; Monte Carlo Method ; Neoplasms/*radiotherapy ; Programming Languages ; Prostatic Neoplasms/radiotherapy ; Radiometry/methods ; Radiotherapy Planning, Computer-Assisted/*methods ; Radiotherapy, Intensity-Modulated/methods ; Software ; }, abstract = {To exploit the potential dosimetric advantages of intensity modulated radiation therapy (IMRT) and volumetric modulated arc therapy (VMAT), an in-depth approach is required to provide efficient computing methods. This needs to incorporate clinically related organ specific constraints, Monte Carlo (MC) dose calculations, and large-scale plan optimization. 
This paper describes our first steps toward a web-based real-time radiation treatment planning system in a cloud computing environment (CCE). The Amazon Elastic Compute Cloud (EC2) with a master node (named m2.xlarge containing 17.1 GB of memory, two virtual cores with 3.25 EC2 Compute Units each, 420 GB of instance storage, 64-bit platform) is used as the backbone of cloud computing for dose calculation and plan optimization. The master node is able to scale the workers on an 'on-demand' basis. MC dose calculation is employed to generate accurate beamlet dose kernels by parallel tasks. The intensity modulation optimization uses total-variation regularization (TVR) and generates piecewise constant fluence maps for each initial beam direction in a distributed manner over the CCE. The optimized fluence maps are segmented into deliverable apertures. The shape of each aperture is iteratively rectified to be a sequence of arcs using the manufacture's constraints. The output plan file from the EC2 is sent to the simple storage service. Three de-identified clinical cancer treatment plans have been studied for evaluating the performance of the new planning platform with 6 MV flattening filter free beams (40 × 40 cm(2)) from the Varian TrueBeam(TM) STx linear accelerator. A CCE leads to speed-ups of up to 14-fold for both dose kernel calculations and plan optimizations in the head and neck, lung, and prostate cancer cases considered in this study. The proposed system relies on a CCE that is able to provide an infrastructure for parallel and distributed computing. The resultant plans from the cloud computing are identical to PC-based IMRT and VMAT plans, confirming the reliability of the cloud computing platform. This cloud computing infrastructure has been established for a radiation treatment planning. 
It substantially improves the speed of inverse planning and makes future on-treatment adaptive re-planning possible.}, } @article {pmid23992916, year = {2013}, author = {Al-Zaiti, SS and Shusterman, V and Carey, MG}, title = {Novel technical solutions for wireless ECG transmission & analysis in the age of the internet cloud.}, journal = {Journal of electrocardiology}, volume = {46}, number = {6}, pages = {540-545}, doi = {10.1016/j.jelectrocard.2013.07.002}, pmid = {23992916}, issn = {1532-8430}, mesh = {Early Diagnosis ; Electrocardiography/*methods ; Humans ; *Internet ; Myocardial Infarction/*diagnosis/*prevention & control ; Telemedicine/*methods ; United States ; *Wireless Technology ; }, abstract = {Current guidelines recommend early reperfusion therapy for ST-elevation myocardial infarction (STEMI) within 90 min of first medical encounter. Telecardiology entails the use of advanced communication technologies to transmit the prehospital 12-lead electrocardiogram (ECG) to offsite cardiologists for early triage to the cath lab; which has been shown to dramatically reduce door-to-balloon time and total mortality. However, hospitals often find adopting ECG transmission technologies very challenging. The current review identifies seven major technical challenges of prehospital ECG transmission, including: paramedics inconvenience and transport delay; signal noise and interpretation errors; equipment malfunction and transmission failure; reliability of mobile phone networks; lack of compliance with the standards of digital ECG formats; poor integration with electronic medical records; and costly hardware and software pre-requisite installation. Current and potential solutions to address each of these technical challenges are discussed in details and include: automated ECG transmission protocols; annotatable waveform-based ECGs; optimal routing solutions; and the use of cloud computing systems rather than vendor-specific processing stations. 
Nevertheless, strategies to monitor transmission effectiveness and patient outcomes are essential to sustain initial gains of implementing ECG transmission technologies.}, } @article {pmid23974723, year = {2013}, author = {Baars, BJ and Franklin, S and Ramsoy, TZ}, title = {Global workspace dynamics: cortical "binding and propagation" enables conscious contents.}, journal = {Frontiers in psychology}, volume = {4}, number = {}, pages = {200}, pmid = {23974723}, issn = {1664-1078}, abstract = {A global workspace (GW) is a functional hub of binding and propagation in a population of loosely coupled signaling elements. In computational applications, GW architectures recruit many distributed, specialized agents to cooperate in resolving focal ambiguities. In the brain, conscious experiences may reflect a GW function. For animals, the natural world is full of unpredictable dangers and opportunities, suggesting a general adaptive pressure for brains to resolve focal ambiguities quickly and accurately. GW theory aims to understand the differences between conscious and unconscious brain events. In humans and related species the cortico-thalamic (C-T) core is believed to underlie conscious aspects of perception, thinking, learning, feelings of knowing (FOK), felt emotions, visual imagery, working memory, and executive control. Alternative theoretical perspectives are also discussed. The C-T core has many anatomical hubs, but conscious percepts are unitary and internally consistent at any given moment. Over time, conscious contents constitute a very large, open set. This suggests that a brain-based GW capacity cannot be localized in a single anatomical hub. Rather, it should be sought in a functional hub - a dynamic capacity for binding and propagation of neural signals over multiple task-related networks, a kind of neuronal cloud computing. 
In this view, conscious contents can arise in any region of the C-T core when multiple input streams settle on a winner-take-all equilibrium. The resulting conscious gestalt may ignite an any-to-many broadcast, lasting ∼100-200 ms, and trigger widespread adaptation in previously established networks. To account for the great range of conscious contents over time, the theory suggests an open repertoire of binding coalitions that can broadcast via theta/gamma or alpha/gamma phase coupling, like radio channels competing for a narrow frequency band. Conscious moments are thought to hold only 1-4 unrelated items; this small focal capacity may be the biological price to pay for global access. Visuotopic maps in cortex specialize in features like color, retinal size, motion, object identity, and egocentric/allocentric framing, so that a binding coalition for the sight of a rolling billiard ball in nearby space may resonate among activity maps of LGN, V1-V4, MT, IT, as well as the dorsal stream. Spatiotopic activity maps can bind into coherent gestalts using adaptive resonance (reentry). Single neurons can join a dominant coalition by phase tuning to regional oscillations in the 4-12 Hz range. Sensory percepts may bind and broadcast from posterior cortex, while non-sensory FOKs may involve prefrontal and frontotemporal areas. The anatomy and physiology of the hippocampal complex suggest a GW architecture as well. 
In the intact brain the hippocampal complex may support conscious event organization as well as episodic memory storage.}, } @article {pmid23971032, year = {2013}, author = {Kaján, L and Yachdav, G and Vicedo, E and Steinegger, M and Mirdita, M and Angermüller, C and Böhm, A and Domke, S and Ertl, J and Mertes, C and Reisinger, E and Staniewski, C and Rost, B}, title = {Cloud prediction of protein structure and function with PredictProtein for Debian.}, journal = {BioMed research international}, volume = {2013}, number = {}, pages = {398968}, pmid = {23971032}, issn = {2314-6141}, support = {R01 LM007329/LM/NLM NIH HHS/United States ; R01-LM07329/LM/NLM NIH HHS/United States ; }, mesh = {Amino Acid Sequence ; Base Sequence ; Computer Simulation ; Data Mining/methods ; Databases, Protein ; *Internet ; *Models, Chemical ; *Models, Genetic ; *Models, Molecular ; Molecular Sequence Data ; *Programming Languages ; *Proteins/chemistry/genetics/ultrastructure ; Sequence Analysis, Protein/methods ; *Software ; Structure-Activity Relationship ; }, abstract = {We report the release of PredictProtein for the Debian operating system and derivatives, such as Ubuntu, Bio-Linux, and Cloud BioLinux. The PredictProtein suite is available as a standard set of open source Debian packages. The release covers the most popular prediction methods from the Rost Lab, including methods for the prediction of secondary structure and solvent accessibility (profphd), nuclear localization signals (predictnls), and intrinsically disordered regions (norsnet). 
We also present two case studies that successfully utilize PredictProtein packages for high performance computing in the cloud: the first analyzes protein disorder for whole organisms, and the second analyzes the effect of all possible single sequence variants in protein coding regions of the human genome.}, } @article {pmid23965254, year = {2013}, author = {Rodrigues, JJ and de la Torre, I and Fernández, G and López-Coronado, M}, title = {Analysis of the security and privacy requirements of cloud-based electronic health records systems.}, journal = {Journal of medical Internet research}, volume = {15}, number = {8}, pages = {e186}, pmid = {23965254}, issn = {1438-8871}, mesh = {*Computer Security ; Information Management ; *Medical Records Systems, Computerized ; *Privacy ; }, abstract = {BACKGROUND: The Cloud Computing paradigm offers eHealth systems the opportunity to enhance the features and functionality that they offer. However, moving patients' medical information to the Cloud implies several risks in terms of the security and privacy of sensitive health records. In this paper, the risks of hosting Electronic Health Records (EHRs) on the servers of third-party Cloud service providers are reviewed. To protect the confidentiality of patient information and facilitate the process, some suggestions for health care providers are made. Moreover, security issues that Cloud service providers should address in their platforms are considered.

OBJECTIVE: To show that, before moving patient health records to the Cloud, security and privacy concerns must be considered by both health care providers and Cloud service providers. Security requirements of a generic Cloud service provider are analyzed.

METHODS: To study the latest in Cloud-based computing solutions, bibliographic material was obtained mainly from Medline sources. Furthermore, direct contact was made with several Cloud service providers.

RESULTS: Some of the security issues that should be considered by both Cloud service providers and their health care customers are role-based access, network security mechanisms, data encryption, digital signatures, and access monitoring. Furthermore, to guarantee the safety of the information and comply with privacy policies, the Cloud service provider must be compliant with various certifications and third-party requirements, such as SAS70 Type II, PCI DSS Level 1, ISO 27001, and the US Federal Information Security Management Act (FISMA).

CONCLUSIONS: Storing sensitive information such as EHRs in the Cloud means that precautions must be taken to ensure the safety and confidentiality of the data. A relationship built on trust with the Cloud service provider is essential to ensure a transparent process. Cloud service providers must make certain that all security mechanisms are in place to avoid unauthorized access and data breaches. Patients must be kept informed about how their data are being managed.}, } @article {pmid23957008, year = {2013}, author = {Chen, J and Zhang, D and Yan, W and Yang, D and Shen, B}, title = {Translational bioinformatics for diagnostic and prognostic prediction of prostate cancer in the next-generation sequencing era.}, journal = {BioMed research international}, volume = {2013}, number = {}, pages = {901578}, pmid = {23957008}, issn = {2314-6141}, mesh = {Computational Biology/*methods ; Genome, Human ; Genomics ; *High-Throughput Nucleotide Sequencing ; Humans ; Male ; Prognosis ; Prostatic Neoplasms/diagnosis/*genetics/pathology ; *Protein Biosynthesis ; }, abstract = {The discovery of prostate cancer biomarkers has been boosted by the advent of next-generation sequencing (NGS) technologies. Nevertheless, many challenges still exist in exploiting the flood of sequence data and translating them into routine diagnostics and prognosis of prostate cancer. Here we review the recent developments in prostate cancer biomarkers by high throughput sequencing technologies. 
We highlight some fundamental issues of translational bioinformatics and the potential use of cloud computing in NGS data processing for the improvement of prostate cancer treatment.}, } @article {pmid23943636, year = {2013}, author = {Lin, CF and Valladares, O and Childress, DM and Klevak, E and Geller, ET and Hwang, YC and Tsai, EA and Schellenberg, GD and Wang, LS}, title = {DRAW+SneakPeek: analysis workflow and quality metric management for DNA-seq experiments.}, journal = {Bioinformatics (Oxford, England)}, volume = {29}, number = {19}, pages = {2498-2500}, pmid = {23943636}, issn = {1367-4811}, support = {P30 AG010124/AG/NIA NIH HHS/United States ; U01 HG006375/HG/NHGRI NIH HHS/United States ; R01 MH094382/MH/NIMH NIH HHS/United States ; U24 AG021886/AG/NIA NIH HHS/United States ; P01 AG017586/AG/NIA NIH HHS/United States ; P50 NS053488/NS/NINDS NIH HHS/United States ; U24 AG041689/AG/NIA NIH HHS/United States ; U01 AG032984/AG/NIA NIH HHS/United States ; R01 MH089004/MH/NIMH NIH HHS/United States ; }, mesh = {Biometry/*methods ; DNA/*analysis ; Internet ; Programming Languages ; Sequence Analysis, DNA/*methods ; *Software Design ; }, abstract = {SUMMARY: We report our new DRAW+SneakPeek software for DNA-seq analysis. DNA resequencing analysis workflow (DRAW) automates the workflow of processing raw sequence reads including quality control, read alignment and variant calling on high-performance computing facilities such as Amazon elastic compute cloud. SneakPeek provides an effective interface for reviewing dozens of quality metrics reported by DRAW, so users can assess the quality of data and diagnose problems in their sequencing procedures. Both DRAW and SneakPeek are freely available under the MIT license, and are available as Amazon machine images to be used directly on Amazon cloud with minimal installation.

AVAILABILITY: DRAW+SneakPeek is released under the MIT license and is available for academic and nonprofit use for free. The information about source code, Amazon machine images and instructions on how to install and run DRAW+SneakPeek locally and on Amazon elastic compute cloud is available at the National Institute on Aging Genetics of Alzheimer's Disease Data Storage Site (http://www.niagads.org/) and Wang lab Web site (http://wanglab.pcbi.upenn.edu/).}, } @article {pmid23920993, year = {2013}, author = {Mirza, H and El-Masri, S}, title = {National electronic medical records integration on cloud computing system.}, journal = {Studies in health technology and informatics}, volume = {192}, number = {}, pages = {1219}, pmid = {23920993}, issn = {1879-8365}, mesh = {Data Curation/*methods ; Electronic Health Records/*organization & administration ; *Health Information Exchange ; Information Storage and Retrieval/*methods ; Internet/*organization & administration ; Medical Record Linkage/*methods ; Saudi Arabia ; }, abstract = {Few Healthcare providers have an advanced level of Electronic Medical Record (EMR) adoption. Others have a low level and most have no EMR at all. Cloud computing technology is a new emerging technology that has been used in other industry and showed a great success. Despite the great features of Cloud computing, they haven't been utilized fairly yet in healthcare industry. This study presents an innovative Healthcare Cloud Computing system for Integrating Electronic Health Record (EHR). 
The proposed Cloud system applies the Cloud Computing technology on EHR system, to present a comprehensive EHR integrated environment.}, } @article {pmid23920851, year = {2013}, author = {Fujita, H and Uchimura, Y and Waki, K and Omae, K and Takeuchi, I and Ohe, K}, title = {Development and clinical study of mobile 12-lead electrocardiography based on cloud computing for cardiac emergency.}, journal = {Studies in health technology and informatics}, volume = {192}, number = {}, pages = {1077}, pmid = {23920851}, issn = {1879-8365}, mesh = {*Cell Phone ; Electrocardiography/*instrumentation ; Emergency Medical Services/*methods ; Equipment Design ; Equipment Failure Analysis ; Humans ; *Internet ; Myocardial Infarction/*diagnosis ; Reproducibility of Results ; Sensitivity and Specificity ; Telemedicine/*instrumentation/methods ; Telemetry/*instrumentation ; }, abstract = {To improve emergency services for accurate diagnosis of cardiac emergency, we developed a low-cost new mobile electrocardiography system "Cloud Cardiology®" based upon cloud computing for prehospital diagnosis. This comprises a compact 12-lead ECG unit equipped with Bluetooth and Android Smartphone with an application for transmission. Cloud server enables us to share ECG simultaneously inside and outside the hospital. We evaluated the clinical effectiveness by conducting a clinical trial with historical comparison to evaluate this system in a rapid response car in the real emergency service settings. We found that this system has an ability to shorten the onset to balloon time of patients with acute myocardial infarction, resulting in better clinical outcome. 
Here we propose that cloud-computing based simultaneous data sharing could be powerful solution for emergency service for cardiology, along with its significant clinical outcome.}, } @article {pmid23920847, year = {2013}, author = {Kondoh, H and Teramoto, K and Kawai, T and Mochida, M and Nishimura, M}, title = {Development of the regional EPR and PACS sharing system on the infrastructure of cloud computing technology controlled by patient identifier cross reference manager.}, journal = {Studies in health technology and informatics}, volume = {192}, number = {}, pages = {1073}, pmid = {23920847}, issn = {1879-8365}, mesh = {Computer Security ; *Confidentiality ; Electronic Health Records/*organization & administration ; *Health Records, Personal ; Internet/*organization & administration ; Japan ; Medical Record Linkage/*methods ; Patient Identification Systems/*organization & administration ; Radiology Information Systems/*organization & administration ; Regional Health Planning ; }, abstract = {A Newly developed Oshidori-Net2, providing medical professionals with remote access to electronic patient record systems (EPR) and PACSs of four hospitals, of different venders, using cloud computing technology and patient identifier cross reference manager. The operation was started from April 2012. The patients moved to other hospital were applied. 
Objective is to show the merit and demerit of the new system.}, } @article {pmid23920773, year = {2013}, author = {Gruetz, R and Franke, T and Dickmann, F}, title = {Concept for preservation and reuse of genome and biomedical imaging research data.}, journal = {Studies in health technology and informatics}, volume = {192}, number = {}, pages = {999}, pmid = {23920773}, issn = {1879-8365}, mesh = {Data Curation/*standards ; Database Management Systems/*standards ; Databases, Genetic/*standards ; Genomics/*methods/*standards ; Germany ; Guidelines as Topic ; Information Storage and Retrieval/*standards ; Medical Record Linkage/*standards ; }, abstract = {The German Research Foundation (DFG) recommends preserving research data for at least ten years. The DFG funded project LABIMI/F establishes an infrastructure for preservation, retrieval and reuse of biomedical research data based on grid/cloud computing technology for two applications a) genome and b) imaging data. The requirements for this infrastructure were determined during workshops with relevant stakeholders. Afterwards product evaluations were conducted and the relevant products were integrated into the infrastructure concept. In this paper, we address the suitability of our solution concerning the fulfillment of the requirements. It is shown that the solution satisfies five of the eight requirement categories completely and the other three categories partly. 
Furthermore, in order to prove the adherence to the widely accepted Open Archival Information System (OAIS) standard, we successfully performed a mapping of our technical components to the functional entities of the OAIS.}, } @article {pmid23920710, year = {2013}, author = {Takeuchi, H and Mayuzumi, Y and Kodama, N and Sato, K}, title = {Personal healthcare system using cloud computing.}, journal = {Studies in health technology and informatics}, volume = {192}, number = {}, pages = {936}, pmid = {23920710}, issn = {1879-8365}, mesh = {Data Mining/methods ; *Electronic Health Records ; *Health Records, Personal ; *Internet ; *Medical Records ; *Mobile Applications ; Remote Consultation/*methods ; *User-Computer Interface ; }, abstract = {A personal healthcare system used with cloud computing has been developed. It enables a daily time-series of personal health and lifestyle data to be stored in the cloud through mobile devices. The cloud automatically extracts personally useful information, such as rules and patterns concerning lifestyle and health conditions embedded in the personal big data, by using a data mining technology. 
The system provides three editions (Diet, Lite, and Pro) corresponding to users' needs.}, } @article {pmid23920671, year = {2013}, author = {Jayapandian, CP and Chen, CH and Bozorgi, A and Lhatoo, SD and Zhang, GQ and Sahoo, SS}, title = {Electrophysiological signal analysis and visualization using Cloudwave for epilepsy clinical research.}, journal = {Studies in health technology and informatics}, volume = {192}, number = {}, pages = {817-821}, pmid = {23920671}, issn = {1879-8365}, support = {P20 NS076965/NS/NINDS NIH HHS/United States ; UL1 TR000439/TR/NCATS NIH HHS/United States ; UL1TR000439/TR/NCATS NIH HHS/United States ; 1-P20-NS076965-01/NS/NINDS NIH HHS/United States ; }, mesh = {Algorithms ; Biomedical Research/*methods ; Databases, Factual ; Diagnosis, Computer-Assisted/*methods ; Electroencephalography/*methods/statistics & numerical data ; Epilepsy/*diagnosis/physiopathology ; Humans ; Information Storage and Retrieval/*methods ; *Internet ; Software ; *User-Computer Interface ; }, abstract = {Epilepsy is the most common serious neurological disorder affecting 50-60 million persons worldwide. Electrophysiological data recordings, such as electroencephalogram (EEG), are the gold standard for diagnosis and pre-surgical evaluation in epilepsy patients. The increasing trend towards multi-center clinical studies require signal visualization and analysis tools to support real time interaction with signal data in a collaborative environment, which cannot be supported by traditional desktop-based standalone applications. As part of the Prevention and Risk Identification of SUDEP Mortality (PRISM) project, we have developed a Web-based electrophysiology data visualization and analysis platform called Cloudwave using highly scalable open source cloud computing infrastructure. Cloudwave is integrated with the PRISM patient cohort identification tool called MEDCIS (Multi-modality Epilepsy Data Capture and Integration System). 
The Epilepsy and Seizure Ontology (EpSO) underpins both Cloudwave and MEDCIS to support query composition and result retrieval. Cloudwave is being used by clinicians and research staff at the University Hospital - Case Medical Center (UH-CMC) Epilepsy Monitoring Unit (EMU) and will be progressively deployed at four EMUs in the United States and the United Kingdom as part of the PRISM project.}, } @article {pmid23920663, year = {2013}, author = {Figueiredo, JF and Motta, GH}, title = {SocialRAD: an infrastructure for a secure, cooperative, asynchronous teleradiology system.}, journal = {Studies in health technology and informatics}, volume = {192}, number = {}, pages = {778--782}, pmid = {23920663}, issn = {1879-8365}, mesh = {Brazil ; *Computer Security ; *Confidentiality ; Information Storage and Retrieval/*methods ; Radiology Information Systems ; *Software ; Systems Integration ; Teleradiology/*methods ; *User-Computer Interface ; }, abstract = {The popularity of teleradiology services has enabled a major advance in the provision of health services to areas with difficult geographical access. However, this potential has also brought with it a number of challenges: the large volume of data, characteristic of imaging tests, and security requirements designed to ensure confidentiality and integrity. Moreover, there is also a number of ethical questions involving the dominant model on the market, whereby this service is outsourced to private companies, and is not directly undertaken by professional radiologists. Therefore, the present paper proposes a cooperative model of teleradiology, where health professionals interact directly with the hospitals providing patient care. This has involved the integration of a wide range of technologies, such as the interconnection models Peer-to-Peer, Cloud Computing, Dynamic DNS, RESTful Web Services, as well as security and interoperability standards, with the aim of promoting a secure, collaborative asynchronous environment. 
The developed model is currently being used on an experimental basis, providing teleradiology support to cities in the north-eastern hinterland of Brazil, and is fulfilling all expectations.}, } @article {pmid23920510, year = {2013}, author = {Ribeiro, LS and Rodrigues, RP and Costa, C and Oliveira, JL}, title = {Enabling outsourcing XDS for imaging on the public cloud.}, journal = {Studies in health technology and informatics}, volume = {192}, number = {}, pages = {33--37}, pmid = {23920510}, issn = {1879-8365}, mesh = {*Computer Security ; *Database Management Systems ; Information Dissemination/*methods ; Information Storage and Retrieval/*methods ; *Internet ; Medical Record Linkage/*methods ; Portugal ; Radiology Information Systems/*organization & administration ; }, abstract = {Picture Archiving and Communication System (PACS) has been the main paradigm in supporting medical imaging workflows during the last decades. Despite its consolidation, the appearance of Cross-Enterprise Document Sharing for imaging (XDS-I), within IHE initiative, constitutes a great opportunity to readapt PACS workflow for inter-institutional data exchange. XDS-I provides a centralized discovery of medical imaging and associated reports. However, the centralized XDS-I actors (document registry and repository) must be deployed in a trustworthy node in order to safeguard patient privacy, data confidentiality and integrity. This paper presents XDS for Protected Imaging (XDS-p), a new approach to XDS-I that is capable of being outsourced (e.g. 
Cloud Computing) while maintaining privacy, confidentiality, integrity and legal concerns about patients' medical information.}, } @article {pmid23897403, year = {2013}, author = {Ahnn, JH and Potkonjak, M}, title = {mHealthMon: toward energy-efficient and distributed mobile health monitoring using parallel offloading.}, journal = {Journal of medical systems}, volume = {37}, number = {5}, pages = {9957}, pmid = {23897403}, issn = {1573-689X}, mesh = {Computer Communication Networks ; Computer Systems ; Humans ; *Mobile Applications ; Models, Theoretical ; *Telemedicine ; Wireless Technology ; }, abstract = {Although mobile health monitoring where mobile sensors continuously gather, process, and update sensor readings (e.g. vital signals) from patient's sensors is emerging, little effort has been investigated in an energy-efficient management of sensor information gathering and processing. Mobile health monitoring with the focus of energy consumption may instead be holistically analyzed and systematically designed as a global solution to optimization subproblems. This paper presents an attempt to decompose the very complex mobile health monitoring system whose layer in the system corresponds to decomposed subproblems, and interfaces between them are quantified as functions of the optimization variables in order to orchestrate the subproblems. We propose a distributed and energy-saving mobile health platform, called mHealthMon where mobile users publish/access sensor data via a cloud computing-based distributed P2P overlay network. The key objective is to satisfy the mobile health monitoring application's quality of service requirements by modeling each subsystem: mobile clients with medical sensors, wireless network medium, and distributed cloud services. 
By simulations based on experimental data, we present the proposed system can achieve up to 10.1 times more energy-efficient and 20.2 times faster compared to a standalone mobile health monitoring application, in various mobile health monitoring scenarios applying a realistic mobility model.}, } @article {pmid23877155, year = {2013}, author = {Yokohama, N}, title = {[Design and study of parallel computing environment of Monte Carlo simulation for particle therapy planning using a public cloud-computing infrastructure].}, journal = {Nihon Hoshasen Gijutsu Gakkai zasshi}, volume = {69}, number = {7}, pages = {773-777}, doi = {10.6009/jjrt.2013_jsrt_69.7.773}, pmid = {23877155}, issn = {0369-4305}, mesh = {Computing Methodologies ; *Internet ; *Monte Carlo Method ; Proton Therapy ; Radiotherapy Planning, Computer-Assisted/*methods ; }, abstract = {This report was aimed at structuring the design of architectures and studying performance measurement of a parallel computing environment using a Monte Carlo simulation for particle therapy using a high performance computing (HPC) instance within a public cloud-computing infrastructure. Performance measurements showed an approximately 28 times faster speed than seen with single-thread architecture, combined with improved stability. 
A study of methods of optimizing the system operations also indicated lower cost.}, } @article {pmid23875683, year = {2013}, author = {Trinh, QM and Jen, FY and Zhou, Z and Chu, KM and Perry, MD and Kephart, ET and Contrino, S and Ruzanov, P and Stein, LD}, title = {Cloud-based uniform ChIP-Seq processing tools for modENCODE and ENCODE.}, journal = {BMC genomics}, volume = {14}, number = {}, pages = {494}, pmid = {23875683}, issn = {1471-2164}, support = {3U41HG004269-05S2/HG/NHGRI NIH HHS/United States ; }, mesh = {*Chromatin Immunoprecipitation ; *Software ; }, abstract = {BACKGROUND: Funded by the National Institutes of Health (NIH), the aim of the Model Organism ENCyclopedia of DNA Elements (modENCODE) project is to provide the biological research community with a comprehensive encyclopedia of functional genomic elements for both model organisms C. elegans (worm) and D. melanogaster (fly). With a total size of just under 10 terabytes of data collected and released to the public, one of the challenges faced by researchers is to extract biologically meaningful knowledge from this large data set. While the basic quality control, pre-processing, and analysis of the data has already been performed by members of the modENCODE consortium, many researchers will wish to reinterpret the data set using modifications and enhancements of the original protocols, or combine modENCODE data with other data sets. Unfortunately this can be a time consuming and logistically challenging proposition.

RESULTS: In recognition of this challenge, the modENCODE DCC has released uniform computing resources for analyzing modENCODE data on Galaxy (https://github.com/modENCODE-DCC/Galaxy), on the public Amazon Cloud (http://aws.amazon.com), and on the private Bionimbus Cloud for genomic research (http://www.bionimbus.org). In particular, we have released Galaxy workflows for interpreting ChIP-seq data which use the same quality control (QC) and peak calling standards adopted by the modENCODE and ENCODE communities. For convenience of use, we have created Amazon and Bionimbus Cloud machine images containing Galaxy along with all the modENCODE data, software and other dependencies.

CONCLUSIONS: Using these resources provides a framework for running consistent and reproducible analyses on modENCODE data, ultimately allowing researchers to use more of their time using modENCODE data, and less time moving it around.}, } @article {pmid23875173, year = {2013}, author = {Trevarton, AJ and Mann, MB and Knapp, C and Araki, H and Wren, JD and Stones-Havas, S and Black, MA and Print, CG}, title = {MelanomaDB: A Web Tool for Integrative Analysis of Melanoma Genomic Information to Identify Disease-Associated Molecular Pathways.}, journal = {Frontiers in oncology}, volume = {3}, number = {}, pages = {184}, pmid = {23875173}, issn = {2234-943X}, support = {P20 GM103636/GM/NIGMS NIH HHS/United States ; }, abstract = {Despite on-going research, metastatic melanoma survival rates remain low and treatment options are limited. Researchers can now access a rapidly growing amount of molecular and clinical information about melanoma. This information is becoming difficult to assemble and interpret due to its dispersed nature, yet as it grows it becomes increasingly valuable for understanding melanoma. Integration of this information into a comprehensive resource to aid rational experimental design and patient stratification is needed. As an initial step in this direction, we have assembled a web-accessible melanoma database, MelanomaDB, which incorporates clinical and molecular data from publically available sources, which will be regularly updated as new information becomes available. This database allows complex links to be drawn between many different aspects of melanoma biology: genetic changes (e.g., mutations) in individual melanomas revealed by DNA sequencing, associations between gene expression and patient survival, data concerning drug targets, biomarkers, druggability, and clinical trials, as well as our own statistical analysis of relationships between molecular pathways and clinical parameters that have been produced using these data sets. 
The database is freely available at http://genesetdb.auckland.ac.nz/melanomadb/about.html. A subset of the information in the database can also be accessed through a freely available web application in the Illumina genomic cloud computing platform BaseSpace at http://www.biomatters.com/apps/melanoma-profiler-for-research. The MelanomaDB database illustrates dysregulation of specific signaling pathways across 310 exome-sequenced melanomas and in individual tumors and identifies the distribution of somatic variants in melanoma. We suggest that MelanomaDB can provide a context in which to interpret the tumor molecular profiles of individual melanoma patients relative to biological information and available drug therapies.}, } @article {pmid23872175, year = {2013}, author = {O'Driscoll, A and Daugelaite, J and Sleator, RD}, title = {'Big data', Hadoop and cloud computing in genomics.}, journal = {Journal of biomedical informatics}, volume = {46}, number = {5}, pages = {774--781}, doi = {10.1016/j.jbi.2013.07.001}, pmid = {23872175}, issn = {1532-0480}, mesh = {*Human Genome Project ; Humans ; *Internet ; Software ; }, abstract = {Since the completion of the Human Genome project at the turn of the Century, there has been an unprecedented proliferation of genomic sequence data. A consequence of this is that the medical discoveries of the future will largely depend on our ability to process and analyse large genomic data sets, which continue to expand as the cost of sequencing decreases. Herein, we provide an overview of cloud computing and big data technologies, and discuss how such expertise can be used to deal with biology's big data sets. 
In particular, big data technologies such as the Apache Hadoop project, which provides distributed and parallelised data processing and analysis of petabyte (PB) scale data sets will be discussed, together with an overview of the current usage of Hadoop within the bioinformatics community.}, } @article {pmid23835611, year = {2013}, author = {Ebejer, JP and Fulle, S and Morris, GM and Finn, PW}, title = {The emerging role of cloud computing in molecular modelling.}, journal = {Journal of molecular graphics \& modelling}, volume = {44}, number = {}, pages = {177--187}, doi = {10.1016/j.jmgm.2013.06.002}, pmid = {23835611}, issn = {1873-4243}, mesh = {*Computer Simulation ; Data Mining/*methods ; *Models, Molecular ; }, abstract = {There is a growing recognition of the importance of cloud computing for large-scale and data-intensive applications. The distinguishing features of cloud computing and their relationship to other distributed computing paradigms are described, as are the strengths and weaknesses of the approach. We review the use made to date of cloud computing for molecular modelling projects and the availability of front ends for molecular modelling applications. Although the use of cloud computing technologies for molecular modelling is still in its infancy, we demonstrate its potential by presenting several case studies. 
Rapid growth can be expected as more applications become available and costs continue to fall; cloud computing can make a major contribution not just in terms of the availability of on-demand computing power, but could also spur innovation in the development of novel approaches that utilize that capacity in more effective ways.}, } @article {pmid23828721, year = {2013}, author = {Krintz, C}, title = {The AppScale Cloud Platform: Enabling Portable, Scalable Web Application Deployment.}, journal = {IEEE internet computing}, volume = {17}, number = {2}, pages = {72-75}, doi = {10.1109/MIC.2013.38}, pmid = {23828721}, issn = {1089-7801}, support = {R01 EB014877/EB/NIBIB NIH HHS/United States ; }, abstract = {AppScale is an open source distributed software system that implements a cloud platform as a service (PaaS). AppScale makes cloud applications easy to deploy and scale over disparate cloud fabrics, implementing a set of APIs and architecture that also makes apps portable across the services they employ. AppScale is API-compatible with Google App Engine (GAE) and thus executes GAE applications on-premise or over other cloud infrastructures, without modification.}, } @article {pmid23823405, year = {2013}, author = {Koufi, V and Malamateniou, F and Vassilacopoulos, G}, title = {A sophisticated mechanism for enabling real-time mobile access to PHR data.}, journal = {Studies in health technology and informatics}, volume = {190}, number = {}, pages = {148-150}, pmid = {23823405}, issn = {1879-8365}, mesh = {Computer Systems ; *Computers, Handheld ; Data Mining/*methods ; *Health Records, Personal ; Internet/*organization & administration ; Medical Records Systems, Computerized/*organization & administration ; *Search Engine ; Software ; *User-Computer Interface ; }, abstract = {Faced with rapid changes, such as growing complexity in care delivery, health systems nowadays fall short in their ability to translate knowledge into practice. 
Mobile technology holds enormous potential for transforming healthcare delivery systems which currently involve cumbersome processes that slow down care and decrease rather than improve safety. However, the limited computing, energy and information storage capabilities of mobile devices are hampering their ability to support increasingly sophisticated applications required by certain application fields, such as healthcare. This paper is concerned with a framework which provides ubiquitous mobile access to comprehensive health information at any point of care or decision making in a way that efficient utilization of mobile device resources is achieved. To this end, a cloud-based push messaging mechanism is utilized which draws upon and enhances Google Cloud Messaging service.}, } @article {pmid23822402, year = {2013}, author = {Kagadis, GC and Kloukinas, C and Moore, K and Philbin, J and Papadimitroulas, P and Alexakos, C and Nagy, PG and Visvikis, D and Hendee, WR}, title = {Cloud computing in medical imaging.}, journal = {Medical physics}, volume = {40}, number = {7}, pages = {070901}, doi = {10.1118/1.4811272}, pmid = {23822402}, issn = {2473-4209}, mesh = {Computer Security ; Delivery of Health Care ; Diagnostic Imaging/ethics/*methods ; Humans ; *Internet/ethics ; Research ; Software ; }, abstract = {Over the past century technology has played a decisive role in defining, driving, and reinventing procedures, devices, and pharmaceuticals in healthcare. Cloud computing has been introduced only recently but is already one of the major topics of discussion in research and clinical settings. The provision of extensive, easily accessible, and reconfigurable resources such as virtual systems, platforms, and applications with low service cost has caught the attention of many researchers and clinicians. Healthcare researchers are moving their efforts to the cloud, because they need adequate resources to process, store, exchange, and use large quantities of medical data. 
This Vision 20/20 paper addresses major questions related to the applicability of advanced cloud computing in medical imaging. The paper also considers security and ethical issues that accompany cloud computing.}, } @article {pmid23808052, year = {2013}, author = {Cox, M}, title = {Room for computing on the healthcare cloud.}, journal = {MGMA connexion}, volume = {13}, number = {5}, pages = {13}, pmid = {23808052}, issn = {1537-0240}, mesh = {Group Practice ; Information Storage and Retrieval/*statistics & numerical data ; *Internet ; *Medical Informatics ; United States ; }, } @article {pmid23802613, year = {2013}, author = {Zhao, S and Prenger, K and Smith, L and Messina, T and Fan, H and Jaeger, E and Stephens, S}, title = {Rainbow: a tool for large-scale whole-genome sequencing data analysis using cloud computing.}, journal = {BMC genomics}, volume = {14}, number = {}, pages = {425}, pmid = {23802613}, issn = {1471-2164}, mesh = {Cluster Analysis ; Genomics/*methods ; Humans ; *Internet ; Polymorphism, Single Nucleotide/genetics ; Sequence Analysis/*methods ; *Software ; Statistics as Topic/*methods ; }, abstract = {BACKGROUND: Technical improvements have decreased sequencing costs and, as a result, the size and number of genomic datasets have increased rapidly. Because of the lower cost, large amounts of sequence data are now being produced by small to midsize research groups. Crossbow is a software tool that can detect single nucleotide polymorphisms (SNPs) in whole-genome sequencing (WGS) data from a single subject; however, Crossbow has a number of limitations when applied to multiple subjects from large-scale WGS projects. The data storage and CPU resources that are required for large-scale whole genome sequencing data analyses are too large for many core facilities and individual laboratories to provide. 
To help meet these challenges, we have developed Rainbow, a cloud-based software package that can assist in the automation of large-scale WGS data analyses.

RESULTS: Here, we evaluated the performance of Rainbow by analyzing 44 different whole-genome-sequenced subjects. Rainbow has the capacity to process genomic data from more than 500 subjects in two weeks using cloud computing provided by the Amazon Web Service. The time includes the import and export of the data using Amazon Import/Export service. The average cost of processing a single sample in the cloud was less than 120 US dollars. Compared with Crossbow, the main improvements incorporated into Rainbow include the ability: (1) to handle BAM as well as FASTQ input files; (2) to split large sequence files for better load balance downstream; (3) to log the running metrics in data processing and monitoring multiple Amazon Elastic Compute Cloud (EC2) instances; and (4) to merge SOAPsnp outputs for multiple individuals into a single file to facilitate downstream genome-wide association studies.

CONCLUSIONS: Rainbow is a scalable, cost-effective, and open-source tool for large-scale WGS data analysis. For human WGS data sequenced by either the Illumina HiSeq 2000 or HiSeq 2500 platforms, Rainbow can be used straight out of the box. Rainbow is available for third-party implementation and use, and can be downloaded from http://s3.amazonaws.com/jnj_rainbow/index.html.}, } @article {pmid23780997, year = {2014}, author = {Aldinucci, M and Torquati, M and Spampinato, C and Drocco, M and Misale, C and Calcagno, C and Coppo, M}, title = {Parallel stochastic systems biology in the cloud.}, journal = {Briefings in bioinformatics}, volume = {15}, number = {5}, pages = {798-813}, doi = {10.1093/bib/bbt040}, pmid = {23780997}, issn = {1477-4054}, mesh = {Computational Biology ; Computer Simulation ; *Information Storage and Retrieval ; *Stochastic Processes ; *Systems Biology ; }, abstract = {The stochastic modelling of biological systems, coupled with Monte Carlo simulation of models, is an increasingly popular technique in bioinformatics. The simulation-analysis workflow may result computationally expensive reducing the interactivity required in the model tuning. In this work, we advocate the high-level software design as a vehicle for building efficient and portable parallel simulators for the cloud. In particular, the Calculus of Wrapped Components (CWC) simulator for systems biology, which is designed according to the FastFlow pattern-based approach, is presented and discussed. Thanks to the FastFlow framework, the CWC simulator is designed as a high-level workflow that can simulate CWC models, merge simulation results and statistically analyse them in a single parallel workflow in the cloud. To improve interactivity, successive phases are pipelined in such a way that the workflow begins to output a stream of analysis results immediately after simulation is started. 
Performance and effectiveness of the CWC simulator are validated on the Amazon Elastic Compute Cloud.}, } @article {pmid23775173, year = {2013}, author = {Bourgeat, P and Dore, V and Villemagne, VL and Rowe, CC and Salvado, O and Fripp, J}, title = {MilxXplore: a web-based system to explore large imaging datasets.}, journal = {Journal of the American Medical Informatics Association : JAMIA}, volume = {20}, number = {6}, pages = {1046-1052}, pmid = {23775173}, issn = {1527-974X}, mesh = {Data Mining/*methods ; Database Management Systems ; Databases as Topic/*organization & administration ; *Diagnostic Imaging ; Humans ; *Image Interpretation, Computer-Assisted ; Internet ; *Software ; }, abstract = {OBJECTIVE: As large-scale medical imaging studies are becoming more common, there is an increasing reliance on automated software to extract quantitative information from these images. As the size of the cohorts keeps increasing with large studies, there is also a need for tools that allow results from automated image processing and analysis to be presented in a way that enables fast and efficient quality checking, tagging and reporting on cases in which automatic processing failed or was problematic.

MATERIALS AND METHODS: MilxXplore is an open source visualization platform, which provides an interface to navigate and explore imaging data in a web browser, giving the end user the opportunity to perform quality control and reporting in a user friendly, collaborative and efficient way.

DISCUSSION: Compared to existing software solutions that often provide an overview of the results at the subject's level, MilxXplore pools the results of individual subjects and time points together, allowing easy and efficient navigation and browsing through the different acquisitions of a subject over time, and comparing the results against the rest of the population.

CONCLUSIONS: MilxXplore is fast, flexible and allows remote quality checks of processed imaging data, facilitating data sharing and collaboration across multiple locations, and can be easily integrated into a cloud computing pipeline. With the growing trend of open data and open science, such a tool will become increasingly important to share and publish results of imaging analysis.}, } @article {pmid23769601, year = {2013}, author = {Dimenstein, IB and Dimenstein, SI}, title = {Development of a laboratory niche Web site.}, journal = {Annals of diagnostic pathology}, volume = {17}, number = {5}, pages = {448-456}, doi = {10.1016/j.anndiagpath.2013.05.002}, pmid = {23769601}, issn = {1532-8198}, mesh = {Humans ; *Internet ; *Laboratories ; *Pathology, Surgical ; }, abstract = {This technical note presents the development of a methodological laboratory niche Web site. The "Grossing Technology in Surgical Pathology" (www.grossing-technology.com) Web site is used as an example. Although common steps in creation of most Web sites are followed, there are particular requirements for structuring the template's menu on methodological laboratory Web sites. The "nested doll principle," in which one object is placed inside another, most adequately describes the methodological approach to laboratory Web site design. Fragmentation in presenting the Web site's material highlights the discrete parts of the laboratory procedure. An optimally minimal triad of components can be recommended for the creation of a laboratory niche Web site: a main set of media, a blog, and an ancillary component (host, contact, and links). The inclusion of a blog makes the Web site a dynamic forum for professional communication. By forming links and portals, cloud computing opens opportunities for connecting a niche Web site with other Web sites and professional organizations. 
As an additional source of information exchange, methodological laboratory niche Web sites are destined to parallel both traditional and new forms, such as books, journals, seminars, webinars, and internal educational materials.}, } @article {pmid23763909, year = {2013}, author = {McClatchey, R and Branson, A and Anjum, A and Bloodsworth, P and Habib, I and Munir, K and Shamdasani, J and Soomro, K and , }, title = {Providing traceability for neuroimaging analyses.}, journal = {International journal of medical informatics}, volume = {82}, number = {9}, pages = {882-894}, doi = {10.1016/j.ijmedinf.2013.05.005}, pmid = {23763909}, issn = {1872-8243}, mesh = {Algorithms ; Brain Mapping/*methods ; Computer Systems/*statistics & numerical data ; Humans ; *Medical Informatics Computing ; *Neuroimaging ; *Software ; Workflow ; }, abstract = {INTRODUCTION: With the increasingly digital nature of biomedical data and as the complexity of analyses in medical research increases, the need for accurate information capture, traceability and accessibility has become crucial to medical researchers in the pursuance of their research goals. Grid- or Cloud-based technologies, often based on so-called Service Oriented Architectures (SOA), are increasingly being seen as viable solutions for managing distributed data and algorithms in the bio-medical domain. For neuroscientific analyses, especially those centred on complex image analysis, traceability of processes and datasets is essential but up to now this has not been captured in a manner that facilitates collaborative study.

PURPOSE AND METHOD: Few examples exist, of deployed medical systems based on Grids that provide the traceability of research data needed to facilitate complex analyses and none have been evaluated in practice. Over the past decade, we have been working with mammographers, paediatricians and neuroscientists in three generations of projects to provide the data management and provenance services now required for 21st century medical research. This paper outlines the finding of a requirements study and a resulting system architecture for the production of services to support neuroscientific studies of biomarkers for Alzheimer's disease.

RESULTS: The paper proposes a software infrastructure and services that provide the foundation for such support. It introduces the use of the CRISTAL software to provide provenance management as one of a number of services delivered on a SOA, deployed to manage neuroimaging projects that have been studying biomarkers for Alzheimer's disease.

CONCLUSIONS: In the neuGRID and N4U projects a Provenance Service has been delivered that captures and reconstructs the workflow information needed to facilitate researchers in conducting neuroimaging analyses. The software enables neuroscientists to track the evolution of workflows and datasets. It also tracks the outcomes of various analyses and provides provenance traceability throughout the lifecycle of their studies. As the Provenance Service has been designed to be generic it can be applied across the medical domain as a reusable tool for supporting medical researchers thus providing communities of researchers for the first time with the necessary tools to conduct widely distributed collaborative programmes of medical analysis.}, } @article {pmid23762824, year = {2013}, author = {Hung, CL and Hua, GJ}, title = {Cloud computing for protein-ligand binding site comparison.}, journal = {BioMed research international}, volume = {2013}, number = {}, pages = {170356}, pmid = {23762824}, issn = {2314-6141}, mesh = {Binding Sites ; Computational Biology/*methods ; Internet ; Ligands ; Proteins/*metabolism ; Software ; Time Factors ; }, abstract = {The proteome-wide analysis of protein-ligand binding sites and their interactions with ligands is important in structure-based drug design and in understanding ligand cross reactivity and toxicity. The well-known and commonly used software, SMAP, has been designed for 3D ligand binding site comparison and similarity searching of a structural proteome. SMAP can also predict drug side effects and reassign existing drugs to new indications. However, the computing scale of SMAP is limited. We have developed a high availability, high performance system that expands the comparison scale of SMAP. This cloud computing service, called Cloud-PLBS, combines the SMAP and Hadoop frameworks and is deployed on a virtual cloud computing platform. 
To handle the vast amount of experimental data on protein-ligand binding site pairs, Cloud-PLBS exploits the MapReduce paradigm as a management and parallelizing tool. Cloud-PLBS provides a web portal and scalability through which biologists can address a wide range of computer-intensive questions in biology and drug discovery.}, } @article {pmid23762819, year = {2013}, author = {Lin, CH and Wen, CH and Lin, YC and Tung, KY and Lin, RW and Lin, CY}, title = {A P2P Framework for Developing Bioinformatics Applications in Dynamic Cloud Environments.}, journal = {International journal of genomics}, volume = {2013}, number = {}, pages = {361327}, pmid = {23762819}, issn = {2314-436X}, abstract = {Bioinformatics is advanced from in-house computing infrastructure to cloud computing for tackling the vast quantity of biological data. This advance enables large number of collaborative researches to share their works around the world. In view of that, retrieving biological data over the internet becomes more and more difficult because of the explosive growth and frequent changes. Various efforts have been made to address the problems of data discovery and delivery in the cloud framework, but most of them suffer the hindrance by a MapReduce master server to track all available data. In this paper, we propose an alternative approach, called PRKad, which exploits a Peer-to-Peer (P2P) model to achieve efficient data discovery and delivery. PRKad is a Kademlia-based implementation with Round-Trip-Time (RTT) as the associated key, and it locates data according to Distributed Hash Table (DHT) and XOR metric. The simulation results exhibit that our PRKad has the low link latency to retrieve data. 
As an interdisciplinary application of P2P computing for bioinformatics, PRKad also provides good scalability for servicing a greater number of users in dynamic cloud environments.}, } @article {pmid23739354, year = {2013}, author = {Diaz, J and Arrizabalaga, S and Bustamante, P and Mesa, I and Añorga, J and Goya, J}, title = {Towards large-scale data analysis: challenges in the design of portable systems and use of Cloud computing.}, journal = {Studies in health technology and informatics}, volume = {189}, number = {}, pages = {38-43}, pmid = {23739354}, issn = {1879-8365}, mesh = {Database Management Systems/*instrumentation ; *Databases, Factual ; Equipment Design ; Equipment Failure Analysis ; Humans ; Information Storage and Retrieval/*methods ; Internet/*instrumentation ; Miniaturization ; Monitoring, Ambulatory/*instrumentation/methods ; Precision Medicine/*instrumentation/methods ; Telemedicine/*instrumentation/methods ; }, abstract = {Portable systems and global communications open a broad spectrum for new health applications. In the framework of electrophysiological applications, several challenges are faced when developing portable systems embedded in Cloud computing services. In order to facilitate new developers in this area based on our experience, five areas of interest are presented in this paper where strategies can be applied for improving the performance of portable systems: transducer and conditioning, processing, wireless communications, battery and power management. 
Likewise, for Cloud services, scalability, portability, privacy and security guidelines have been highlighted.}, } @article {pmid23715317, year = {2013}, author = {Noreña, T and Romero, E}, title = {[Medical image compression: a review].}, journal = {Biomedica : revista del Instituto Nacional de Salud}, volume = {33}, number = {1}, pages = {137-151}, doi = {10.1590/S0120-41572013000100017}, pmid = {23715317}, issn = {2590-7379}, mesh = {Algorithms ; Data Compression/*methods ; Diagnostic Imaging/*methods ; Humans ; Information Storage and Retrieval ; Visual Perception/physiology ; }, abstract = {Modern medicine is an increasingly complex activity, based on the evidence; it consists of information from multiple sources: medical record text, sound recordings, images and videos generated by a large number of devices. Medical imaging is one of the most important sources of information since they offer comprehensive support of medical procedures for diagnosis and follow-up. However, the amount of information generated by image capturing gadgets quickly exceeds storage availability in radiology services, generating additional costs in devices with greater storage capacity. Besides, the current trend of developing applications in cloud computing has limitations, even though virtual storage is available from anywhere, connections are made through internet. In these scenarios the optimal use of information necessarily requires powerful compression algorithms adapted to medical activity needs. 
In this paper we present a review of compression techniques used for image storage, and a critical analysis of them from the point of view of their use in clinical settings.}, } @article {pmid23710465, year = {2013}, author = {Jo, H and Jeong, J and Lee, M and Choi, DH}, title = {Exploiting GPUs in virtual machine for BioCloud.}, journal = {BioMed research international}, volume = {2013}, number = {}, pages = {939460}, pmid = {23710465}, issn = {2314-6141}, mesh = {Algorithms ; *Computers ; Humans ; *Software ; }, abstract = {Recently, biological applications start to be reimplemented into the applications which exploit many cores of GPUs for better computation performance. Therefore, by providing virtualized GPUs to VMs in cloud computing environment, many biological applications will willingly move into cloud environment to enhance their computation performance and utilize infinite cloud computing resource while reducing expenses for computations. In this paper, we propose a BioCloud system architecture that enables VMs to use GPUs in cloud environment. Because much of the previous research has focused on the sharing mechanism of GPUs among VMs, they cannot achieve enough performance for biological applications of which computation throughput is more crucial rather than sharing. The proposed system exploits the pass-through mode of PCI express (PCI-E) channel. By making each VM be able to access underlying GPUs directly, applications can show almost the same performance as when those are in native environment. In addition, our scheme multiplexes GPUs by using hot plug-in/out device features of PCI-E channel. By adding or removing GPUs in each VM in on-demand manner, VMs in the same physical host can time-share their GPUs. 
We implemented the proposed system using the Xen VMM and NVIDIA GPUs and showed that our prototype is highly effective for biological GPU applications in cloud environment.}, } @article {pmid23710461, year = {2013}, author = {Issa, SA and Kienzler, R and El-Kalioby, M and Tonellato, PJ and Wall, D and Bruggmann, R and Abouelhoda, M}, title = {Streaming support for data intensive cloud-based sequence analysis.}, journal = {BioMed research international}, volume = {2013}, number = {}, pages = {791051}, pmid = {23710461}, issn = {2314-6141}, support = {R01 LM011566/LM/NLM NIH HHS/United States ; T15 LM007092/LM/NLM NIH HHS/United States ; }, mesh = {*Computational Biology ; Genomics/methods ; *High-Throughput Nucleotide Sequencing ; Internet ; Sequence Analysis, DNA ; *Software ; Workflow ; }, abstract = {Cloud computing provides a promising solution to the genomics data deluge problem resulting from the advent of next-generation sequencing (NGS) technology. Based on the concepts of "resources-on-demand" and "pay-as-you-go", scientists with no or limited infrastructure can have access to scalable and cost-effective computational resources. However, the large size of NGS data causes a significant data transfer latency from the client's site to the cloud, which presents a bottleneck for using cloud computing services. In this paper, we provide a streaming-based scheme to overcome this problem, where the NGS data is processed while being transferred to the cloud. Our scheme targets the wide class of NGS data analysis tasks, where the NGS sequences can be processed independently from one another. We also provide the elastream package that supports the use of this scheme with individual analysis programs or with workflow systems. 
Experiments presented in this paper show that our solution mitigates the effect of data transfer latency and saves both time and cost of computation.}, } @article {pmid23709190, year = {2013}, author = {Tan, CH and Teh, YW}, title = {Synthetic hardware performance analysis in virtualized cloud environment for healthcare organization.}, journal = {Journal of medical systems}, volume = {37}, number = {4}, pages = {9950}, doi = {10.1007/s10916-013-9950-7}, pmid = {23709190}, issn = {1573-689X}, mesh = {*Computer Security ; Computers ; *Internet ; Software ; }, abstract = {The main obstacles in mass adoption of cloud computing for database operations in healthcare organization are the data security and privacy issues. In this paper, it is shown that IT services particularly in hardware performance evaluation in virtual machine can be accomplished effectively without IT personnel gaining access to actual data for diagnostic and remediation purposes. The proposed mechanisms utilized the hypothetical data from TPC-H benchmark, to achieve 2 objectives. First, the underlying hardware performance and consistency is monitored via a control system, which is constructed using TPC-H queries. Second, the mechanism to construct stress-testing scenario is envisaged in the host, using a single or combination of TPC-H queries, so that the resource threshold point can be verified, if the virtual machine is still capable of serving critical transactions at this constraining juncture. This threshold point uses server run queue size as input parameter, and it serves 2 purposes: It provides the boundary threshold to the control system, so that periodic learning of the synthetic data sets for performance evaluation does not reach the host's constraint level. 
Secondly, when the host undergoes hardware change, stress-testing scenarios are simulated in the host by loading up to this resource threshold level, for subsequent response time verification from real and critical transactions.}, } @article {pmid23691504, year = {2013}, author = {De Paris, R and Frantz, FA and de Souza, ON and Ruiz, DD}, title = {wFReDoW: a cloud-based web environment to handle molecular docking simulations of a fully flexible receptor model.}, journal = {BioMed research international}, volume = {2013}, number = {}, pages = {469363}, pmid = {23691504}, issn = {2314-6141}, mesh = {*Algorithms ; Bacterial Proteins/chemistry ; *Internet ; *Molecular Docking Simulation ; Mycobacterium tuberculosis/enzymology ; Oxidoreductases/chemistry ; Receptors, Cell Surface/*chemistry ; }, abstract = {Molecular docking simulations of fully flexible protein receptor (FFR) models are coming of age. In our studies, an FFR model is represented by a series of different conformations derived from a molecular dynamic simulation trajectory of the receptor. For each conformation in the FFR model, a docking simulation is executed and analyzed. An important challenge is to perform virtual screening of millions of ligands using an FFR model in a sequential mode since it can become computationally very demanding. In this paper, we propose a cloud-based web environment, called web Flexible Receptor Docking Workflow (wFReDoW), which reduces the CPU time in the molecular docking simulations of FFR models to small molecules. It is based on the new workflow data pattern called self-adaptive multiple instances (P-SaMIs) and on a middleware built on Amazon EC2 instances. P-SaMI reduces the number of molecular docking simulations while the middleware speeds up the docking experiments using a High Performance Computing (HPC) environment on the cloud. 
The experimental results show a reduction in the total elapsed time of docking experiments and the quality of the new reduced receptor models produced by discarding the nonpromising conformations from an FFR model ruled by the P-SaMI data pattern.}, } @article {pmid23678688, year = {2013}, author = {Rajendran, J}, title = {What CFOs should know before venturing into the cloud.}, journal = {Healthcare financial management : journal of the Healthcare Financial Management Association}, volume = {67}, number = {5}, pages = {40-43}, pmid = {23678688}, issn = {0735-0732}, mesh = {*Financial Management, Hospital ; *Health Knowledge, Attitudes, Practice ; *Hospital Administrators ; Information Storage and Retrieval/*methods ; *Internet ; United States ; }, abstract = {There are three major trends in the use of cloud-based services for healthcare IT: Cloud computing involves the hosting of health IT applications in a service provider cloud. Cloud storage is a data storage service that can involve, for example, long-term storage and archival of information such as clinical data, medical images, and scanned documents. Data center colocation involves rental of secure space in the cloud from a vendor, an approach that allows a hospital to share power capacity and proven security protocols, reducing costs.}, } @article {pmid23671843, year = {2013}, author = {Hung, CL and Lin, CY}, title = {Open reading frame phylogenetic analysis on the cloud.}, journal = {International journal of genomics}, volume = {2013}, number = {}, pages = {614923}, pmid = {23671843}, issn = {2314-436X}, abstract = {Phylogenetic analysis has become essential in researching the evolutionary relationships between viruses. These relationships are depicted on phylogenetic trees, in which viruses are grouped based on sequence similarity. Viral evolutionary relationships are identified from open reading frames rather than from complete sequences. 
Recently, cloud computing has become popular for developing internet-based bioinformatics tools. Biocloud is an efficient, scalable, and robust bioinformatics computing service. In this paper, we propose a cloud-based open reading frame phylogenetic analysis service. The proposed service integrates the Hadoop framework, virtualization technology, and phylogenetic analysis methods to provide a high-availability, large-scale bioservice. In a case study, we analyze the phylogenetic relationships among Norovirus. Evolutionary relationships are elucidated by aligning different open reading frame sequences. The proposed platform correctly identifies the evolutionary relationships between members of Norovirus.}, } @article {pmid23657089, year = {2013}, author = {Nagasaki, H and Mochizuki, T and Kodama, Y and Saruhashi, S and Morizaki, S and Sugawara, H and Ohyanagi, H and Kurata, N and Okubo, K and Takagi, T and Kaminuma, E and Nakamura, Y}, title = {DDBJ read annotation pipeline: a cloud computing-based pipeline for high-throughput analysis of next-generation sequencing data.}, journal = {DNA research : an international journal for rapid publication of reports on genes and genomes}, volume = {20}, number = {4}, pages = {383-390}, pmid = {23657089}, issn = {1756-1663}, mesh = {*Genomics ; High-Throughput Nucleotide Sequencing ; Internet ; Molecular Sequence Annotation/*methods ; Sequence Analysis, DNA/*methods ; *Software ; }, abstract = {High-performance next-generation sequencing (NGS) technologies are advancing genomics and molecular biological research. However, the immense amount of sequence data requires computational skills and suitable hardware resources that are a challenge to molecular biologists. The DNA Data Bank of Japan (DDBJ) of the National Institute of Genetics (NIG) has initiated a cloud computing-based analytical pipeline, the DDBJ Read Annotation Pipeline (DDBJ Pipeline), for a high-throughput annotation of NGS reads. 
The DDBJ Pipeline offers a user-friendly graphical web interface and processes massive NGS datasets using decentralized processing by NIG supercomputers currently free of charge. The proposed pipeline consists of two analysis components: basic analysis for reference genome mapping and de novo assembly and subsequent high-level analysis of structural and functional annotations. Users may smoothly switch between the two components in the pipeline, facilitating web-based operations on a supercomputer for high-throughput data analysis. Moreover, public NGS reads of the DDBJ Sequence Read Archive located on the same supercomputer can be imported into the pipeline through the input of only an accession number. This proposed pipeline will facilitate research by utilizing unified analytical workflows applied to the NGS data. The DDBJ Pipeline is accessible at http://p.ddbj.nig.ac.jp/.}, } @article {pmid23653898, year = {2013}, author = {Lee, ST and Lin, CY and Hung, CL}, title = {GPU-based cloud service for Smith-Waterman algorithm using frequency distance filtration scheme.}, journal = {BioMed research international}, volume = {2013}, number = {}, pages = {721738}, pmid = {23653898}, issn = {2314-6141}, mesh = {*Algorithms ; *Computer Graphics ; Databases, Protein ; Humans ; *Internet ; Time Factors ; }, abstract = {As the conventional means of analyzing the similarity between a query sequence and database sequences, the Smith-Waterman algorithm is feasible for a database search owing to its high sensitivity. However, this algorithm is still quite time consuming. CUDA programming can improve computations efficiently by using the computational power of massive computing hardware as graphics processing units (GPUs). This work presents a novel Smith-Waterman algorithm with a frequency-based filtration method on GPUs rather than merely accelerating the comparisons yet expending computational resources to handle such unnecessary comparisons. 
A user friendly interface is also designed for potential cloud server applications with GPUs. Additionally, two data sets, H1N1 protein sequences (query sequence set) and human protein database (database set), are selected, followed by a comparison of CUDA-SW and CUDA-SW with the filtration method, referred to herein as CUDA-SWf. Experimental results indicate that reducing unnecessary sequence alignments can improve the computational time by up to 41%. Importantly, by using CUDA-SWf as a cloud service, this application can be accessed from any computing environment of a device with an Internet connection without time constraints.}, } @article {pmid23621068, year = {2013}, author = {Waxer, N and Ninan, D and Ma, A and Dominguez, N}, title = {How cloud computing and social media are changing the face of health care.}, journal = {Physician executive}, volume = {39}, number = {2}, pages = {58-60, 62}, pmid = {23621068}, issn = {0898-2759}, mesh = {*Delivery of Health Care ; *Information Storage and Retrieval ; *Internet ; *Social Media ; United States ; }, } @article {pmid23599560, year = {2012}, author = {Kharat, AT and Safvi, A and Thind, S and Singh, A}, title = {Cloud Computing for radiologists.}, journal = {The Indian journal of radiology & imaging}, volume = {22}, number = {3}, pages = {150-154}, doi = {10.4103/0971-3026.107166}, pmid = {23599560}, issn = {0971-3026}, abstract = {Cloud computing is a concept wherein a computer grid is created using the Internet with the sole purpose of utilizing shared resources such as computer software, hardware, on a pay-per-use model. Using Cloud computing, radiology users can efficiently manage multimodality imaging units by using the latest software and hardware without paying huge upfront costs. Cloud computing systems usually work on public, private, hybrid, or community models. 
Using the various components of a Cloud, such as applications, client, infrastructure, storage, services, and processing power, Cloud computing can help imaging units rapidly scale and descale operations and avoid huge spending on maintenance of costly applications and storage. Cloud computing allows flexibility in imaging. It sets free radiology from the confines of a hospital and creates a virtual mobile office. The downsides to Cloud computing involve security and privacy issues which need to be addressed to ensure the success of Cloud computing in the future.}, } @article {pmid23586054, year = {2013}, author = {Chen, J and Qian, F and Yan, W and Shen, B}, title = {Translational biomedical informatics in the cloud: present and future.}, journal = {BioMed research international}, volume = {2013}, number = {}, pages = {658925}, pmid = {23586054}, issn = {2314-6141}, mesh = {Animals ; *Biomedical Research ; Computational Biology ; High-Throughput Nucleotide Sequencing ; Humans ; *Information Storage and Retrieval ; Internet ; Medical Informatics ; *Software ; }, abstract = {Next generation sequencing and other high-throughput experimental techniques of recent decades have driven the exponential growth in publicly available molecular and clinical data. This information explosion has prepared the ground for the development of translational bioinformatics. The scale and dimensionality of data, however, pose obvious challenges in data mining, storage, and integration. In this paper we demonstrated the utility and promise of cloud computing for tackling the big data problems. 
We also outline our vision that cloud computing could be an enabling tool to facilitate translational bioinformatics research.}, } @article {pmid23548097, year = {2013}, author = {Williams, DR and Tang, Y}, title = {Impact of office productivity cloud computing on energy consumption and greenhouse gas emissions.}, journal = {Environmental science & technology}, volume = {47}, number = {9}, pages = {4333-4340}, doi = {10.1021/es3041362}, pmid = {23548097}, issn = {1520-5851}, mesh = {*Conservation of Energy Resources ; *Efficiency ; *Efficiency, Organizational ; *Gases ; *Greenhouse Effect ; Models, Theoretical ; }, abstract = {Cloud computing is usually regarded as being energy efficient and thus emitting less greenhouse gases (GHG) than traditional forms of computing. When the energy consumption of Microsoft's cloud computing Office 365 (O365) and traditional Office 2010 (O2010) software suites were tested and modeled, some cloud services were found to consume more energy than the traditional form. The developed model in this research took into consideration the energy consumption at the three main stages of data transmission; data center, network, and end user device. Comparable products from each suite were selected and activities were defined for each product to represent a different computing type. Microsoft provided highly confidential data for the data center stage, while the networking and user device stages were measured directly. A new measurement and software apportionment approach was defined and utilized allowing the power consumption of cloud services to be directly measured for the user device stage. Results indicated that cloud computing is more energy efficient for Excel and Outlook which consumed less energy and emitted less GHG than the standalone counterpart. The power consumption of the cloud based Outlook (8%) and Excel (17%) was lower than their traditional counterparts. 
However, the power consumption of the cloud version of Word was 17% higher than its traditional equivalent. A third mixed access method was also measured for Word which emitted 5% more GHG than the traditional version. It is evident that cloud computing may not provide a unified way forward to reduce energy consumption and GHG. Direct conversion from the standalone package into the cloud provision platform can now consider energy and GHG emissions at the software development and cloud service design stage using the methods described in this research.}, } @article {pmid23543352, year = {2013}, author = {Watson-Haigh, NS and Shang, CA and Haimel, M and Kostadima, M and Loos, R and Deshpande, N and Duesing, K and Li, X and McGrath, A and McWilliam, S and Michnowicz, S and Moolhuijzen, P and Quenette, S and Revote, JN and Tyagi, S and Schneider, MV}, title = {Next-generation sequencing: a challenge to meet the increasing demand for training workshops in Australia.}, journal = {Briefings in bioinformatics}, volume = {14}, number = {5}, pages = {563-574}, pmid = {23543352}, issn = {1477-4054}, mesh = {Australia ; Computational Biology/*education ; Computer-Assisted Instruction/methods ; Cooperative Behavior ; Curriculum ; High-Throughput Nucleotide Sequencing/*statistics & numerical data ; Teaching ; }, abstract = {The widespread adoption of high-throughput next-generation sequencing (NGS) technology among the Australian life science research community is highlighting an urgent need to up-skill biologists in tools required for handling and analysing their NGS data. There is currently a shortage of cutting-edge bioinformatics training courses in Australia as a consequence of a scarcity of skilled trainers with time and funding to develop and deliver training courses. 
To address this, a consortium of Australian research organizations, including Bioplatforms Australia, the Commonwealth Scientific and Industrial Research Organisation and the Australian Bioinformatics Network, has been collaborating with the EMBL-EBI training team. A group of Australian bioinformaticians attended the train-the-trainer workshop to improve training skills in developing and delivering bioinformatics workshop curriculum. A 2-day NGS workshop was jointly developed to provide hands-on knowledge and understanding of typical NGS data analysis workflows. The road show-style workshop was successfully delivered at five geographically distant venues in Australia using the newly established Australian NeCTAR Research Cloud. We highlight the challenges we had to overcome at different stages from design to delivery, including the establishment of an Australian bioinformatics training network and the computing infrastructure and resource development. A virtual machine image, workshop materials and scripts for configuring a machine with workshop contents have all been made available under a Creative Commons Attribution 3.0 Unported License. 
This means participants continue to have convenient access to an environment they had become familiar with, and bioinformatics trainers are able to access and reuse these resources.}, } @article {pmid23522030, year = {2013}, author = {Ji, J and Ling, J and Jiang, H and Wen, Q and Whitin, JC and Tian, L and Cohen, HJ and Ling, XB}, title = {Cloud-based solution to identify statistically significant MS peaks differentiating sample categories.}, journal = {BMC research notes}, volume = {6}, number = {}, pages = {109}, pmid = {23522030}, issn = {1756-0500}, mesh = {Algorithms ; Area Under Curve ; Biomarkers/metabolism ; Computational Biology/*methods ; Data Interpretation, Statistical ; Humans ; Internet ; Mass Spectrometry/*methods ; Proteome ; Proteomics/*methods ; Reproducibility of Results ; Statistics as Topic ; }, abstract = {BACKGROUND: Mass spectrometry (MS) has evolved to become the primary high throughput tool for proteomics based biomarker discovery. Until now, multiple challenges in protein MS data analysis remain: large-scale and complex data set management; MS peak identification, indexing; and high dimensional peak differential analysis with the concurrent statistical tests based false discovery rate (FDR). "Turnkey" solutions are needed for biomarker investigations to rapidly process MS data sets to identify statistically significant peaks for subsequent validation.

FINDINGS: Here we present an efficient and effective solution, which provides experimental biologists easy access to "cloud" computing capabilities to analyze MS data. The web portal can be accessed at http://transmed.stanford.edu/ssa/.

CONCLUSIONS: The presented web application supports online uploading and analysis of large-scale MS data with a simple user interface. This bioinformatic tool will facilitate the discovery of potential protein biomarkers using MS.}, } @article {pmid23514937, year = {2013}, author = {Miras, H and Jiménez, R and Miras, C and Gomà, C}, title = {CloudMC: a cloud computing application for Monte Carlo simulation.}, journal = {Physics in medicine and biology}, volume = {58}, number = {8}, pages = {N125-33}, doi = {10.1088/0031-9155/58/8/N125}, pmid = {23514937}, issn = {1361-6560}, mesh = {Database Management Systems ; *Internet ; *Monte Carlo Method ; Radiotherapy Planning, Computer-Assisted ; Software ; Time Factors ; }, abstract = {This work presents CloudMC, a cloud computing application-developed in Windows Azure®, the platform of the Microsoft® cloud-for the parallelization of Monte Carlo simulations in a dynamic virtual cluster. CloudMC is a web application designed to be independent of the Monte Carlo code in which the simulations are based-the simulations just need to be of the form: input files → executable → output files. To study the performance of CloudMC in Windows Azure®, Monte Carlo simulations with penelope were performed on different instance (virtual machine) sizes, and for different number of instances. The instance size was found to have no effect on the simulation runtime. It was also found that the decrease in time with the number of instances followed Amdahl's law, with a slight deviation due to the increase in the fraction of non-parallelizable time with increasing number of instances. A simulation that would have required 30 h of CPU on a single instance was completed in 48.6 min when executed on 64 instances in parallel (speedup of 37 ×). Furthermore, the use of cloud computing for parallel computing offers some advantages over conventional clusters: high accessibility, scalability and pay per usage. 
Therefore, it is strongly believed that cloud computing will play an important role in making Monte Carlo dose calculation a reality in future clinical practice.}, } @article {pmid23496912, year = {2013}, author = {Vilaplana, J and Solsona, F and Abella, F and Filgueira, R and Rius, J}, title = {The cloud paradigm applied to e-Health.}, journal = {BMC medical informatics and decision making}, volume = {13}, number = {}, pages = {35}, pmid = {23496912}, issn = {1472-6947}, mesh = {Computer Communication Networks ; Humans ; Information Storage and Retrieval ; *Internet ; Program Development ; Software ; Telemedicine/*organization & administration ; User-Computer Interface ; }, abstract = {BACKGROUND: Cloud computing is a new paradigm that is changing how enterprises, institutions and people understand, perceive and use current software systems. With this paradigm, the organizations have no need to maintain their own servers, nor host their own software. Instead, everything is moved to the cloud and provided on demand, saving energy, physical space and technical staff. Cloud-based system architectures provide many advantages in terms of scalability, maintainability and massive data processing.

METHODS: We present the design of an e-health cloud system, modelled by an M/M/m queue with QoS capabilities, i.e. maximum waiting time of requests.

RESULTS: Detailed results for the model formed by a Jackson network of two M/M/m queues from the queueing theory perspective are presented. These results show a significant performance improvement when the number of servers increases.

CONCLUSIONS: Platform scalability becomes a critical issue since we aim to provide the system with high Quality of Service (QoS). In this paper we define an architecture capable of adapting itself to different diseases and growing numbers of patients. This platform could be applied to the medical field to greatly enhance the results of those therapies that have an important psychological component, such as addictions and chronic diseases.}, } @article {pmid23485880, year = {2013}, author = {Regola, N and Chawla, NV}, title = {Storing and using health data in a virtual private cloud.}, journal = {Journal of medical Internet research}, volume = {15}, number = {3}, pages = {e63}, pmid = {23485880}, issn = {1438-8871}, mesh = {Computer Security ; Health Insurance Portability and Accountability Act ; *Information Storage and Retrieval ; *Medical Records Systems, Computerized ; Privacy ; Software ; United States ; }, abstract = {Electronic health records are being adopted at a rapid rate due to increased funding from the US federal government. Health data provide the opportunity to identify possible improvements in health care delivery by applying data mining and statistical methods to the data and will also enable a wide variety of new applications that will be meaningful to patients and medical professionals. Researchers are often granted access to health care data to assist in the data mining process, but HIPAA regulations mandate comprehensive safeguards to protect the data. Often universities (and presumably other research organizations) have an enterprise information technology infrastructure and a research infrastructure. Unfortunately, both of these infrastructures are generally not appropriate for sensitive research data such as HIPAA, as they require special accommodations on the part of the enterprise information technology (or increased security on the part of the research computing environment). 
Cloud computing, which is a concept that allows organizations to build complex infrastructures on leased resources, is rapidly evolving to the point that it is possible to build sophisticated network architectures with advanced security capabilities. We present a prototype infrastructure in Amazon's Virtual Private Cloud to allow researchers and practitioners to utilize the data in a HIPAA-compliant environment.}, } @article {pmid23482117, year = {2013}, author = {Rofoee, BR and Zervas, G and Yan, Y and Simeonidou, D and Bernini, G and Carrozzo, G and Ciulli, N and Levins, J and Basham, M and Dunne, J and Georgiades, M and Belovidov, A and Andreou, L and Sanchez, D and Aracil, J and Lopez, V and Fernández-Palacios, JP}, title = {Demonstration of low latency Intra/Inter Data-Centre heterogeneous optical sub-wavelength network using extended GMPLS-PCE control-plane.}, journal = {Optics express}, volume = {21}, number = {5}, pages = {5463-5474}, doi = {10.1364/OE.21.005463}, pmid = {23482117}, issn = {1094-4087}, abstract = {This paper reports on the first user/application-driven multi-technology optical sub-wavelength network for intra/inter Data-Centre (DC) communications. Two DCs each with distinct sub-wavelength switching technologies, frame based synchronous TSON and packet based asynchronous OPST are interconnected by a WSON inter-DC communication. The intra/inter DC testbed demonstrates ultra-low latency (packet-delay <270 µs and packet-delay-variation (PDV)<10 µs) flexible data-rate traffic transfer by point-to-point, point-to-multipoint, and multipoint-to-(multi)point connectivity, highly suitable for cloud based applications and high performance computing (HPC). 
The extended GMPLS-PCE-SLAE based control-plane enables innovative application-driven end-to-end sub-wavelength path setup and resource reservation across the multi technology data-plane, which has been assessed for as many as 25 concurrent requests.}, } @article {pmid23480664, year = {2013}, author = {Patiny, L and Borel, A}, title = {ChemCalc: a building block for tomorrow's chemical infrastructure.}, journal = {Journal of chemical information and modeling}, volume = {53}, number = {5}, pages = {1223-1228}, doi = {10.1021/ci300563h}, pmid = {23480664}, issn = {1549-960X}, abstract = {Web services, as an aspect of cloud computing, are becoming an important part of the general IT infrastructure, and scientific computing is no exception to this trend. We propose a simple approach to develop chemical Web services, through which servers could expose the essential data manipulation functionality that students and researchers need for chemical calculations. These services return their results as JSON (JavaScript Object Notation) objects, which facilitates their use for Web applications. The ChemCalc project http://www.chemcalc.org demonstrates this approach: we present three Web services related with mass spectrometry, namely isotopic distribution simulation, peptide fragmentation simulation, and molecular formula determination. 
We also developed a complete Web application based on these three Web services, taking advantage of modern HTML5 and JavaScript libraries (ChemDoodle and jQuery).}, } @article {pmid23473595, year = {2013}, author = {Johnson, DE}, title = {Fusion of nonclinical and clinical data to predict human drug safety.}, journal = {Expert review of clinical pharmacology}, volume = {6}, number = {2}, pages = {185-195}, doi = {10.1586/ecp.13.3}, pmid = {23473595}, issn = {1751-2441}, mesh = {Adverse Drug Reaction Reporting Systems/*statistics & numerical data ; Animals ; Clinical Trials as Topic ; *Computer Simulation ; Drug Discovery/*methods ; *Drug-Related Side Effects and Adverse Reactions ; Humans ; *Pharmacovigilance ; }, abstract = {Adverse drug reactions continue to be a major cause of morbidity in both patients receiving therapeutics and in drug R&D programs. Predicting and possibly eliminating these adverse events remains a high priority in industry, government agencies and healthcare systems. With small molecule candidates, the fusion of nonclinical and clinical data is essential in establishing an overall system that creates a true translational science approach. Several new advances are taking place that attempt to create a 'patient context' mechanism early in drug research and development and ultimately into the marketplace. This 'life-cycle' approach has as its core the development of human-oriented, nonclinical end points and the incorporation of clinical knowledge at the drug design stage. The next 5 years should witness an explosion of what the author views as druggable and safe chemical space, pharmacosafety molecular targets and the most important aspect, an understanding of unique susceptibilities in patients developing adverse drug reactions. Our current knowledge of clinical safety relies completely on pharmacovigilance data from approved and marketed drugs, with a few exceptions of drugs failing in clinical trials. 
Massive data repositories now and soon to be available via cloud computing should stimulate a major effort in expanding our view of clinical drug safety and its incorporation into early drug research and development.}, } @article {pmid23439071, year = {2013}, author = {Singh, S and Bansal, M and Maheshwari, P and Adams, D and Sengupta, SP and Price, R and Dantin, L and Smith, M and Kasliwal, RR and Pellikka, PA and Thomas, JD and Narula, J and Sengupta, PP and , }, title = {American Society of Echocardiography: Remote Echocardiography with Web-Based Assessments for Referrals at a Distance (ASE-REWARD) Study.}, journal = {Journal of the American Society of Echocardiography : official publication of the American Society of Echocardiography}, volume = {26}, number = {3}, pages = {221-233}, doi = {10.1016/j.echo.2012.12.012}, pmid = {23439071}, issn = {1097-6795}, mesh = {Cardiovascular Diseases/*diagnostic imaging/epidemiology ; Echocardiography/*instrumentation ; Feasibility Studies ; Female ; Humans ; India/epidemiology ; *Internet ; Male ; *Remote Consultation ; Societies, Medical ; }, abstract = {BACKGROUND: Developing countries face the dual burden of high rates of cardiovascular disease and barriers in accessing diagnostic and referral programs. The aim of this study was to test the feasibility of performing focused echocardiographic studies with long-distance Web-based assessments of recorded images for facilitating care of patients with cardiovascular disease.

METHODS: Subjects were recruited using newspaper advertisements and were prescreened by paramedical workers during a community event in rural north India. Focused echocardiographic studies were performed by nine sonographers using pocket-sized or handheld devices; the scans were uploaded on a Web-based viewing system for remote worldwide interpretation by 75 physicians.

RESULTS: A total of 1,023 studies were interpreted at a median time of 11:44 hours. Of the 1,021 interpretable scans, 207 (20.3%) had minor and 170 (16.7%) had major abnormalities. Left ventricular systolic dysfunction was the most frequent major abnormality (45.9%), followed by valvular (32.9%) and congenital (13.5%) defects. There was excellent agreement in assessing valvular lesions (κ = 0.85), whereas the on-site readings were frequently modified by expert reviewers for left ventricular function and hypertrophy (κ = 0.40 and 0.29, respectively). Six-month telephone follow-up in 71 subjects (41%) with major abnormalities revealed that 57 (80.3%) had improvement in symptoms, 11 (15.5%) experienced worsening symptoms, and three died.

CONCLUSIONS: This study demonstrates the feasibility of performing sonographer-driven focused echocardiographic studies for identifying the burden of structural heart disease in a community. Remote assessment of echocardiograms using a cloud-computing environment may be helpful in expediting care in remote areas.}, } @article {pmid23424149, year = {2013}, author = {Kasson, PM}, title = {Computational biology in the cloud: methods and new insights from computing at scale.}, journal = {Pacific Symposium on Biocomputing. Pacific Symposium on Biocomputing}, volume = {}, number = {}, pages = {451-453}, pmid = {23424149}, issn = {2335-6936}, mesh = {Computational Biology/*methods ; Databases, Factual/statistics & numerical data ; Humans ; Information Dissemination ; Information Storage and Retrieval/*methods ; User-Computer Interface ; }, abstract = {The past few years have seen both explosions in the size of biological data sets and the proliferation of new, highly flexible on-demand computing capabilities. The sheer amount of information available from genomic and metagenomic sequencing, high-throughput proteomics, experimental and simulation datasets on molecular structure and dynamics affords an opportunity for greatly expanded insight, but it creates new challenges of scale for computation, storage, and interpretation of petascale data. Cloud computing resources have the potential to help solve these problems by offering a utility model of computing and storage: near-unlimited capacity, the ability to burst usage, and cheap and flexible payment models. Effective use of cloud computing on large biological datasets requires dealing with non-trivial problems of scale and robustness, since performance-limiting factors can change substantially when a dataset grows by a factor of 10,000 or more. New computing paradigms are thus often needed. 
The use of cloud platforms also creates new opportunities to share data, reduce duplication, and to provide easy reproducibility by making the datasets and computational methods easily available.}, } @article {pmid23407358, year = {2013}, author = {Pronk, S and Páll, S and Schulz, R and Larsson, P and Bjelkmar, P and Apostolov, R and Shirts, MR and Smith, JC and Kasson, PM and van der Spoel, D and Hess, B and Lindahl, E}, title = {GROMACS 4.5: a high-throughput and highly parallel open source molecular simulation toolkit.}, journal = {Bioinformatics (Oxford, England)}, volume = {29}, number = {7}, pages = {845-854}, pmid = {23407358}, issn = {1367-4811}, support = {R01 GM098304/GM/NIGMS NIH HHS/United States ; R01GM098304/GM/NIGMS NIH HHS/United States ; }, mesh = {Algorithms ; *Molecular Dynamics Simulation ; Proteins/chemistry ; *Software ; }, abstract = {MOTIVATION: Molecular simulation has historically been a low-throughput technique, but faster computers and increasing amounts of genomic and structural data are changing this by enabling large-scale automated simulation of, for instance, many conformers or mutants of biomolecules with or without a range of ligands. At the same time, advances in performance and scaling now make it possible to model complex biomolecular interaction and function in a manner directly testable by experiment. These applications share a need for fast and efficient software that can be deployed on massive scale in clusters, web servers, distributed computing or cloud resources.

RESULTS: Here, we present a range of new simulation algorithms and features developed during the past 4 years, leading up to the GROMACS 4.5 software package. The software now automatically handles wide classes of biomolecules, such as proteins, nucleic acids and lipids, and comes with all commonly used force fields for these molecules built-in. GROMACS supports several implicit solvent models, as well as new free-energy algorithms, and the software now uses multithreading for efficient parallelization even on low-end systems, including windows-based workstations. Together with hand-tuned assembly kernels and state-of-the-art parallelization, this provides extremely high performance and cost efficiency for high-throughput as well as massively parallel simulations.

AVAILABILITY: GROMACS is an open source and free software available from http://www.gromacs.org.

SUPPLEMENTARY INFORMATION: Supplementary data are available at Bioinformatics online.}, } @article {pmid23396756, year = {2014}, author = {Zou, Q and Li, XB and Jiang, WR and Lin, ZY and Li, GL and Chen, K}, title = {Survey of MapReduce frame operation in bioinformatics.}, journal = {Briefings in bioinformatics}, volume = {15}, number = {4}, pages = {637-647}, doi = {10.1093/bib/bbs088}, pmid = {23396756}, issn = {1477-4054}, mesh = {*Computational Biology ; Data Collection ; Programming Languages ; }, abstract = {Bioinformatics is challenged by the fact that traditional analysis tools have difficulty in processing large-scale data from high-throughput sequencing. The open source Apache Hadoop project, which adopts the MapReduce framework and a distributed file system, has recently given bioinformatics researchers an opportunity to achieve scalable, efficient and reliable computing performance on Linux clusters and on cloud computing services. In this article, we present MapReduce frame-based applications that can be employed in the next-generation sequencing and other biological domains. In addition, we discuss the challenges faced by this field as well as the future works on parallel computing in bioinformatics.}, } @article {pmid23366803, year = {2012}, author = {Yoshida, H and Wu, Y and Cai, W and Brett, B}, title = {Scalable, high-performance 3D imaging software platform: system architecture and application to virtual colonoscopy.}, journal = {Annual International Conference of the IEEE Engineering in Medicine and Biology Society. IEEE Engineering in Medicine and Biology Society. 
Annual International Conference}, volume = {2012}, number = {}, pages = {3994-3997}, pmid = {23366803}, issn = {2694-0604}, support = {R01 CA166816/CA/NCI NIH HHS/United States ; R03 CA156664/CA/NCI NIH HHS/United States ; R01CA166816/CA/NCI NIH HHS/United States ; R01 CA131718/CA/NCI NIH HHS/United States ; R01 CA095279/CA/NCI NIH HHS/United States ; R03CA156664/CA/NCI NIH HHS/United States ; R01CA095279/CA/NCI NIH HHS/United States ; }, mesh = {Colon/pathology ; Colonography, Computed Tomographic/*methods/*standards ; Humans ; Imaging, Three-Dimensional/*methods ; Software/*standards ; Time Factors ; }, abstract = {One of the key challenges in three-dimensional (3D) medical imaging is to enable the fast turn-around time, which is often required for interactive or real-time response. This inevitably requires not only high computational power but also high memory bandwidth due to the massive amount of data that need to be processed. In this work, we have developed a software platform that is designed to support high-performance 3D medical image processing for a wide range of applications using increasingly available and affordable commodity computing systems: multi-core, clusters, and cloud computing systems. To achieve scalable, high-performance computing, our platform (1) employs size-adaptive, distributable block volumes as a core data structure for efficient parallelization of a wide range of 3D image processing algorithms; (2) supports task scheduling for efficient load distribution and balancing; and (3) consists of a layered parallel software libraries that allow a wide range of medical applications to share the same functionalities. 
We evaluated the performance of our platform by applying it to an electronic cleansing system in virtual colonoscopy, with initial experimental results showing a 10 times performance improvement on an 8-core workstation over the original sequential implementation of the system.}, } @article {pmid23361087, year = {2013}, author = {Hu, YS and Nan, X and Sengupta, P and Lippincott-Schwartz, J and Cang, H}, title = {Accelerating 3B single-molecule super-resolution microscopy with cloud computing.}, journal = {Nature methods}, volume = {10}, number = {2}, pages = {96-97}, pmid = {23361087}, issn = {1548-7105}, support = {Z99 HD999999/ImNIH/Intramural NIH HHS/United States ; }, mesh = {Computational Biology/*methods ; *Computers ; Fluorescent Dyes/*chemistry ; Microscopy, Fluorescence ; *Nanotechnology ; }, } @article {pmid23346476, year = {2012}, author = {Yoo, S and Kim, S and Kim, T and Kim, JS and Baek, RM and Suh, CS and Chung, CY and Hwang, H}, title = {Implementation Issues of Virtual Desktop Infrastructure and Its Case Study for a Physician's Round at Seoul National University Bundang Hospital.}, journal = {Healthcare informatics research}, volume = {18}, number = {4}, pages = {259-265}, pmid = {23346476}, issn = {2093-3681}, abstract = {OBJECTIVES: The cloud computing-based virtual desktop infrastructure (VDI) allows access to computing environments with no limitations in terms of time or place such that it can permit the rapid establishment of a mobile hospital environment. The objective of this study was to investigate the empirical issues to be considered when establishing a virtual mobile environment using VDI technology in a hospital setting and to examine the utility of the technology with an Apple iPad during a physician's rounds as a case study.

METHODS: Empirical implementation issues were derived from a 910-bed tertiary national university hospital that recently launched a VDI system. During the physicians' rounds, we surveyed patient satisfaction levels with the VDI-based mobile consultation service with the iPad and the relationship between these levels of satisfaction and hospital revisits, hospital recommendations, and the hospital brand image. Thirty-five inpatients (including their next-of-kin) and seven physicians participated in the survey.

RESULTS: Implementation issues pertaining to the VDI system arose with regard to the high-availability system architecture, wireless network infrastructure, and screen resolution of the system. Other issues were related to privacy and security, mobile device management, and user education. When the system was used in rounds, patients and their next-of-kin expressed high satisfaction levels, and a positive relationship was noted as regards patients' decisions to revisit the hospital and whether the use of the VDI system improved the brand image of the hospital.

CONCLUSIONS: Mobile hospital environments have the potential to benefit both physicians and patients. The issues related to the implementation of VDI system discussed here should be examined in advance for its successful adoption and implementation.}, } @article {pmid23335858, year = {2011}, author = {Petrou, S and Sloan, TM and Mewissen, M and Forster, T and Piotrowski, M and Dobrzelecki, B and Ghazal, P and Trew, A and Hill, J}, title = {Optimization of a parallel permutation testing function for the SPRINT R package.}, journal = {Concurrency and computation : practice & experience}, volume = {23}, number = {17}, pages = {2258-2268}, pmid = {23335858}, issn = {1532-0626}, support = {086696//Wellcome Trust/United Kingdom ; }, abstract = {The statistical language R and its Bioconductor package are favoured by many biostatisticians for processing microarray data. The amount of data produced by some analyses has reached the limits of many common bioinformatics computing infrastructures. High Performance Computing systems offer a solution to this issue. The Simple Parallel R Interface (SPRINT) is a package that provides biostatisticians with easy access to High Performance Computing systems and allows the addition of parallelized functions to R. Previous work has established that the SPRINT implementation of an R permutation testing function has close to optimal scaling on up to 512 processors on a supercomputer. Access to supercomputers, however, is not always possible, and so the work presented here compares the performance of the SPRINT implementation on a supercomputer with benchmarks on a range of platforms including cloud resources and a common desktop machine with multiprocessing capabilities. 
Copyright © 2011 John Wiley & Sons, Ltd.}, } @article {pmid23321962, year = {2013}, author = {de la Torre-Díez, I and González, S and López-Coronado, M}, title = {EHR systems in the Spanish Public Health National System: the lack of interoperability between primary and specialty care.}, journal = {Journal of medical systems}, volume = {37}, number = {1}, pages = {9914}, pmid = {23321962}, issn = {1573-689X}, mesh = {Computer Communication Networks/*organization & administration ; Electronic Health Records/*organization & administration ; Humans ; Medicine ; Primary Health Care/*organization & administration ; Spain ; State Medicine/organization & administration ; *Systems Integration ; }, abstract = {One of the problems of the Spanish Public Health National System is the lack of interoperability in the implemented Electronic Health Records (EHRs) systems in primary and specialty care. There is a deficiency in the electronic health systems that store the data of primary care patients, so one of the basic problems that prevent that every hospital and health center working on the same method is that deficiency. In this paper we research on this problem and to give expression to a series of solutions to it. Bibliographic material in this work has been obtained mainly from MEDLINE source. Additionally, due to the lack of information and privacy about the different EHRs systems, we have resorted to making direct contact with the organizations that have implemented those systems and technological providers. Two solutions have been propounded given several aspects for a feasibility study. The first solution is based upon in the execution of backups in different EHRs databases, which implies a huge economical and infrastructure development. The second of these solutions so that due to the creation of protocols by means of Cloud Computing Technologies. It is crucial the need to reach a homogeneity concerning to the storage of patients clinical data. 
On the results achieved we can emphasize that maybe the main problems are not the economical handicaps or the large technological development needed, but, as for Health each Region manages its own competences, each one governs with independent policies and decisions.}, } @article {pmid23318694, year = {2013}, author = {Eugster, MJ and Schmid, M and Binder, H and Schmidberger, M}, title = {Grid and cloud computing methods in biomedical research.}, journal = {Methods of information in medicine}, volume = {52}, number = {1}, pages = {62-64}, pmid = {23318694}, issn = {2511-705X}, mesh = {Biomedical Research ; Biometry ; Computer Graphics ; Database Management Systems ; Epidemiology ; Germany ; Humans ; Information Storage and Retrieval/*methods ; Internet ; Medical Informatics/*methods ; Medical Informatics Computing ; Natural Language Processing ; }, } @article {pmid23305951, year = {2013}, author = {Muth, T and Peters, J and Blackburn, J and Rapp, E and Martens, L}, title = {ProteoCloud: a full-featured open source proteomics cloud computing pipeline.}, journal = {Journal of proteomics}, volume = {88}, number = {}, pages = {104-108}, doi = {10.1016/j.jprot.2012.12.026}, pmid = {23305951}, issn = {1876-7737}, mesh = {*Internet ; Proteomics/*methods ; *Software ; }, abstract = {We here present the ProteoCloud pipeline, a freely available, full-featured cloud-based platform to perform computationally intensive, exhaustive searches in a cloud environment using five different peptide identification algorithms. ProteoCloud is entirely open source, and is built around an easy to use and cross-platform software client with a rich graphical user interface. This client allows full control of the number of cloud instances to initiate and of the spectra to assign for identification. It also enables the user to track progress, and to visualize and interpret the results in detail. 
Source code, binaries and documentation are all available at http://proteocloud.googlecode.com.}, } @article {pmid23282094, year = {2012}, author = {Chang, YJ and Chen, CC and Chen, CL and Ho, JM}, title = {A de novo next generation genomic sequence assembler based on string graph and MapReduce cloud computing framework.}, journal = {BMC genomics}, volume = {13 Suppl 7}, number = {Suppl 7}, pages = {S28}, pmid = {23282094}, issn = {1471-2164}, mesh = {Algorithms ; Databases, Factual ; *High-Throughput Nucleotide Sequencing ; Information Storage and Retrieval ; Internet ; *Software ; User-Computer Interface ; }, abstract = {BACKGROUND: State-of-the-art high-throughput sequencers, e.g., the Illumina HiSeq series, generate sequencing reads that are longer than 150 bp up to a total of 600 Gbp of data per run. The high-throughput sequencers generate lengthier reads with greater sequencing depth than those generated by previous technologies. Two major challenges exist in using the high-throughput technology for de novo assembly of genomes. First, the amount of physical memory may be insufficient to store the data structure of the assembly algorithm, even for high-end multicore processors. Moreover, the graph-theoretical model used to capture intersection relationships of the reads may contain structural defects that are not well managed by existing assembly algorithms.

RESULTS: We developed a distributed genome assembler based on string graphs and MapReduce framework, known as the CloudBrush. The assembler includes a novel edge-adjustment algorithm to detect structural defects by examining the neighboring reads of a specific read for sequencing errors and adjusting the edges of the string graph, if necessary. CloudBrush is evaluated against GAGE benchmarks to compare its assembly quality with the other assemblers. The results show that our assemblies have a moderate N50, a low misassembly rate of misjoins, and indels of > 5 bp. In addition, we have introduced two measures, known as precision and recall, to address the issues of faithfully aligned contigs to target genomes. Compared with the assembly tools used in the GAGE benchmarks, CloudBrush is shown to produce contigs with high precision and recall. We also verified the effectiveness of the edge-adjustment algorithm using simulated datasets and ran CloudBrush on a nematode dataset using a commercial cloud. CloudBrush assembler is available at https://github.com/ice91/CloudBrush.}, } @article {pmid23281941, year = {2012}, author = {El-Kalioby, M and Abouelhoda, M and Krüger, J and Giegerich, R and Sczyrba, A and Wall, DP and Tonellato, P}, title = {Personalized cloud-based bioinformatics services for research and education: use cases and the elasticHPC package.}, journal = {BMC bioinformatics}, volume = {13 Suppl 17}, number = {Suppl 17}, pages = {S22}, pmid = {23281941}, issn = {1471-2105}, support = {R01 LM010130/LM/NLM NIH HHS/United States ; }, mesh = {*Computational Biology ; Education/*methods ; *Information Services ; *Internet ; Research ; *Software ; }, abstract = {BACKGROUND: Bioinformatics services have been traditionally provided in the form of a web-server that is hosted at institutional infrastructure and serves multiple users. 
This model, however, is not flexible enough to cope with the increasing number of users, increasing data size, and new requirements in terms of speed and availability of service. The advent of cloud computing suggests a new service model that provides an efficient solution to these problems, based on the concepts of "resources-on-demand" and "pay-as-you-go". However, cloud computing has not yet been introduced within bioinformatics servers due to the lack of usage scenarios and software layers that address the requirements of the bioinformatics domain.

RESULTS: In this paper, we provide different use case scenarios for providing cloud computing based services, considering both the technical and financial aspects of the cloud computing service model. These scenarios are for individual users seeking computational power as well as bioinformatics service providers aiming at provision of personalized bioinformatics services to their users. We also present elasticHPC, a software package and a library that facilitates the use of high performance cloud computing resources in general and the implementation of the suggested bioinformatics scenarios in particular. Concrete examples that demonstrate the suggested use case scenarios with whole bioinformatics servers and major sequence analysis tools like BLAST are presented. Experimental results with large datasets are also included to show the advantages of the cloud model.

CONCLUSIONS: Our use case scenarios and the elasticHPC package are steps towards the provision of cloud based bioinformatics services, which would help in overcoming the data challenge of recent biological research. All resources related to elasticHPC and its web-interface are available at http://www.elasticHPC.org.}, } @article {pmid25937948, year = {2013}, author = {Zhao, S and Prenger, K and Smith, L}, title = {Stormbow: A Cloud-Based Tool for Reads Mapping and Expression Quantification in Large-Scale RNA-Seq Studies.}, journal = {ISRN bioinformatics}, volume = {2013}, number = {}, pages = {481545}, pmid = {25937948}, issn = {2090-7338}, abstract = {RNA-Seq is becoming a promising replacement to microarrays in transcriptome profiling and differential gene expression study. Technical improvements have decreased sequencing costs and, as a result, the size and number of RNA-Seq datasets have increased rapidly. However, the increasing volume of data from large-scale RNA-Seq studies poses a practical challenge for data analysis in a local environment. To meet this challenge, we developed Stormbow, a cloud-based software package, to process large volumes of RNA-Seq data in parallel. The performance of Stormbow has been tested by practically applying it to analyse 178 RNA-Seq samples in the cloud. In our test, it took 6 to 8 hours to process an RNA-Seq sample with 100 million reads, and the average cost was $3.50 per sample. Utilizing Amazon Web Services as the infrastructure for Stormbow allows us to easily scale up to handle large datasets with on-demand computational resources. Stormbow is a scalable, cost effective, and open-source based tool for large-scale RNA-Seq data analysis. 
Stormbow can be freely downloaded and can be used out of box to process Illumina RNA-Seq datasets.}, } @article {pmid25825658, year = {2013}, author = {Chae, H and Jung, I and Lee, H and Marru, S and Lee, SW and Kim, S}, title = {Bio and health informatics meets cloud : BioVLab as an example.}, journal = {Health information science and systems}, volume = {1}, number = {}, pages = {6}, pmid = {25825658}, issn = {2047-2501}, abstract = {The exponential increase of genomic data brought by the advent of the next or the third generation sequencing (NGS) technologies and the dramatic drop in sequencing cost have driven biological and medical sciences to data-driven sciences. This revolutionary paradigm shift comes with challenges in terms of data transfer, storage, computation, and analysis of big bio/medical data. Cloud computing is a service model sharing a pool of configurable resources, which is a suitable workbench to address these challenges. From the medical or biological perspective, providing computing power and storage is the most attractive feature of cloud computing in handling the ever increasing biological data. As data increases in size, many research organizations start to experience the lack of computing power, which becomes a major hurdle in achieving research goals. In this paper, we review the features of publically available bio and health cloud systems in terms of graphical user interface, external data integration, security and extensibility of features. 
We then discuss about issues and limitations of current cloud systems and conclude with suggestion of a biological cloud environment concept, which can be defined as a total workbench environment assembling computational tools and databases for analyzing bio/medical big data in particular application domains.}, } @article {pmid23267176, year = {2013}, author = {Maguire, E and González-Beltrán, A and Whetzel, PL and Sansone, SA and Rocca-Serra, P}, title = {OntoMaton: a bioportal powered ontology widget for Google Spreadsheets.}, journal = {Bioinformatics (Oxford, England)}, volume = {29}, number = {4}, pages = {525-527}, pmid = {23267176}, issn = {1367-4811}, support = {BB/I000917/1/BB_/Biotechnology and Biological Sciences Research Council/United Kingdom ; BB/I000771/1/BB_/Biotechnology and Biological Sciences Research Council/United Kingdom ; U54 HG004028/HG/NHGRI NIH HHS/United States ; BB/E025080/1/BB_/Biotechnology and Biological Sciences Research Council/United Kingdom ; BB/I025840/1/BB_/Biotechnology and Biological Sciences Research Council/United Kingdom ; }, mesh = {Internet ; *Software ; *Vocabulary, Controlled ; }, abstract = {MOTIVATION: Data collection in spreadsheets is ubiquitous, but current solutions lack support for collaborative semantic annotation that would promote shared and interdisciplinary annotation practices, supporting geographically distributed players.

RESULTS: OntoMaton is an open source solution that brings ontology lookup and tagging capabilities into a cloud-based collaborative editing environment, harnessing Google Spreadsheets and the NCBO Web services. It is a general purpose, format-agnostic tool that may serve as a component of the ISA software suite. OntoMaton can also be used to assist the ontology development process.

AVAILABILITY: OntoMaton is freely available from Google widgets under the CPAL open source license; documentation and examples at: https://github.com/ISA-tools/OntoMaton.}, } @article {pmid23261079, year = {2013}, author = {Xia, H and Asif, I and Zhao, X}, title = {Cloud-ECG for real time ECG monitoring and analysis.}, journal = {Computer methods and programs in biomedicine}, volume = {110}, number = {3}, pages = {253-259}, doi = {10.1016/j.cmpb.2012.11.008}, pmid = {23261079}, issn = {1872-7565}, mesh = {Algorithms ; Cell Phone ; Computer Systems/*statistics & numerical data ; Electrocardiography/standards/*statistics & numerical data ; Humans ; Internet ; Mobile Applications ; Remote Sensing Technology/*statistics & numerical data ; Software ; }, abstract = {Recent advances in mobile technology and cloud computing have inspired numerous designs of cloud-based health care services and devices. Within the cloud system, medical data can be collected and transmitted automatically to medical professionals from anywhere and feedback can be returned to patients through the network. In this article, we developed a cloud-based system for clients with mobile devices or web browsers. Specially, we aim to address the issues regarding the usefulness of the ECG data collected from patients themselves. Algorithms for ECG enhancement, ECG quality evaluation and ECG parameters extraction were implemented in the system. The system was demonstrated by a use case, in which ECG data was uploaded to the web server from a mobile phone at a certain frequency and analysis was performed in real time using the server. 
The system has been proven to be functional, accurate and efficient.}, } @article {pmid23248759, year = {2012}, author = {Qi, X and Kim, H and Xing, F and Parashar, M and Foran, DJ and Yang, L}, title = {The analysis of image feature robustness using cometcloud.}, journal = {Journal of pathology informatics}, volume = {3}, number = {}, pages = {33}, pmid = {23248759}, issn = {2153-3539}, support = {UL1 TR000117/TR/NCATS NIH HHS/United States ; P30 CA072720/CA/NCI NIH HHS/United States ; R01 LM009239/LM/NLM NIH HHS/United States ; R01 CA156386/CA/NCI NIH HHS/United States ; UL1 RR033173/RR/NCRR NIH HHS/United States ; R01 CA161375/CA/NCI NIH HHS/United States ; R01 LM011119/LM/NLM NIH HHS/United States ; }, abstract = {The robustness of image features is a very important consideration in quantitative image analysis. The objective of this paper is to investigate the robustness of a range of image texture features using hematoxylin stained breast tissue microarray slides which are assessed while simulating different imaging challenges including out of focus, changes in magnification and variations in illumination, noise, compression, distortion, and rotation. We employed five texture analysis methods and tested them while introducing all of the challenges listed above. The texture features that were evaluated include co-occurrence matrix, center-symmetric auto-correlation, texture feature coding method, local binary pattern, and texton. Due to the independence of each transformation and texture descriptor, a network structured combination was proposed and deployed on the Rutgers private cloud. The experiments utilized 20 randomly selected tissue microarray cores. All the combinations of the image transformations and deformations are calculated, and the whole feature extraction procedure was completed in 70 minutes using a cloud equipped with 20 nodes. 
Center-symmetric auto-correlation outperforms all the other four texture descriptors but also requires the longest computational time. It is roughly 10 times slower than local binary pattern and texton. From a speed perspective, both the local binary pattern and texton features provided excellent performance for classification and content-based image retrieval.}, } @article {pmid23248640, year = {2012}, author = {Thakur, RS and Bandopadhyay, R and Chaudhary, B and Chatterjee, S}, title = {Now and next-generation sequencing techniques: future of sequence analysis using cloud computing.}, journal = {Frontiers in genetics}, volume = {3}, number = {}, pages = {280}, pmid = {23248640}, issn = {1664-8021}, abstract = {Advances in the field of sequencing techniques have resulted in the greatly accelerated production of huge sequence datasets. This presents immediate challenges in database maintenance at datacenters. It provides additional computational challenges in data mining and sequence analysis. Together these represent a significant overburden on traditional stand-alone computer resources, and to reach effective conclusions quickly and efficiently, the virtualization of the resources and computation on a pay-as-you-go concept (together termed "cloud computing") has recently appeared. The collective resources of the datacenter, including both hardware and software, can be available publicly, being then termed a public cloud, the resources being provided in a virtual mode to the clients who pay according to the resources they employ. Examples of public companies providing these resources include Amazon, Google, and Joyent. The computational workload is shifted to the provider, which also implements required hardware and software upgrades over time. A virtual environment is created in the cloud corresponding to the computational and data storage needs of the user via the internet. 
The task is then performed, the results transmitted to the user, and the environment finally deleted after all tasks are completed. In this discussion, we focus on the basics of cloud computing, and go on to analyze the prerequisites and overall working of clouds. Finally, the applications of cloud computing in biological systems, particularly in comparative genomics, genome informatics, and SNP detection are discussed with reference to traditional workflows.}, } @article {pmid23230164, year = {2013}, author = {Suresh, V and Ezhilchelvan, P and Watson, P}, title = {Scalable and responsive event processing in the cloud.}, journal = {Philosophical transactions. Series A, Mathematical, physical, and engineering sciences}, volume = {371}, number = {1983}, pages = {20120095}, pmid = {23230164}, issn = {1364-503X}, abstract = {Event processing involves continuous evaluation of queries over streams of events. Response-time optimization is traditionally done over a fixed set of nodes and/or by using metrics measured at query-operator levels. Cloud computing makes it easy to acquire and release computing nodes as required. Leveraging this flexibility, we propose a novel, queueing-theory-based approach for meeting specified response-time targets against fluctuating event arrival rates by drawing only the necessary amount of computing resources from a cloud platform. In the proposed approach, the entire processing engine of a distinct query is modelled as an atomic unit for predicting response times. Several such units hosted on a single node are modelled as a multiple class M/G/1 system. These aspects eliminate intrusive, low-level performance measurements at run-time, and also offer portability and scalability. Using model-based predictions, cloud resources are efficiently used to meet response-time targets. 
The efficacy of the approach is demonstrated through cloud-based experiments.}, } @article {pmid23230158, year = {2013}, author = {Djemame, K and Barnitzke, B and Corrales, M and Kiran, M and Jiang, M and Armstrong, D and Forgó, N and Nwankwo, I}, title = {Legal issues in clouds: towards a risk inventory.}, journal = {Philosophical transactions. Series A, Mathematical, physical, and engineering sciences}, volume = {371}, number = {1983}, pages = {20120075}, doi = {10.1098/rsta.2012.0075}, pmid = {23230158}, issn = {1364-503X}, abstract = {Cloud computing technologies have reached a high level of development, yet a number of obstacles still exist that must be overcome before widespread commercial adoption can become a reality. In a cloud environment, end users requesting services and cloud providers negotiate service-level agreements (SLAs) that provide explicit statements of all expectations and obligations of the participants. If cloud computing is to experience widespread commercial adoption, then incorporating risk assessment techniques is essential during SLA negotiation and service operation. This article focuses on the legal issues surrounding risk assessment in cloud computing. Specifically, it analyses risk regarding data protection and security, and presents the requirements of an inherent risk inventory. The usefulness of such a risk inventory is described in the context of the OPTIMIS project.}, } @article {pmid23230157, year = {2013}, author = {Cohen, J and Filippis, I and Woodbridge, M and Bauer, D and Hong, NC and Jackson, M and Butcher, S and Colling, D and Darlington, J and Fuchs, B and Harvey, M}, title = {RAPPORT: running scientific high-performance computing applications on the cloud.}, journal = {Philosophical transactions. 
Series A, Mathematical, physical, and engineering sciences}, volume = {371}, number = {1983}, pages = {20120073}, doi = {10.1098/rsta.2012.0073}, pmid = {23230157}, issn = {1364-503X}, support = {BB/C519670/1/BB_/Biotechnology and Biological Sciences Research Council/United Kingdom ; BB/G003912/1/BB_/Biotechnology and Biological Sciences Research Council/United Kingdom ; BBS/B/16488/BB_/Biotechnology and Biological Sciences Research Council/United Kingdom ; BEP17014/BB_/Biotechnology and Biological Sciences Research Council/United Kingdom ; }, mesh = {*Algorithms ; *Computing Methodologies ; *Internet ; Science/*methods ; *Software ; }, abstract = {Cloud computing infrastructure is now widely used in many domains, but one area where there has been more limited adoption is research computing, in particular for running scientific high-performance computing (HPC) software. The Robust Application Porting for HPC in the Cloud (RAPPORT) project took advantage of existing links between computing researchers and application scientists in the fields of bioinformatics, high-energy physics (HEP) and digital humanities, to investigate running a set of scientific HPC applications from these domains on cloud infrastructure. In this paper, we focus on the bioinformatics and HEP domains, describing the applications and target cloud platforms. We conclude that, while there are many factors that need consideration, there is no fundamental impediment to the use of cloud infrastructure for running many types of HPC applications and, in some cases, there is potential for researchers to benefit significantly from the flexibility offered by cloud platforms.}, } @article {pmid23230155, year = {2013}, author = {Tablan, V and Roberts, I and Cunningham, H and Bontcheva, K}, title = {GATECloud.net: a platform for large-scale, open-source text processing on the cloud.}, journal = {Philosophical transactions. 
Series A, Mathematical, physical, and engineering sciences}, volume = {371}, number = {1983}, pages = {20120071}, doi = {10.1098/rsta.2012.0071}, pmid = {23230155}, issn = {1364-503X}, mesh = {*Algorithms ; *Artificial Intelligence ; *Internet ; *Natural Language Processing ; *Software ; *User-Computer Interface ; }, abstract = {Cloud computing is increasingly being regarded as a key enabler of the 'democratization of science', because on-demand, highly scalable cloud computing facilities enable researchers anywhere to carry out data-intensive experiments. In the context of natural language processing (NLP), algorithms tend to be complex, which makes their parallelization and deployment on cloud platforms a non-trivial task. This study presents a new, unique, cloud-based platform for large-scale NLP research--GATECloud. net. It enables researchers to carry out data-intensive NLP experiments by harnessing the vast, on-demand compute power of the Amazon cloud. Important infrastructural issues are dealt with by the platform, completely transparently for the researcher: load balancing, efficient data upload and storage, deployment on the virtual machines, security and fault tolerance. We also include a cost-benefit analysis and usage evaluation.}, } @article {pmid23230153, year = {2013}, author = {Turilli, M and Wallom, D and Williams, C and Gough, S and Curran, N and Tarrant, R and Bretherton, D and Powell, A and Johnson, M and Harmer, T and Wright, P and Gordon, J}, title = {Flexible services for the support of research.}, journal = {Philosophical transactions. Series A, Mathematical, physical, and engineering sciences}, volume = {371}, number = {1983}, pages = {20120067}, doi = {10.1098/rsta.2012.0067}, pmid = {23230153}, issn = {1364-503X}, abstract = {Cloud computing has been increasingly adopted by users and providers to promote a flexible, scalable and tailored access to computing resources. 
Nonetheless, the consolidation of this paradigm has uncovered some of its limitations. Initially devised by corporations with direct control over large amounts of computational resources, cloud computing is now being endorsed by organizations with limited resources or with a more articulated, less direct control over these resources. The challenge for these organizations is to leverage the benefits of cloud computing while dealing with limited and often widely distributed computing resources. This study focuses on the adoption of cloud computing by higher education institutions and addresses two main issues: flexible and on-demand access to a large amount of storage resources, and scalability across a heterogeneous set of cloud infrastructures. The proposed solutions leverage a federated approach to cloud resources in which users access multiple and largely independent cloud infrastructures through a highly customizable broker layer. This approach allows for a uniform authentication and authorization infrastructure, a fine-grained policy specification and the aggregation of accounting and monitoring. Within a loosely coupled federation of cloud infrastructures, users can access vast amount of data without copying them across cloud infrastructures and can scale their resource provisions when the local cloud resources become insufficient.}, } @article {pmid23230152, year = {2013}, author = {Berriman, GB and Deelman, E and Juve, G and Rynge, M and Vöckler, JS}, title = {The application of cloud computing to scientific workflows: a study of cost and performance.}, journal = {Philosophical transactions. 
Series A, Mathematical, physical, and engineering sciences}, volume = {371}, number = {1983}, pages = {20120066}, doi = {10.1098/rsta.2012.0066}, pmid = {23230152}, issn = {1364-503X}, abstract = {The current model of transferring data from data centres to desktops for analysis will soon be rendered impractical by the accelerating growth in the volume of science datasets. Processing will instead often take place on high-performance servers co-located with data. Evaluations of how new technologies such as cloud computing would support such a new distributed computing model are urgently needed. Cloud computing is a new way of purchasing computing and storage resources on demand through virtualization technologies. We report here the results of investigations of the applicability of commercial cloud computing to scientific computing, with an emphasis on astronomy, including investigations of what types of applications can be run cheaply and efficiently on the cloud, and an example of an application well suited to the cloud: processing a large dataset to create a new science product.}, } @article {pmid23223611, year = {2013}, author = {Piotrowski, M and McGilvary, GA and Sloan, TM and Mewissen, M and Lloyd, AD and Forster, T and Mitchell, L and Ghazal, P and Hill, J}, title = {Exploiting parallel R in the cloud with SPRINT.}, journal = {Methods of information in medicine}, volume = {52}, number = {1}, pages = {80-90}, pmid = {23223611}, issn = {2511-705X}, support = {/WT_/Wellcome Trust/United Kingdom ; 086696/WT_/Wellcome Trust/United Kingdom ; WT086696MA/WT_/Wellcome Trust/United Kingdom ; }, mesh = {Animals ; Computer Graphics/economics ; *Computing Methodologies ; Costs and Cost Analysis ; Database Management Systems/economics ; *Genomics/economics ; Humans ; Information Storage and Retrieval/economics/*methods ; Internet/economics ; Medical Informatics/economics/*methods ; *Microarray Analysis/economics ; Natural Language Processing ; Sequence Analysis, 
DNA/economics ; }, abstract = {BACKGROUND: Advances in DNA Microarray devices and next-generation massively parallel DNA sequencing platforms have led to an exponential growth in data availability but the arising opportunities require adequate computing resources. High Performance Computing (HPC) in the Cloud offers an affordable way of meeting this need.

OBJECTIVES: Bioconductor, a popular tool for high-throughput genomic data analysis, is distributed as add-on modules for the R statistical programming language, but R has no native capabilities for exploiting multi-processor architectures. SPRINT is an R package that enables easy access to HPC for genomics researchers. This paper investigates: setting up and running SPRINT-enabled genomic analyses on Amazon's Elastic Compute Cloud (EC2), the advantages of submitting applications to EC2 from different parts of the world, and whether resource underutilization can improve application performance.

METHODS: The SPRINT parallel implementations of correlation, permutation testing, partitioning around medoids and the multi-purpose papply have been benchmarked on data sets of various sizes on Amazon EC2. Jobs have been submitted from both the UK and Thailand to investigate monetary differences.

RESULTS: It is possible to obtain good, scalable performance but the level of improvement is dependent upon the nature of the algorithm. Resource underutilization can further improve the time to result. End-user's location impacts on costs due to factors such as local taxation.

CONCLUSIONS: Although not designed to satisfy HPC requirements, Amazon EC2 and cloud computing in general provides an interesting alternative and provides new possibilities for smaller organisations with limited funds.}, } @article {pmid23190475, year = {2012}, author = {Dai, L and Gao, X and Guo, Y and Xiao, J and Zhang, Z}, title = {Bioinformatics clouds for big data manipulation.}, journal = {Biology direct}, volume = {7}, number = {}, pages = {43; discussion 43}, pmid = {23190475}, issn = {1745-6150}, mesh = {Access to Information ; Computational Biology/*methods ; Data Collection ; Information Storage and Retrieval/classification/*methods ; *Internet ; Software ; User-Computer Interface ; }, abstract = {UNLABELLED: As advances in life sciences and information technology bring profound influences on bioinformatics due to its interdisciplinary nature, bioinformatics is experiencing a new leap-forward from in-house computing infrastructure into utility-supplied cloud computing delivered over the Internet, in order to handle the vast quantities of biological data generated by high-throughput experimental technologies. Albeit relatively new, cloud computing promises to address big data storage and analysis issues in the bioinformatics field. Here we review extant cloud-based services in bioinformatics, classify them into Data as a Service (DaaS), Software as a Service (SaaS), Platform as a Service (PaaS), and Infrastructure as a Service (IaaS), and present our perspectives on the adoption of cloud computing in bioinformatics.

REVIEWERS: This article was reviewed by Frank Eisenhaber, Igor Zhulin, and Sandor Pongor.}, } @article {pmid23188699, year = {2012}, author = {Poole, CM and Cornelius, I and Trapp, JV and Langton, CM}, title = {Radiotherapy Monte Carlo simulation using cloud computing technology.}, journal = {Australasian physical & engineering sciences in medicine}, volume = {35}, number = {4}, pages = {497-502}, doi = {10.1007/s13246-012-0167-8}, pmid = {23188699}, issn = {0158-9938}, mesh = {Computer Simulation ; *Internet ; *Models, Statistical ; *Monte Carlo Method ; Radiotherapy Dosage ; Radiotherapy Planning, Computer-Assisted/*methods ; Radiotherapy, Computer-Assisted/*methods ; Scattering, Radiation ; }, abstract = {Cloud computing allows for vast computational resources to be leveraged quickly and easily in bursts as and when required. Here we describe a technique that allows for Monte Carlo radiotherapy dose calculations to be performed using GEANT4 and executed in the cloud, with relative simulation cost and completion time evaluated as a function of machine count. As expected, simulation completion time decreases as 1/n for n parallel machines, and relative simulation cost is found to be optimal where n is a factor of the total simulation time in hours. Using the technique, we demonstrate the potential usefulness of cloud computing as a solution for rapid Monte Carlo simulation for radiotherapy dose calculation without the need for dedicated local computer hardware as a proof of principle.}, } @article {pmid23188548, year = {2013}, author = {Knaus, J and Hieke, S and Binder, H and Schwarzer, G}, title = {Costs of cloud computing for a biometry department. 
A case study.}, journal = {Methods of information in medicine}, volume = {52}, number = {1}, pages = {72-79}, doi = {10.3414/ME11-02-0048}, pmid = {23188548}, issn = {2511-705X}, mesh = {*Biometry ; Computational Biology/*economics ; Computer Communication Networks/economics ; Computer Graphics ; Computers/economics ; Costs and Cost Analysis ; Database Management Systems/*economics ; Germany ; Humans ; Information Storage and Retrieval/*economics ; Internet ; Medical Informatics/*economics ; Medical Informatics Computing/*economics ; Natural Language Processing ; }, abstract = {BACKGROUND: "Cloud" computing providers, such as the Amazon Web Services (AWS), offer stable and scalable computational resources based on hardware virtualization, with short, usually hourly, billing periods. The idea of pay-as-you-use seems appealing for biometry research units which have only limited access to university or corporate data center resources or grids.

OBJECTIVES: This case study compares the costs of an existing heterogeneous on-site hardware pool in a Medical Biometry and Statistics department to a comparable AWS offer.

METHODS: The "total cost of ownership", including all direct costs, is determined for the on-site hardware, and hourly prices are derived, based on actual system utilization during the year 2011. Indirect costs, which are difficult to quantify, are not included in this comparison, but nevertheless some rough guidance from our experience is given. To indicate the scale of costs for a methodological research project, a simulation study of a permutation-based statistical approach is performed using AWS and on-site hardware.

RESULTS: In the presented case, with a system utilization of 25-30 percent and 3-5-year amortization, on-site hardware can result in smaller costs, compared to hourly rental in the cloud, depending on the instance chosen. Renting cloud instances with sufficient main memory is a deciding factor in this comparison.

CONCLUSIONS: Costs for on-site hardware may vary, depending on the specific infrastructure at a research unit, but have only moderate impact on the overall comparison and subsequent decision for obtaining affordable scientific computing resources. Overall utilization has a much stronger impact as it determines the actual computing hours needed per year. Taking this into account, cloud computing might still be a viable option for projects with limited maturity, or as a supplement for short peaks in demand.}, } @article {pmid23188517, year = {2013}, author = {Bernau, C and Boulesteix, AL and Knaus, J}, title = {Application of microarray analysis on computer cluster and cloud platforms.}, journal = {Methods of information in medicine}, volume = {52}, number = {1}, pages = {65-71}, doi = {10.3414/ME11-02-0043}, pmid = {23188517}, issn = {2511-705X}, mesh = {Biostatistics ; *Computing Methodologies ; *Databases, Genetic ; Efficiency ; *Genomics ; Germany ; Humans ; *Mathematical Computing ; *Microarray Analysis ; Workflow ; }, abstract = {BACKGROUND: Analysis of recent high-dimensional biological data tends to be computationally intensive as many common approaches such as resampling or permutation tests require the basic statistical analysis to be repeated many times. A crucial advantage of these methods is that they can be easily parallelized due to the computational independence of the resampling or permutation iterations, which has induced many statistics departments to establish their own computer clusters. An alternative is to rent computing resources in the cloud, e.g. at Amazon Web Services.

OBJECTIVES: In this article we analyze whether a selection of statistical projects, recently implemented at our department, can be efficiently realized on these cloud resources. Moreover, we illustrate an opportunity to combine computer cluster and cloud resources.

METHODS: In order to compare the efficiency of computer cluster and cloud implementations and their respective parallelizations we use microarray analysis procedures and compare their runtimes on the different platforms.

RESULTS: Amazon Web Services provide various instance types which meet the particular needs of the different statistical projects we analyzed in this paper. Moreover, the network capacity is sufficient and the parallelization is comparable in efficiency to standard computer cluster implementations.

CONCLUSION: Our results suggest that many statistical projects can be efficiently realized on cloud resources. It is important to mention, however, that workflows can change substantially as a result of a shift from computer cluster to cloud computing.}, } @article {pmid23185310, year = {2012}, author = {Pareja-Tobes, P and Manrique, M and Pareja-Tobes, E and Pareja, E and Tobes, R}, title = {BG7: a new approach for bacterial genome annotation designed for next generation sequencing data.}, journal = {PloS one}, volume = {7}, number = {11}, pages = {e49239}, pmid = {23185310}, issn = {1932-6203}, mesh = {*Databases, Genetic ; Escherichia coli/*genetics ; Genes, Bacterial/genetics ; Genome, Bacterial/*genetics ; Molecular Sequence Annotation/*methods ; Sequence Analysis, DNA/*methods ; }, abstract = {BG7 is a new system for de novo bacterial, archaeal and viral genome annotation based on a new approach specifically designed for annotating genomes sequenced with next generation sequencing technologies. The system is versatile and able to annotate genes even in the step of preliminary assembly of the genome. It is especially efficient detecting unexpected genes horizontally acquired from bacterial or archaeal distant genomes, phages, plasmids, and mobile elements. From the initial phases of the gene annotation process, BG7 exploits the massive availability of annotated protein sequences in databases. BG7 predicts ORFs and infers their function based on protein similarity with a wide set of reference proteins, integrating ORF prediction and functional annotation phases in just one step. BG7 is especially tolerant to sequencing errors in start and stop codons, to frameshifts, and to assembly or scaffolding errors. The system is also tolerant to the high level of gene fragmentation which is frequently found in not fully assembled genomes. 
BG7's current version, which is developed in Java, takes advantage of Amazon Web Services (AWS) cloud computing features, but it can also be run locally in any operating system. BG7 is a fast, automated and scalable system that can cope with the challenge of analyzing the huge amount of genomes that are being sequenced with NGS technologies. Its capabilities and efficiency were demonstrated in the 2011 EHEC Germany outbreak in which BG7 was used to get the first annotations right the next day after the first entero-hemorrhagic E. coli genome sequences were made publicly available. The suitability of BG7 for genome annotation has been proved for Illumina, 454, Ion Torrent, and PacBio sequencing technologies. Besides, thanks to its plasticity, our system could be very easily adapted to work with new technologies in the future.}, } @article {pmid23181507, year = {2012}, author = {Afgan, E and Chapman, B and Taylor, J}, title = {CloudMan as a platform for tool, data, and analysis distribution.}, journal = {BMC bioinformatics}, volume = {13}, number = {}, pages = {315}, pmid = {23181507}, issn = {1471-2105}, support = {U41 HG006620/HG/NHGRI NIH HHS/United States ; HG005133/HG/NHGRI NIH HHS/United States ; HG005542/HG/NHGRI NIH HHS/United States ; HG004909/HG/NHGRI NIH HHS/United States ; }, mesh = {*Information Storage and Retrieval ; *Software ; }, abstract = {BACKGROUND: Cloud computing provides an infrastructure that facilitates large scale computational analysis in a scalable, democratized fashion. However, in this context it is difficult to ensure sharing of an analysis environment and associated data in a scalable and precisely reproducible way.

RESULTS: CloudMan (usecloudman.org) enables individual researchers to easily deploy, customize, and share their entire cloud analysis environment, including data, tools, and configurations.

CONCLUSIONS: With the enabled customization and sharing of instances, CloudMan can be used as a platform for collaboration. The presented solution improves accessibility of cloud resources, tools, and data to the level of an individual researcher and contributes toward reproducibility and transparency of research solutions.}, } @article {pmid23134663, year = {2012}, author = {Evani, US and Challis, D and Yu, J and Jackson, AR and Paithankar, S and Bainbridge, MN and Jakkamsetti, A and Pham, P and Coarfa, C and Milosavljevic, A and Yu, F}, title = {Atlas2 Cloud: a framework for personal genome analysis in the cloud.}, journal = {BMC genomics}, volume = {13 Suppl 6}, number = {Suppl 6}, pages = {S19}, pmid = {23134663}, issn = {1471-2164}, support = {1U01HG005211-0109/HG/NHGRI NIH HHS/United States ; 5U54HG003273/HG/NHGRI NIH HHS/United States ; R01HG004009/HG/NHGRI NIH HHS/United States ; U01DA025956/DA/NIDA NIH HHS/United States ; }, mesh = {Databases, Genetic ; *Genome, Human ; Humans ; Internet ; *Software ; User-Computer Interface ; }, abstract = {BACKGROUND: Until recently, sequencing has primarily been carried out in large genome centers which have invested heavily in developing the computational infrastructure that enables genomic sequence analysis. The recent advancements in next generation sequencing (NGS) have led to a wide dissemination of sequencing technologies and data, to highly diverse research groups. It is expected that clinical sequencing will become part of diagnostic routines shortly. However, limited accessibility to computational infrastructure and high quality bioinformatic tools, and the demand for personnel skilled in data analysis and interpretation remains a serious bottleneck. To this end, the cloud computing and Software-as-a-Service (SaaS) technologies can help address these issues.

RESULTS: We successfully enabled the Atlas2 Cloud pipeline for personal genome analysis on two different cloud service platforms: a community cloud via the Genboree Workbench, and a commercial cloud via the Amazon Web Services using Software-as-a-Service model. We report a case study of personal genome analysis using our Atlas2 Genboree pipeline. We also outline a detailed cost structure for running Atlas2 Amazon on whole exome capture data, providing cost projections in terms of storage, compute and I/O when running Atlas2 Amazon on a large data set.

CONCLUSIONS: We find that providing a web interface and an optimized pipeline clearly facilitates usage of cloud computing for personal genome analysis, but for it to be routinely used for large scale projects there needs to be a paradigm shift in the way we develop tools, in standard operating procedures, and in funding mechanisms.}, } @article {pmid23156073, year = {2012}, author = {Franceschini, B}, title = {Cloud computing puts stakeholders on same page.}, journal = {Managed care (Langhorne, Pa.)}, volume = {21}, number = {10}, pages = {35-6, 51}, pmid = {23156073}, issn = {1062-3388}, mesh = {Cost Control ; Delivery of Health Care/economics ; Information Storage and Retrieval/*trends ; Internet/*trends ; Managed Care Programs/economics ; Quality of Health Care ; }, } @article {pmid23142544, year = {2013}, author = {Hoopmann, MR and Moritz, RL}, title = {Current algorithmic solutions for peptide-based proteomics data generation and identification.}, journal = {Current opinion in biotechnology}, volume = {24}, number = {1}, pages = {31-38}, pmid = {23142544}, issn = {1879-0429}, support = {P50 GM076547/GM/NIGMS NIH HHS/United States ; RC2 HG005805/HG/NHGRI NIH HHS/United States ; S10 RR027584/RR/NCRR NIH HHS/United States ; R01 HG005805/HG/NHGRI NIH HHS/United States ; }, mesh = {*Algorithms ; Databases, Protein ; Humans ; Mass Spectrometry/*methods ; Peptides/*analysis/*chemistry ; Proteins/analysis/chemistry ; Proteomics/*methods ; Tandem Mass Spectrometry/methods ; }, abstract = {Peptide-based proteomic data sets are ever increasing in size and complexity. These data sets provide computational challenges when attempting to quickly analyze spectra and obtain correct protein identifications. Database search and de novo algorithms must consider high-resolution MS/MS spectra and alternative fragmentation methods. Protein inference is a tricky problem when analyzing large data sets of degenerate peptide identifications. 
Combining multiple algorithms for improved peptide identification puts significant strain on computational systems when investigating large data sets. This review highlights some of the recent developments in peptide and protein identification algorithms for analyzing shotgun mass spectrometry data when encountering the aforementioned hurdles. Also explored are the roles that analytical pipelines, public spectral libraries, and cloud computing play in the evolution of peptide-based proteomics.}, } @article {pmid23113397, year = {2012}, author = {Mesa-Gutiérrez, JC and Bardají, C and Brun, N and Núñez, B and Sánchez, B and Sanvicente, B and Obiols, P and Rigol, S}, title = {[Pediatric surgery 2.0].}, journal = {Cirugia pediatrica : organo oficial de la Sociedad Espanola de Cirugia Pediatrica}, volume = {25}, number = {2}, pages = {91-97}, pmid = {23113397}, issn = {0214-1221}, mesh = {Information Management ; Information Storage and Retrieval ; *Internet ; *Pediatrics ; *Specialties, Surgical ; }, abstract = {New tools from the web are a complete breakthrough in management of information. The aim of this paper is to present different resources in a friendly way, with apps and examples in the different phases of the knowledge management for the paediatric surgeon: search, filter, reception, classification, sharing, collaborative work and publication. We are assisting to a real revolution on how to manage knowledge and information. The main charateristics are: immediateness, social component, growing interaction, and easiness. Every physician has clinical questions and the Internet gives us more and more resources to make searchs easier. Along with them we need electronic resources to filter information of quality and to make easier transfer of knowledge to clinical practice. Cloud computing is on continuous development and makes possible sharing information with differents users and computers. 
The main feature of the apps from the Internet is the social component, which makes possible interaction, sharing and collaborative work.}, } @article {pmid23088505, year = {2012}, author = {Trudgian, DC and Mirzaei, H}, title = {Cloud CPFP: a shotgun proteomics data analysis pipeline using cloud and high performance computing.}, journal = {Journal of proteome research}, volume = {11}, number = {12}, pages = {6282-6290}, doi = {10.1021/pr300694b}, pmid = {23088505}, issn = {1535-3907}, mesh = {Cell Line ; Computational Biology/economics/*methods ; Databases, Protein ; Electronic Data Processing/methods ; Humans ; Internet ; Proteomics/economics/*methods ; Reproducibility of Results ; Search Engine ; *Software ; Time Factors ; }, abstract = {We have extended the functionality of the Central Proteomics Facilities Pipeline (CPFP) to allow use of remote cloud and high performance computing (HPC) resources for shotgun proteomics data processing. CPFP has been modified to include modular local and remote scheduling for data processing jobs. The pipeline can now be run on a single PC or server, a local cluster, a remote HPC cluster, and/or the Amazon Web Services (AWS) cloud. We provide public images that allow easy deployment of CPFP in its entirety in the AWS cloud. This significantly reduces the effort necessary to use the software, and allows proteomics laboratories to pay for compute time ad hoc, rather than obtaining and maintaining expensive local server clusters. Alternatively the Amazon cloud can be used to increase the throughput of a local installation of CPFP as necessary. We demonstrate that cloud CPFP allows users to process data at higher speed than local installations but with similar cost and lower staff requirements. In addition to the computational improvements, the web interface to CPFP is simplified, and other functionalities are enhanced. 
The software is under active development at two leading institutions and continues to be released under an open-source license at http://cpfp.sourceforge.net.}, } @article {pmid23066431, year = {2012}, author = {Wriggers, W and Olson, WK and Dos Remedios, CG}, title = {Computational opportunities for remote collaboration and capacity building afforded by Web 2.0 and cloud computing.}, journal = {Biophysical reviews}, volume = {4}, number = {3}, pages = {153-160}, pmid = {23066431}, issn = {1867-2450}, support = {R01 GM034809/GM/NIGMS NIH HHS/United States ; R01 GM062968/GM/NIGMS NIH HHS/United States ; }, abstract = {In this paper, we state our aims and aspirations for building a global network of likeminded people interested in developing and encouraging students in the field of computational biophysics (CB). Global capacity building efforts have uncovered local computational talent in virtually every community regardless of where the students reside. Our vision is to discover and encourage these aspiring investigators by suggesting ways that they and other "garage scientists" can participate in new science even if they have no access to sophisticated research infrastructure. We argue that participatory computing in the "cloud" is particularly suitable for CB and available to any budding computational biophysicist if he or she is provided with open-minded mentors who have the necessary skills and generosity. We recognize that there are barriers to the development of such remote collaborations, and we discuss possible pathways to overcome these barriers. 
We point out that this Special Issue of Biophysical Reviews provides a much-needed forum for the development of several specific applications of CB.}, } @article {pmid23061642, year = {2012}, author = {Piette, JD and Datwani, H and Gaudioso, S and Foster, SM and Westphal, J and Perry, W and Rodríguez-Saldaña, J and Mendoza-Avelares, MO and Marinec, N}, title = {Hypertension management using mobile technology and home blood pressure monitoring: results of a randomized trial in two low/middle-income countries.}, journal = {Telemedicine journal and e-health : the official journal of the American Telemedicine Association}, volume = {18}, number = {8}, pages = {613-620}, pmid = {23061642}, issn = {1556-3669}, support = {P30 DK092926/DK/NIDDK NIH HHS/United States ; }, mesh = {Blood Pressure ; Blood Pressure Monitoring, Ambulatory/*methods ; *Cardiology ; Cardiovascular Diseases/prevention & control ; Computer Simulation ; Confidence Intervals ; Developed Countries ; Developing Countries ; Female ; Honduras ; Humans ; Hypertension/diagnosis/*prevention & control ; *Income ; Male ; Mexico ; Middle Aged ; Patient Selection ; Poverty ; Psychometrics ; Socioeconomic Factors ; Telemedicine/methods/*organization & administration ; }, abstract = {OBJECTIVE: Hypertension and other noncommunicable diseases represent a growing threat to low/middle-income countries (LMICs). Mobile health technologies may improve noncommunicable disease outcomes, but LMICs lack resources to provide these services. We evaluated the efficacy of a cloud computing model using automated self-management calls plus home blood pressure (BP) monitoring as a strategy for improving systolic BPs (SBPs) and other outcomes of hypertensive patients in two LMICs.

SUBJECTS AND METHODS: This was a randomized trial with a 6-week follow-up. Participants with high SBPs (≥140 mm Hg if nondiabetic and ≥130 mm Hg if diabetic) were enrolled from clinics in Honduras and Mexico. Intervention patients received weekly automated monitoring and behavior change telephone calls sent from a server in the United States, plus a home BP monitor. At baseline, control patients received BP results, hypertension information, and usual healthcare. The primary outcome, SBP, was examined for all patients in addition to a preplanned subgroup with low literacy or high hypertension information needs. Secondary outcomes included perceived health status and medication-related problems.

RESULTS: Of the 200 patients recruited, 181 (90%) completed follow-up, and 117 of 181 had low literacy or high hypertension information needs. The median annual income was $2,900 USD, and average educational attainment was 6.5 years. At follow-up intervention patients' SBPs decreased 4.2 mm Hg relative to controls (95% confidence interval -9.1, 0.7; p=0.09). In the subgroup with high information needs, intervention patients' average SBPs decreased 8.8 mm Hg (-14.2, -3.4, p=0.002). Compared with controls, intervention patients at follow-up reported fewer depressive symptoms (p=0.004), fewer medication problems (p<0.0001), better general health (p<0.0001), and greater satisfaction with care (p≤0.004).

CONCLUSIONS: Automated telephone care management plus home BP monitors can improve outcomes for hypertensive patients in LMICs. A cloud computing model within regional telecommunication centers could make these services available in areas with limited infrastructure for patient-focused informatics support.}, } @article {pmid23060318, year = {2013}, author = {He, C and Fan, X and Li, Y}, title = {Toward ubiquitous healthcare services with a novel efficient cloud platform.}, journal = {IEEE transactions on bio-medical engineering}, volume = {60}, number = {1}, pages = {230-234}, doi = {10.1109/TBME.2012.2222404}, pmid = {23060318}, issn = {1558-2531}, mesh = {Algorithms ; Electrocardiography ; Electronic Health Records ; Health Services Accessibility ; Humans ; Information Storage and Retrieval/*methods ; *Internet ; Medical Informatics/*methods ; Models, Theoretical ; Monitoring, Physiologic ; Telemedicine ; }, abstract = {Ubiquitous healthcare services are becoming more and more popular, especially under the urgent demand of the global aging issue. Cloud computing owns the pervasive and on-demand service-oriented natures, which can fit the characteristics of healthcare services very well. However, the abilities in dealing with multimodal, heterogeneous, and nonstationary physiological signals to provide persistent personalized services, meanwhile keeping high concurrent online analysis for public, are challenges to the general cloud. In this paper, we proposed a private cloud platform architecture which includes six layers according to the specific requirements. This platform utilizes message queue as a cloud engine, and each layer thereby achieves relative independence by this loosely coupled means of communications with publish/subscribe mechanism. Furthermore, a plug-in algorithm framework is also presented, and massive semistructure or unstructured medical data are accessed adaptively by this cloud architecture. 
As the testing results showing, this proposed cloud platform, with robust, stable, and efficient features, can satisfy high concurrent requests from ubiquitous healthcare services.}, } @article {pmid23032609, year = {2012}, author = {Kim, I and Jung, JY and Deluca, TF and Nelson, TH and Wall, DP}, title = {Cloud computing for comparative genomics with windows azure platform.}, journal = {Evolutionary bioinformatics online}, volume = {8}, number = {}, pages = {527-534}, pmid = {23032609}, issn = {1176-9343}, support = {R01 MH090611/MH/NIMH NIH HHS/United States ; }, abstract = {Cloud computing services have emerged as a cost-effective alternative for cluster systems as the number of genomes and required computation power to analyze them increased in recent years. Here we introduce the Microsoft Azure platform with detailed execution steps and a cost comparison with Amazon Web Services.}, } @article {pmid23017886, year = {2012}, author = {Kang, L and Guo, Q and Wang, X}, title = {A hierarchical method for molecular docking using cloud computing.}, journal = {Bioorganic & medicinal chemistry letters}, volume = {22}, number = {21}, pages = {6568-6572}, doi = {10.1016/j.bmcl.2012.09.016}, pmid = {23017886}, issn = {1464-3405}, mesh = {*Computers/trends ; Drug Discovery ; Information Storage and Retrieval/trends ; *Molecular Docking Simulation ; Protein Binding ; Small Molecule Libraries/chemistry ; }, abstract = {Discovering small molecules that interact with protein targets will be a key part of future drug discovery efforts. Molecular docking of drug-like molecules is likely to be valuable in this field; however, the great number of such molecules makes the potential size of this task enormous. In this paper, a method to screen small molecular databases using cloud computing is proposed. This method is called the hierarchical method for molecular docking and can be completed in a relatively short period of time. 
In this method, the optimization of molecular docking is divided into two subproblems based on the different effects on the protein-ligand interaction energy. An adaptive genetic algorithm is developed to solve the optimization problem and a new docking program (FlexGAsDock) based on the hierarchical docking method has been developed. The implementation of docking on a cloud computing platform is then discussed. The docking results show that this method can be conveniently used for the efficient molecular design of drugs.}, } @article {pmid22988693, year = {2012}, author = {Wilhite, SE}, title = {Cloud computing?.}, journal = {HDA now}, volume = {}, number = {}, pages = {12}, pmid = {22988693}, mesh = {Computing Methodologies ; Humans ; Information Storage and Retrieval/methods ; *Internet ; *Practice Management, Dental ; Software ; }, } @article {pmid22987133, year = {2012}, author = {Lee, H and Yang, Y and Chae, H and Nam, S and Choi, D and Tangchaisin, P and Herath, C and Marru, S and Nephew, KP and Kim, S}, title = {BioVLAB-MMIA: a cloud environment for microRNA and mRNA integrated analysis (MMIA) on Amazon EC2.}, journal = {IEEE transactions on nanobioscience}, volume = {11}, number = {3}, pages = {266-272}, doi = {10.1109/TNB.2012.2212030}, pmid = {22987133}, issn = {1558-2639}, support = {U54 CA113001/CA/NCI NIH HHS/United States ; }, mesh = {Cell Line, Tumor ; *Database Management Systems ; *Databases, Genetic ; Drug Resistance, Neoplasm ; Genomics/*methods ; Humans ; *Internet ; MicroRNAs/*genetics/metabolism ; RNA, Messenger/*genetics/metabolism ; }, abstract = {MicroRNAs, by regulating the expression of hundreds of target genes, play critical roles in developmental biology and the etiology of numerous diseases, including cancer. As a vast amount of microRNA expression profile data are now publicly available, the integration of microRNA expression data sets with gene expression profiles is a key research problem in life science research. 
However, the ability to conduct genome-wide microRNA-mRNA (gene) integration currently requires sophisticated, high-end informatics tools, significant expertise in bioinformatics and computer science to carry out the complex integration analysis. In addition, increased computing infrastructure capabilities are essential in order to accommodate large data sets. In this study, we have extended the BioVLAB cloud workbench to develop an environment for the integrated analysis of microRNA and mRNA expression data, named BioVLAB-MMIA. The workbench facilitates computations on the Amazon EC2 and S3 resources orchestrated by the XBaya Workflow Suite. The advantages of BioVLAB-MMIA over the web-based MMIA system include: 1) readily expanded as new computational tools become available; 2) easily modifiable by re-configuring graphic icons in the workflow; 3) on-demand cloud computing resources can be used on an "as needed" basis; 4) distributed orchestration supports complex and long running workflows asynchronously. We believe that BioVLAB-MMIA will be an easy-to-use computing environment for researchers who plan to perform genome-wide microRNA-mRNA (gene) integrated analysis tasks.}, } @article {pmid22960169, year = {2012}, author = {Xu, B and Gao, J and Li, C}, title = {An efficient algorithm for DNA fragment assembly in MapReduce.}, journal = {Biochemical and biophysical research communications}, volume = {426}, number = {3}, pages = {395-398}, doi = {10.1016/j.bbrc.2012.08.101}, pmid = {22960169}, issn = {1090-2104}, mesh = {Algorithms ; *Computer Graphics ; DNA/genetics ; Sequence Analysis, DNA/*methods ; *Software ; }, abstract = {Fragment assembly is one of the most important problems of sequence assembly. Algorithms for DNA fragment assembly using de Bruijn graph have been widely used. These algorithms require a large amount of memory and running time to build the de Bruijn graph. Another drawback of the conventional de Bruijn approach is the loss of information. 
To overcome these shortcomings, this paper proposes a parallel strategy to construct de Bruijn graph. Its main characteristic is to avoid the division of de Bruijn graph. A novel fragment assembly algorithm based on our parallel strategy is implemented in the MapReduce framework. The experimental results show that the parallel strategy can effectively improve the computational efficiency and remove the memory limitations of the assembly algorithm based on Euler superpath. This paper provides a useful attempt to the assembly of large-scale genome sequence using Cloud Computing.}, } @article {pmid22948818, year = {2012}, author = {Morimae, T and Fujii, K}, title = {Blind topological measurement-based quantum computation.}, journal = {Nature communications}, volume = {3}, number = {}, pages = {1036}, pmid = {22948818}, issn = {2041-1723}, abstract = {Blind quantum computation is a novel secure quantum-computing protocol that enables Alice, who does not have sufficient quantum technology at her disposal, to delegate her quantum computation to Bob, who has a fully fledged quantum computer, in such a way that Bob cannot learn anything about Alice's input, output and algorithm. A recent proof-of-principle experiment demonstrating blind quantum computation in an optical system has raised new challenges regarding the scalability of blind quantum computation in realistic noisy conditions. Here we show that fault-tolerant blind quantum computation is possible in a topologically protected manner using the Raussendorf-Harrington-Goyal scheme. The error threshold of our scheme is 4.3 × 10(-3), which is comparable to that (7.5 × 10(-3)) of non-blind topological quantum computation. 
As the error per gate of the order 10(-3) was already achieved in some experimental systems, our result implies that secure cloud quantum computation is within reach.}, } @article {pmid22948728, year = {2012}, author = {Gurtowski, J and Schatz, MC and Langmead, B}, title = {Genotyping in the cloud with Crossbow.}, journal = {Current protocols in bioinformatics}, volume = {Chapter 15}, number = {}, pages = {15.3.1-15.3.15}, pmid = {22948728}, issn = {1934-340X}, support = {P41 HG004059/HG/NHGRI NIH HHS/United States ; R01 HG006102/HG/NHGRI NIH HHS/United States ; R01 HG006677/HG/NHGRI NIH HHS/United States ; R01 HG006677-12/HG/NHGRI NIH HHS/United States ; }, mesh = {Algorithms ; *Genotype ; Polymorphism, Single Nucleotide ; Sequence Analysis, DNA ; *Software ; }, abstract = {Crossbow is a scalable, portable, and automatic cloud computing tool for identifying SNPs from high-coverage, short-read resequencing data. It is built on Apache Hadoop, an implementation of the MapReduce software framework. Hadoop allows Crossbow to distribute read alignment and SNP calling subtasks over a cluster of commodity computers. Two robust tools, Bowtie and SOAPsnp, implement the fundamental alignment and variant calling operations respectively, and have demonstrated capabilities within Crossbow of analyzing approximately one billion short reads per hour on a commodity Hadoop cluster with 320 cores. 
Through protocol examples, this unit will demonstrate the use of Crossbow for identifying variations in three different operating modes: on a Hadoop cluster, on a single computer, and on the Amazon Elastic MapReduce cloud computing service.}, } @article {pmid22941994, year = {2012}, author = {Visser, T}, title = {Tutorial on academic high-performance cloud computing.}, journal = {Studies in health technology and informatics}, volume = {175}, number = {}, pages = {103}, pmid = {22941994}, issn = {0926-9630}, mesh = {*Databases, Factual ; Health Services Research/*methods ; Information Dissemination/*methods ; Information Storage and Retrieval/*methods ; *Internet ; *Universities ; }, abstract = {This documents shortly describes the background and structure of the academic high-performance cloud computing tutorial at the Healthgrid conference.}, } @article {pmid22941989, year = {2012}, author = {Carrión, A and Blanquer, I and Hernández, V}, title = {A service-based BLAST command tool supported by cloud infrastructures.}, journal = {Studies in health technology and informatics}, volume = {175}, number = {}, pages = {69-77}, pmid = {22941989}, issn = {0926-9630}, mesh = {*Algorithms ; Data Mining/*methods ; *Internet ; *Programming Languages ; Sequence Alignment/*methods ; Sequence Analysis/*methods ; *Software ; *User-Computer Interface ; }, abstract = {Notwithstanding the benefits of distributed-computing infrastructures for empowering bioinformatics analysis tools with the needed computing and storage capability, the actual use of these infrastructures is still low. Learning curves and deployment difficulties have reduced the impact on the wide research community. This article presents a porting strategy of BLAST based on a multiplatform client and a service that provides the same interface as sequential BLAST, thus reducing learning curve and with minimal impact on their integration on existing workflows. 
The porting has been done using the execution and data access components from the EC project Venus-C and the Windows Azure infrastructure provided in this project. The results obtained demonstrate a low overhead on the global execution framework and reasonable speed-up and cost-efficiency with respect to a sequential version.}, } @article {pmid22934238, year = {2012}, author = {Almeida, JS and Iriabho, EE and Gorrepati, VL and Wilkinson, SR and Grüneberg, A and Robbins, DE and Hackney, JR}, title = {ImageJS: Personalized, participated, pervasive, and reproducible image bioinformatics in the web browser.}, journal = {Journal of pathology informatics}, volume = {3}, number = {}, pages = {25}, pmid = {22934238}, issn = {2153-3539}, abstract = {BACKGROUND: Image bioinformatics infrastructure typically relies on a combination of server-side high-performance computing and client desktop applications tailored for graphic rendering. On the server side, matrix manipulation environments are often used as the back-end where deployment of specialized analytical workflows takes place. However, neither the server-side nor the client-side desktop solution, by themselves or combined, is conducive to the emergence of open, collaborative, computational ecosystems for image analysis that are both self-sustained and user driven.

MATERIALS AND METHODS: ImageJS was developed as a browser-based webApp, untethered from a server-side backend, by making use of recent advances in the modern web browser such as a very efficient compiler, high-end graphical rendering capabilities, and I/O tailored for code migration.

RESULTS: Multiple versioned code hosting services were used to develop distinct ImageJS modules to illustrate its amenability to collaborative deployment without compromise of reproducibility or provenance. The illustrative examples include modules for image segmentation, feature extraction, and filtering. The deployment of image analysis by code migration is in sharp contrast with the more conventional, heavier, and less safe reliance on data transfer. Accordingly, code and data are loaded into the browser by exactly the same script tag loading mechanism, which offers a number of interesting applications that would be hard to attain with more conventional platforms, such as NIH's popular ImageJ application.

CONCLUSIONS: The modern web browser was found to be advantageous for image bioinformatics in both the research and clinical environments. This conclusion reflects advantages in deployment scalability and analysis reproducibility, as well as the critical ability to deliver advanced computational statistical procedures machines where access to sensitive data is controlled, that is, without local "download and installation".}, } @article {pmid22926919, year = {2012}, author = {Chen, TS and Liu, CH and Chen, TL and Chen, CS and Bau, JG and Lin, TC}, title = {Secure Dynamic access control scheme of PHR in cloud computing.}, journal = {Journal of medical systems}, volume = {36}, number = {6}, pages = {4005-4020}, pmid = {22926919}, issn = {0148-5598}, mesh = {*Access to Information ; Algorithms ; *Computer Security ; Confidentiality ; *Electronic Health Records ; *Health Records, Personal ; Information Storage and Retrieval/*methods ; *Internet ; }, abstract = {With the development of information technology and medical technology, medical information has been developed from traditional paper records into electronic medical records, which have now been widely applied. The new-style medical information exchange system "personal health records (PHR)" is gradually developed. PHR is a kind of health records maintained and recorded by individuals. An ideal personal health record could integrate personal medical information from different sources and provide complete and correct personal health and medical summary through the Internet or portable media under the requirements of security and privacy. A lot of personal health records are being utilized. The patient-centered PHR information exchange system allows the public autonomously maintain and manage personal health records. Such management is convenient for storing, accessing, and sharing personal medical records. 
With the emergence of Cloud computing, PHR service has been transferred to storing data into Cloud servers that the resources could be flexibly utilized and the operation cost can be reduced. Nevertheless, patients would face privacy problem when storing PHR data into Cloud. Besides, it requires a secure protection scheme to encrypt the medical records of each patient for storing PHR into Cloud server. In the encryption process, it would be a challenge to achieve accurately accessing to medical records and corresponding to flexibility and efficiency. A new PHR access control scheme under Cloud computing environments is proposed in this study. With Lagrange interpolation polynomial to establish a secure and effective PHR information access scheme, it allows to accurately access to PHR with security and is suitable for enormous multi-users. Moreover, this scheme also dynamically supports multi-users in Cloud computing environments with personal privacy and offers legal authorities to access to PHR. 
From security and effectiveness analyses, the proposed PHR access scheme in Cloud computing environments is proven flexible and secure and could effectively correspond to real-time appending and deleting user access authorization and appending and revising PHR records.}, } @article {pmid22916831, year = {2012}, author = {Mohammed, Y and Mostovenko, E and Henneman, AA and Marissen, RJ and Deelder, AM and Palmblad, M}, title = {Cloud parallel processing of tandem mass spectrometry based proteomics data.}, journal = {Journal of proteome research}, volume = {11}, number = {10}, pages = {5101-5108}, doi = {10.1021/pr300561q}, pmid = {22916831}, issn = {1535-3907}, mesh = {Algorithms ; Blood Proteins/chemistry/isolation & purification ; Chromatography, Liquid ; Computer Communication Networks ; Data Compression ; Data Mining ; Electronic Data Processing ; Escherichia coli Proteins/chemistry/isolation & purification ; Humans ; Peptide Mapping/*methods ; Proteomics ; *Search Engine ; Tandem Mass Spectrometry/*methods ; }, abstract = {Data analysis in mass spectrometry based proteomics struggles to keep pace with the advances in instrumentation and the increasing rate of data acquisition. Analyzing this data involves multiple steps requiring diverse software, using different algorithms and data formats. Speed and performance of the mass spectral search engines are continuously improving, although not necessarily as needed to face the challenges of acquired big data. Improving and parallelizing the search algorithms is one possibility; data decomposition presents another, simpler strategy for introducing parallelism. We describe a general method for parallelizing identification of tandem mass spectra using data decomposition that keeps the search engine intact and wraps the parallelization around it. We introduce two algorithms for decomposing mzXML files and recomposing resulting pepXML files. 
This makes the approach applicable to different search engines, including those relying on sequence databases and those searching spectral libraries. We use cloud computing to deliver the computational power and scientific workflow engines to interface and automate the different processing steps. We show how to leverage these technologies to achieve faster data analysis in proteomics and present three scientific workflows for parallel database as well as spectral library search using our data decomposition programs, X!Tandem and SpectraST.}, } @article {pmid22894045, year = {2012}, author = {Savage, GT and van der Reis, L}, title = {A Dutch and American commentary on IT in health care: roundtable discussions on IT and innovations in health care.}, journal = {Advances in health care management}, volume = {12}, number = {}, pages = {61-74}, doi = {10.1108/s1474-8231(2012)0000012007}, pmid = {22894045}, issn = {1474-8231}, mesh = {Benchmarking ; Cost-Benefit Analysis ; Delivery of Health Care/*economics/*organization & administration ; Electronic Health Records ; Humans ; *Medical Informatics ; Netherlands ; Quality of Health Care ; Social Media/statistics & numerical data ; United States ; }, abstract = {PURPOSE: This chapter reports on experts' perspectives on health information technology (HIT) and how it may be used to improve health care quality and to lower health care costs.

DESIGN/METHODOLOGY/APPROACH: Two roundtables were convened that focused on how to best use HIT to improve the quality of health care while ensuring it is accessible and affordable. Participants drew upon lessons learned in the Netherlands, the United States, and other countries.

FINDINGS: The first roundtable focused on the use of (1) electronic health records (EHRs) by health care providers, (2) cloud computing for EHRs and health portals for consumers, and (3) data registries and networks for public health surveillance. The second roundtable highlighted (1) the rapid growth of personalized medicine, (2) the corresponding growth and sophistication of bioinformatics and analytics, (3) the increasing presence of mobile HIT, and (4) the disruptive changes in the institutional structures of biomedical research and development.

PRACTICAL IMPLICATIONS: Governmental sponsorship of small pilot projects to solve practicable health system problems would encourage HIT innovation among key stakeholders. However, large-scale HIT solutions--developed through small pilot projects--should be pursued through public-private partnerships. At the same time, governments should speed up legislative and regulatory procedures to encourage adoption of cost-effective HIT innovations.

SOCIAL IMPLICATIONS: Mobile HIT and social media are capable of fostering disease prevention and encouraging personal responsibility for improving or stabilizing chronic diseases.

ORIGINALITY/VALUE: Both health services researchers and policy makers should find this chapter of value since it highlights trends in HIT and addresses how health care quality may be improved while costs are contained.}, } @article {pmid22875554, year = {2013}, author = {Silva, LA and Costa, C and Oliveira, JL}, title = {DICOM relay over the cloud.}, journal = {International journal of computer assisted radiology and surgery}, volume = {8}, number = {3}, pages = {323-333}, pmid = {22875554}, issn = {1861-6429}, mesh = {Computer Communication Networks/*organization & administration ; Humans ; *Information Storage and Retrieval ; Radiology Information Systems/*organization & administration ; Teleradiology/*organization & administration ; }, abstract = {PURPOSE: Healthcare institutions worldwide have adopted picture archiving and communication system (PACS) for enterprise access to images, relying on Digital Imaging Communication in Medicine (DICOM) standards for data exchange. However, communication over a wider domain of independent medical institutions is not well standardized. A DICOM-compliant bridge was developed for extending and sharing DICOM services across healthcare institutions without requiring complex network setups or dedicated communication channels.

METHODS: A set of DICOM routers interconnected through a public cloud infrastructure was implemented to support medical image exchange among institutions. Despite the advantages of cloud computing, new challenges were encountered regarding data privacy, particularly when medical data are transmitted over different domains. To address this issue, a solution was introduced by creating a ciphered data channel between the entities sharing DICOM services.

RESULTS: Two main DICOM services were implemented in the bridge: Storage and Query/Retrieve. The performance measures demonstrated it is quite simple to exchange information and processes between several institutions. The solution can be integrated with any currently installed PACS-DICOM infrastructure. This method works transparently with well-known cloud service providers.

CONCLUSIONS: Cloud computing was introduced to augment enterprise PACS by providing standard medical imaging services across different institutions, offering communication privacy and enabling creation of wider PACS scenarios with suitable technical solutions.}, } @article {pmid22874270, year = {2012}, author = {Ribeiro, LS and Costa, C and Oliveira, JL}, title = {Enhancing the many-to-many relations across IHE document sharing communities.}, journal = {Studies in health technology and informatics}, volume = {180}, number = {}, pages = {641-645}, pmid = {22874270}, issn = {0926-9630}, mesh = {*Database Management Systems ; *Electronic Health Records ; *Health Records, Personal ; Information Storage and Retrieval/*methods ; Medical Record Linkage/*methods ; Patient-Centered Care/*methods ; Systems Integration ; }, abstract = {The Integrating Healthcare Enterprise (IHE) initiative is an ongoing project aiming to enable true inter-site interoperability in the health IT field. IHE is a work in progress and many challenges need to be overcome before the healthcare Institutions may share patient clinical records transparently and effortless. Configuring, deploying and testing an IHE document sharing community requires a significant effort to plan and maintain the supporting IT infrastructure. With the new paradigm of cloud computing is now possible to launch software devices on demand and paying accordantly to the usage. This paper presents a framework designed with purpose of expediting the creation of IHE document sharing communities. It provides semi-ready templates of sharing communities that will be customized according the community needs. 
The framework is a meeting point of the healthcare institutions, creating a favourable environment that might converge in new inter-institutional professional relationships and eventually the creation of new Affinity Domains.}, } @article {pmid22874241, year = {2012}, author = {Viana-Ferreira, C and Ferreira, D and Valente, F and Monteiro, E and Costa, C and Oliveira, JL}, title = {Dicoogle Mobile: a medical imaging platform for Android.}, journal = {Studies in health technology and informatics}, volume = {180}, number = {}, pages = {502-506}, pmid = {22874241}, issn = {0926-9630}, mesh = {Cell Phone ; *Computers, Handheld ; Data Mining/*methods ; *Database Management Systems ; Internet ; Portugal ; Programming Languages ; *Radiology Information Systems ; *Software ; Teleradiology/*methods ; *User-Computer Interface ; }, abstract = {Mobile computing technologies are increasingly becoming a valuable asset in healthcare information systems. The adoption of these technologies helps to assist in improving quality of care, increasing productivity and facilitating clinical decision support. They provide practitioners with ubiquitous access to patient records, being actually an important component in telemedicine and tele-work environments. We have developed Dicoogle Mobile, an Android application that provides remote access to distributed medical imaging data through a cloud relay service. Besides, this application has the capability to store and index local imaging data, so that they can also be searched and visualized. 
In this paper, we will describe Dicoogle Mobile concept as well the architecture of the whole system that makes it running.}, } @article {pmid22874196, year = {2012}, author = {Vida, MM and Lupşe, OS and Stoicu-Tivadar, L and Bernad, E}, title = {Flexible solution for interoperable cloud healthcare systems.}, journal = {Studies in health technology and informatics}, volume = {180}, number = {}, pages = {280-284}, pmid = {22874196}, issn = {0926-9630}, mesh = {Databases, Factual/*standards ; Delivery of Health Care/*standards ; *Health Status ; Information Systems/*standards ; Internet/*standards ; Romania ; }, abstract = {It is extremely important for the healthcare domain to have a standardized communication because will improve the quality of information and in the end the resulting benefits will improve the quality of patients' life. The standards proposed to be used are: HL7 CDA and CCD. For a better access to the medical data a solution based on cloud computing (CC) is investigated. CC is a technology that supports flexibility, seamless care, and reduced costs of the medical act. To ensure interoperability between healthcare information systems a solution creating a Web Custom Control is presented. The control shows the database tables and fields used to configure the two standards. This control will facilitate the work of the medical staff and hospital administrators, because they can configure the local system easily and prepare it for communication with other systems. 
The resulted information will have a higher quality and will provide knowledge that will support better patient management and diagnosis.}, } @article {pmid22870267, year = {2012}, author = {Fischer, M and Snajder, R and Pabinger, S and Dander, A and Schossig, A and Zschocke, J and Trajanoski, Z and Stocker, G}, title = {SIMPLEX: cloud-enabled pipeline for the comprehensive analysis of exome sequencing data.}, journal = {PloS one}, volume = {7}, number = {8}, pages = {e41948}, pmid = {22870267}, issn = {1932-6203}, mesh = {*Exome ; *Internet ; *Polymorphism, Single Nucleotide ; Sequence Alignment/instrumentation/*methods ; Sequence Analysis, DNA/instrumentation/*methods ; *Software ; }, abstract = {In recent studies, exome sequencing has proven to be a successful screening tool for the identification of candidate genes causing rare genetic diseases. Although underlying targeted sequencing methods are well established, necessary data handling and focused, structured analysis still remain demanding tasks. Here, we present a cloud-enabled autonomous analysis pipeline, which comprises the complete exome analysis workflow. The pipeline combines several in-house developed and published applications to perform the following steps: (a) initial quality control, (b) intelligent data filtering and pre-processing, (c) sequence alignment to a reference genome, (d) SNP and DIP detection, (e) functional annotation of variants using different approaches, and (f) detailed report generation during various stages of the workflow. The pipeline connects the selected analysis steps, exposes all available parameters for customized usage, performs required data handling, and distributes computationally expensive tasks either on a dedicated high-performance computing infrastructure or on the Amazon cloud environment (EC2). The presented application has already been used in several research projects including studies to elucidate the role of rare genetic diseases. 
The pipeline is continuously tested and is publicly available under the GPL as a VirtualBox or Cloud image at http://simplex.i-med.ac.at; additional supplementary data is provided at http://www.icbi.at/exome.}, } @article {pmid22868590, year = {2012}, author = {Maratt, JD and Srinivasan, RC and Dahl, WJ and Schilling, PL and Urquhart, AG}, title = {Cloud-based preoperative planning for total hip arthroplasty: a study of accuracy, efficiency, and compliance.}, journal = {Orthopedics}, volume = {35}, number = {8}, pages = {682-686}, doi = {10.3928/01477447-20120725-05}, pmid = {22868590}, issn = {1938-2367}, mesh = {Arthroplasty, Replacement, Hip/instrumentation/*standards ; Hip Prosthesis ; Humans ; Internet/*standards ; Preoperative Care/*standards ; Radiographic Image Enhancement/*standards ; Radiographic Image Interpretation, Computer-Assisted/standards ; Retrospective Studies ; }, abstract = {As digital radiography becomes more prevalent, several systems for digital preoperative planning have become available. The purpose of this study was to evaluate the accuracy and efficiency of an inexpensive, cloud-based digital templating system, which is comparable with acetate templating. However, cloud-based templating is substantially faster and more convenient than acetate templating or locally installed software. 
Although this is a practical solution for this particular medical application, regulatory changes are necessary before the tremendous advantages of cloud-based storage and computing can be realized in medical research and clinical practice.}, } @article {pmid22865161, year = {2012}, author = {Karthikeyan, N and Sukanesh, R}, title = {Cloud based emergency health care information service in India.}, journal = {Journal of medical systems}, volume = {36}, number = {6}, pages = {4031-4036}, pmid = {22865161}, issn = {0148-5598}, mesh = {*Access to Information ; Biometric Identification ; Computer Security ; *Emergency Medical Services ; Humans ; Image Processing, Computer-Assisted ; India ; Information Storage and Retrieval/*methods ; *Internet ; Medical Informatics ; Software ; }, abstract = {A hospital is a health care organization providing patient treatment by expert physicians, surgeons and equipments. A report from a health care accreditation group says that miscommunication between patients and health care providers is the reason for the gap in providing emergency medical care to people in need. In developing countries, illiteracy is the major key root for deaths resulting from uncertain diseases constituting a serious public health problem. Mentally affected, differently abled and unconscious patients can't communicate about their medical history to the medical practitioners. Also, Medical practitioners can't edit or view DICOM images instantly. Our aim is to provide palm vein pattern recognition based medical record retrieval system, using cloud computing for the above mentioned people. Distributed computing technology is coming in the new forms as Grid computing and Cloud computing. These new forms are assured to bring Information Technology (IT) as a service. In this paper, we have described how these new forms of distributed computing will be helpful for modern health care industries. 
Cloud Computing is germinating its benefit to industrial sectors especially in medical scenarios. In Cloud Computing, IT-related capabilities and resources are provided as services, via the distributed computing on-demand. This paper is concerned with sprouting software as a service (SaaS) by means of Cloud computing with an aim to bring emergency health care sector in an umbrella with physical secured patient records. In framing the emergency healthcare treatment, the crucial thing considered necessary to decide about patients is their previous health conduct records. Thus a ubiquitous access to appropriate records is essential. Palm vein pattern recognition promises a secured patient record access. Likewise our paper reveals an efficient means to view, edit or transfer the DICOM images instantly which was a challenging task for medical practitioners in the past years. We have developed two services for health care. 1. Cloud based Palm vein recognition system 2. Distributed Medical image processing tools for medical practitioners.}, } @article {pmid22846423, year = {2012}, author = {Jaschob, D and Riffle, M}, title = {JobCenter: an open source, cross-platform, and distributed job queue management system optimized for scalability and versatility.}, journal = {Source code for biology and medicine}, volume = {7}, number = {1}, pages = {8}, pmid = {22846423}, issn = {1751-0473}, abstract = {BACKGROUND: Laboratories engaged in computational biology or bioinformatics frequently need to run lengthy, multistep, and user-driven computational jobs. Each job can tie up a computer for a few minutes to several days, and many laboratories lack the expertise or resources to build and maintain a dedicated computer cluster.

RESULTS: JobCenter is a client-server application and framework for job management and distributed job execution. The client and server components are both written in Java and are cross-platform and relatively easy to install. All communication with the server is client-driven, which allows worker nodes to run anywhere (even behind external firewalls or "in the cloud") and provides inherent load balancing. Adding a worker node to the worker pool is as simple as dropping the JobCenter client files onto any computer and performing basic configuration, which provides tremendous ease-of-use, flexibility, and limitless horizontal scalability. Each worker installation may be independently configured, including the types of jobs it is able to run. Executed jobs may be written in any language and may include multistep workflows.

CONCLUSIONS: JobCenter is a versatile and scalable distributed job management system that allows laboratories to very efficiently distribute all computational work among available resources. JobCenter is freely available at http://code.google.com/p/jobcenter/.}, } @article {pmid22838382, year = {2012}, author = {Hsieh, JC and Hsu, MW}, title = {A cloud computing based 12-lead ECG telemedicine service.}, journal = {BMC medical informatics and decision making}, volume = {12}, number = {}, pages = {77}, pmid = {22838382}, issn = {1472-6947}, mesh = {*Decision Making, Computer-Assisted ; *Electrocardiography ; Humans ; Quality of Health Care ; Taiwan ; *Telemedicine ; }, abstract = {BACKGROUND: Due to the great variability of 12-lead ECG instruments and medical specialists' interpretation skills, it remains a challenge to deliver rapid and accurate 12-lead ECG reports with senior cardiologists' decision making support in emergency telecardiology.

METHODS: We create a new cloud and pervasive computing based 12-lead Electrocardiography (ECG) service to realize ubiquitous 12-lead ECG tele-diagnosis.

RESULTS: This developed service enables ECG to be transmitted and interpreted via mobile phones. That is, tele-consultation can take place while the patient is on the ambulance, between the onsite clinicians and the off-site senior cardiologists, or among hospitals. Most importantly, this developed service is convenient, efficient, and inexpensive.

CONCLUSIONS: This cloud computing based ECG tele-consultation service expands the traditional 12-lead ECG applications onto the collaboration of clinicians at different locations or among hospitals. In short, this service can greatly improve medical service quality and efficiency, especially for patients in rural areas. This service has been evaluated and proved to be useful by cardiologists in Taiwan.}, } @article {pmid22743228, year = {2012}, author = {Habegger, L and Balasubramanian, S and Chen, DZ and Khurana, E and Sboner, A and Harmanci, A and Rozowsky, J and Clarke, D and Snyder, M and Gerstein, M}, title = {VAT: a computational framework to functionally annotate variants in personal genomes within a cloud-computing environment.}, journal = {Bioinformatics (Oxford, England)}, volume = {28}, number = {17}, pages = {2267-2269}, pmid = {22743228}, issn = {1367-4811}, mesh = {Genetic Variation ; *Genome, Human ; Genomics/*methods ; Genotype ; Humans ; Information Storage and Retrieval/*methods ; Internet ; Molecular Sequence Annotation/*methods ; *Software ; }, abstract = {UNLABELLED: The functional annotation of variants obtained through sequencing projects is generally assumed to be a simple intersection of genomic coordinates with genomic features. However, complexities arise for several reasons, including the differential effects of a variant on alternatively spliced transcripts, as well as the difficulty in assessing the impact of small insertions/deletions and large structural variants. Taking these factors into consideration, we developed the Variant Annotation Tool (VAT) to functionally annotate variants from multiple personal genomes at the transcript level as well as obtain summary statistics across genes and individuals. VAT also allows visualization of the effects of different variants, integrates allele frequencies and genotype data from the underlying individuals and facilitates comparative analysis between different groups of individuals. 
VAT can either be run through a command-line interface or as a web application. Finally, in order to enable on-demand access and to minimize unnecessary transfers of large data files, VAT can be run as a virtual machine in a cloud-computing environment.

VAT is implemented in C and PHP. The VAT web service, Amazon Machine Image, source code and detailed documentation are available at vat.gersteinlab.org.}, } @article {pmid22734722, year = {2012}, author = {Shanker, A}, title = {Genome research in the cloud.}, journal = {Omics : a journal of integrative biology}, volume = {16}, number = {7-8}, pages = {422-428}, doi = {10.1089/omi.2012.0001}, pmid = {22734722}, issn = {1557-8100}, mesh = {Base Sequence ; *Genome, Human ; Genomics ; High-Throughput Nucleotide Sequencing ; Humans ; *Information Storage and Retrieval/economics/trends ; Online Systems ; Software ; }, abstract = {High-throughput genome research has long been associated with bioinformatics, as it assists genome sequencing and annotation projects. Along with databases, to store, properly manage, and retrieve biological data, a large number of computational tools have been developed to decode biological information from this data. However, with the advent of next-generation sequencing (NGS) technology the sequence data starts generating at a pace never before seen. Consequently researchers are facing a threat as they are experiencing a potential shortage of storage space and tools to analyze the data. Moreover, the voluminous data increases traffic in the network by uploading and downloading large data sets, and thus consume much of the network's available bandwidth. 
All of these obstacles have led to the solution in the form of cloud computing.}, } @article {pmid22692258, year = {2012}, author = {Lopez, MH and Holve, E and Sarkar, IN and Segal, C}, title = {Building the informatics infrastructure for comparative effectiveness research (CER): a review of the literature.}, journal = {Medical care}, volume = {50 Suppl}, number = {}, pages = {S38-48}, doi = {10.1097/MLR.0b013e318259becd}, pmid = {22692258}, issn = {1537-1948}, support = {U13HS19564-01/HS/AHRQ HHS/United States ; }, mesh = {Comparative Effectiveness Research/*organization & administration ; Computer Security ; Confidentiality ; Humans ; Information Storage and Retrieval/standards ; Informed Consent ; Medical Informatics/*organization & administration ; Medical Records Systems, Computerized ; }, abstract = {BACKGROUND: Technological advances in clinical informatics have made large amounts of data accessible and potentially useful for research. As a result, a burgeoning literature addresses efforts to bridge the fields of health services research and biomedical informatics. The Electronic Data Methods Forum review examines peer-reviewed literature at the intersection of comparative effectiveness research and clinical informatics. The authors are specifically interested in characterizing this literature and identifying cross-cutting themes and gaps in the literature.

METHODS: A 3-step systematic literature search was conducted, including a structured search of PubMed, manual reviews of articles from selected publication lists, and manual reviews of research activities based on prospective electronic clinical data. Two thousand four hundred thirty-five citations were identified as potentially relevant. Ultimately, a full-text review was performed for 147 peer-reviewed papers.

RESULTS: One hundred thirty-two articles were selected for inclusion in the review. Of these, 88 articles are the focus of the discussion in this paper. Three types of articles were identified, including papers that: (1) provide historical context or frameworks for using clinical informatics for research, (2) describe platforms and projects, and (3) discuss issues, challenges, and applications of natural language processing. In addition, 2 cross-cutting themes emerged: the challenges of conducting research in the absence of standardized ontologies and data collection; and unique data governance concerns related to the transfer, storage, deidentification, and access to electronic clinical data. Finally, the authors identified several current gaps on important topics such as the use of clinical informatics for cohort identification, cloud computing, and single point access to research data.}, } @article {pmid22711774, year = {2012}, author = {Hsu, CY and Lu, CS and Pei, SC}, title = {Image feature extraction in encrypted domain with privacy-preserving SIFT.}, journal = {IEEE transactions on image processing : a publication of the IEEE Signal Processing Society}, volume = {21}, number = {11}, pages = {4593-4607}, doi = {10.1109/TIP.2012.2204272}, pmid = {22711774}, issn = {1941-0042}, abstract = {Privacy has received considerable attention but is still largely ignored in the multimedia community. Consider a cloud computing scenario where the server is resource-abundant, and is capable of finishing the designated tasks. It is envisioned that secure media applications with privacy preservation will be treated seriously. In view of the fact that scale-invariant feature transform (SIFT) has been widely adopted in various fields, this paper is the first to target the importance of privacy-preserving SIFT (PPSIFT) and to address the problem of secure SIFT feature extraction and representation in the encrypted domain. 
As all of the operations in SIFT must be moved to the encrypted domain, we propose a privacy-preserving realization of the SIFT method based on homomorphic encryption. We show through the security analysis based on the discrete logarithm problem and RSA that PPSIFT is secure against ciphertext-only attack and known-plaintext attack. Experimental results obtained from different case studies demonstrate that the proposed homomorphic encryption-based privacy-preserving SIFT performs comparably to the original SIFT and that our method is useful in SIFT-based privacy-preserving applications.}, } @article {pmid22700313, year = {2012}, author = {Afgan, E and Chapman, B and Jadan, M and Franke, V and Taylor, J}, title = {Using cloud computing infrastructure with CloudBioLinux, CloudMan, and Galaxy.}, journal = {Current protocols in bioinformatics}, volume = {Chapter 11}, number = {}, pages = {11.9.1-11.9.20}, pmid = {22700313}, issn = {1934-340X}, support = {R21 HG005133/HG/NHGRI NIH HHS/United States ; RC2 HG005542/HG/NHGRI NIH HHS/United States ; }, mesh = {Cluster Analysis ; Computational Biology/*methods ; *Internet ; *Software ; }, abstract = {Cloud computing has revolutionized availability and access to computing and storage resources, making it possible to provision a large computational infrastructure with only a few clicks in a Web browser. However, those resources are typically provided in the form of low-level infrastructure components that need to be procured and configured before use. In this unit, we demonstrate how to utilize cloud computing resources to perform open-ended bioinformatic analyses, with fully automated management of the underlying cloud infrastructure. By combining three projects, CloudBioLinux, CloudMan, and Galaxy, into a cohesive unit, we have enabled researchers to gain access to more than 100 preconfigured bioinformatics tools and gigabytes of reference genomes on top of the flexible cloud computing infrastructure. 
The protocol demonstrates how to set up the available infrastructure and how to use the tools via a graphical desktop interface, a parallel command-line interface, and the Web-based Galaxy interface.}, } @article {pmid22699871, year = {2012}, author = {Krestin, GP and Grenier, PA and Hricak, H and Jackson, VP and Khong, PL and Miller, JC and Muellner, A and Schwaiger, M and Thrall, JH}, title = {Integrated diagnostics: proceedings from the 9th biennial symposium of the International Society for Strategic Studies in Radiology.}, journal = {European radiology}, volume = {22}, number = {11}, pages = {2283-2294}, pmid = {22699871}, issn = {1432-1084}, mesh = {Algorithms ; Biomarkers/metabolism ; Computer Systems ; Decision Support Systems, Clinical ; Diagnostic Imaging/*methods/trends ; Europe ; Humans ; International Cooperation ; Medical Informatics/methods ; Molecular Imaging/methods ; Nanoparticles/chemistry ; Radiology/*methods/*trends ; Societies, Medical ; }, abstract = {The International Society for Strategic Studies in Radiology held its 9th biennial meeting in August 2011. The focus of the programme was integrated diagnostics and massive computing. Participants discussed the opportunities, challenges, and consequences for the discipline of radiology that will likely arise from the integration of diagnostic technologies. Diagnostic technologies are increasing in scope, including advanced imaging techniques, new molecular imaging agents, and sophisticated point-of-use devices. Advanced information technology (IT), which is increasingly influencing the practice of medicine, will aid clinical communication and the development of "population images" that represent the phenotype of particular diseases, which will aid the development of diagnostic algorithms. Integrated diagnostics offer increased operational efficiency and benefits to patients through quicker and more accurate diagnoses. 
As physicians with the most expertise in IT, radiologists are well placed to take the lead in introducing IT solutions and cloud computing to promote integrated diagnostics. To achieve this, radiologists must adapt to include quantitative data on biomarkers in their reports. Radiologists must also increase their role as participating physicians, collaborating with other medical specialties, not only to avoid being sidelined by other specialties but also to better prepare as leaders in the selection and sequence of diagnostic procedures. Key Points • New diagnostic technologies are yielding unprecedented amounts of diagnostic information.• Advanced IT/cloud computing will aid integration and analysis of diagnostic data.• Better diagnostic algorithms will lead to faster diagnosis and more rapid treatment.}, } @article {pmid22684960, year = {2012}, author = {Deck, J and Gross, J and Stones-Havas, S and Davies, N and Shapley, R and Meyer, C}, title = {Field information management systems for DNA barcoding.}, journal = {Methods in molecular biology (Clifton, N.J.)}, volume = {858}, number = {}, pages = {255-267}, doi = {10.1007/978-1-61779-591-6_12}, pmid = {22684960}, issn = {1940-6029}, mesh = {DNA Barcoding, Taxonomic/*methods/standards ; *Database Management Systems ; *Information Management/standards ; *Information Systems/standards ; }, abstract = {Information capture pertaining to the "what?", "where?", and "when?" of biodiversity data is critical to maintain data integrity, interoperability, and utility. Moreover, DNA barcoding and other biodiversity studies must adhere to agreed upon data standards in order to effectively contextualize the biota encountered. A field information management system (FIMS) is presented that locks down metadata associated with collecting events, specimens, and tissues. Emphasis is placed on ease of use and flexibility of operation. 
Standardized templates for data entry are validated through a flexible, project-oriented validation process that assures adherence to data standards and thus data quality. Furthermore, we provide export functionality to existing cloud-based solutions, including Google Fusion Tables and Flickr to allow sharing of these data elements across research collaboration teams and other potential data harvesters via API services.}, } @article {pmid22679743, year = {2012}, author = {Pechette, JM}, title = {Transforming health care through cloud computing.}, journal = {Health care law monthly}, volume = {2012}, number = {5}, pages = {2-12}, pmid = {22679743}, issn = {1526-0704}, mesh = {*Computer Communication Networks ; Computer Security ; Confidentiality/*legislation & jurisprudence ; Efficiency, Organizational ; Electronic Health Records ; Health Insurance Portability and Accountability Act ; Humans ; *Information Storage and Retrieval ; Systems Integration ; United States ; }, } @article {pmid22668792, year = {2012}, author = {Lee, H and Schatz, MC}, title = {Genomic dark matter: the reliability of short read mapping illustrated by the genome mappability score.}, journal = {Bioinformatics (Oxford, England)}, volume = {28}, number = {16}, pages = {2097-2105}, pmid = {22668792}, issn = {1367-4811}, support = {R01 HG006677/HG/NHGRI NIH HHS/United States ; R01-HG006677-12/HG/NHGRI NIH HHS/United States ; }, mesh = {Algorithms ; Animals ; Chromosome Mapping/*methods ; Computational Biology/*methods ; Computer Simulation ; Drosophila melanogaster/genetics ; Genomics/*methods ; Humans ; Mice ; Reproducibility of Results ; Saccharomyces cerevisiae/genetics ; Sequence Analysis, DNA/*methods ; *Software ; }, abstract = {MOTIVATION: Genome resequencing and short read mapping are two of the primary tools of genomics and are used for many important applications. 
The current state-of-the-art in mapping uses the quality values and mapping quality scores to evaluate the reliability of the mapping. These attributes, however, are assigned to individual reads and do not directly measure the problematic repeats across the genome. Here, we present the Genome Mappability Score (GMS) as a novel measure of the complexity of resequencing a genome. The GMS is a weighted probability that any read could be unambiguously mapped to a given position and thus measures the overall composition of the genome itself.

RESULTS: We have developed the Genome Mappability Analyzer to compute the GMS of every position in a genome. It leverages the parallelism of cloud computing to analyze large genomes, and enabled us to identify the 5-14% of the human, mouse, fly and yeast genomes that are difficult to analyze with short reads. We examined the accuracy of the widely used BWA/SAMtools polymorphism discovery pipeline in the context of the GMS, and found discovery errors are dominated by false negatives, especially in regions with poor GMS. These errors are fundamental to the mapping process and cannot be overcome by increasing coverage. As such, the GMS should be considered in every resequencing project to pinpoint the 'dark matter' of the genome, including of known clinically relevant variations in these regions.

AVAILABILITY: The source code and profiles of several model organisms are available at http://gma-bio.sourceforge.net}, } @article {pmid22633884, year = {2012}, author = {Das, A and Gupta, AK and Mazumder, TN}, title = {A comprehensive risk assessment framework for offsite transportation of inflammable hazardous waste.}, journal = {Journal of hazardous materials}, volume = {227-228}, number = {}, pages = {88-96}, doi = {10.1016/j.jhazmat.2012.05.014}, pmid = {22633884}, issn = {1873-3336}, mesh = {*Chemical Hazard Release ; Drug Industry ; Explosions ; *Hazardous Waste ; Humans ; India ; Industrial Waste ; Models, Theoretical ; Risk Assessment ; *Transportation ; }, abstract = {A framework for risk assessment due to offsite transportation of hazardous wastes is designed based on the type of event that can be triggered from an accident of a hazardous waste carrier. The objective of this study is to design a framework for computing the risk to population associated with offsite transportation of inflammable and volatile wastes. The framework is based on traditional definition of risk and is designed for conditions where accident databases are not available. The probability based variable in risk assessment framework is substituted by a composite accident index proposed in this study. The framework computes the impacts due to a volatile cloud explosion based on TNO Multi-energy model. The methodology also estimates the vulnerable population in terms of disability adjusted life years (DALY) which takes into consideration the demographic profile of the population and the degree of injury on mortality and morbidity sustained. 
The methodology is illustrated using a case study of a pharmaceutical industry in the Kolkata metropolitan area.}, } @article {pmid22607956, year = {2012}, author = {Patel, RP}, title = {Cloud computing and virtualization technology in radiology.}, journal = {Clinical radiology}, volume = {67}, number = {11}, pages = {1095-1100}, doi = {10.1016/j.crad.2012.03.010}, pmid = {22607956}, issn = {1365-229X}, mesh = {*Computer Communication Networks ; Computer Security ; Humans ; Medical Informatics Applications ; Medical Informatics Computing ; *Radiology/methods ; *User-Computer Interface ; }, } @article {pmid22564561, year = {2012}, author = {Shih, FJ and Fan, YW and Chiu, CM and Shih, FJ and Wang, SS}, title = {The dilemma of "to be or not to be": developing electronically e-health & cloud computing documents for overseas transplant patients from Taiwan organ transplant health professionals' perspective.}, journal = {Transplantation proceedings}, volume = {44}, number = {4}, pages = {835-838}, doi = {10.1016/j.transproceed.2012.02.001}, pmid = {22564561}, issn = {1873-2623}, mesh = {*Access to Information ; *Asian People/psychology/statistics & numerical data ; Attitude of Health Personnel/*ethnology ; Cooperative Behavior ; *Delivery of Health Care, Integrated/organization & administration/statistics & numerical data ; *Electronic Health Records/organization & administration/statistics & numerical data ; Health Knowledge, Attitudes, Practice/*ethnology ; Humans ; Information Systems/organization & administration/statistics & numerical data ; Interinstitutional Relations ; International Cooperation ; *Medical Tourism/statistics & numerical data ; Models, Organizational ; *Organ Transplantation/statistics & numerical data ; Patient Care Team ; Quality of Health Care ; Taiwan ; }, abstract = {AIMS: The development of mutually accessible e-health documents (ehD) and cloud computing (CC) for overseas organ transplant health professionals (OTHP) in two medical parties 
(domestic and overseas) would ensure better quality of care. This project attempted to compare pro and con arguments from the perspective of Taiwan's OTHP.

METHODS: A sample was obtained from three leading medical centers in Taiwan.

RESULTS: Eighty subjects including transplant surgeons (n = 20), registered nurses (RN; n = 30), coordinating nurses (OTCN; n = 15), and e-health information and communication technologies experts (ehICTs; n = 15) participated in this research. The pros of developing ehD were: (1) better and continuous care through communication and cooperation in two parties (78%); (2) better collaborative efforts between health professionals and information technology experts in two medical parties (74%); (3) easier retrieval and communication of personal health documents with the trustworthy OTHP in the different countries (71%); and (4) CC may help develop transplant patients' medical cloud based on the collaboration between medical systems in political parties of Taiwan and mainland China (69%). The cons of developing ehD and CC included: (1) inadequate knowledge of benefits and manuals of developing ehD and CC (75%); (2) no reliable communication avenues in developing ehD and CC (73%); (3) increased workload in direct care and documentation in developing new ehD and CC (70%); (4) lack of coaching and accreditation systems in medical, electronic, and law aspects to settle discrepancies in medical diagnosis and treatment protocols between two parties (68%); and (5) lacking systematic ehD and CC plans developed by interdisciplinary teams in two parties (60%).

CONCLUSION: In this initial phase, the establishment of an interdisciplinary team including transplant leaders, transplant surgeon, RN, OTCN, ehICTs, and law experts from two parties might be helpful in working out developing plans with careful monitoring mechanisms.}, } @article {pmid22559942, year = {2012}, author = {Abouelhoda, M and Issa, SA and Ghanem, M}, title = {Tavaxy: integrating Taverna and Galaxy workflows with cloud computing support.}, journal = {BMC bioinformatics}, volume = {13}, number = {}, pages = {77}, pmid = {22559942}, issn = {1471-2105}, mesh = {Computational Biology/*methods ; *Internet ; Metagenomics ; Sequence Analysis ; Software ; *Systems Integration ; User-Computer Interface ; *Workflow ; }, abstract = {BACKGROUND: Over the past decade the workflow system paradigm has evolved as an efficient and user-friendly approach for developing complex bioinformatics applications. Two popular workflow systems that have gained acceptance by the bioinformatics community are Taverna and Galaxy. Each system has a large user-base and supports an ever-growing repository of application workflows. However, workflows developed for one system cannot be imported and executed easily on the other. The lack of interoperability is due to differences in the models of computation, workflow languages, and architectures of both systems. This lack of interoperability limits sharing of workflows between the user communities and leads to duplication of development efforts.

RESULTS: In this paper, we present Tavaxy, a stand-alone system for creating and executing workflows based on using an extensible set of re-usable workflow patterns. Tavaxy offers a set of new features that simplify and enhance the development of sequence analysis applications: It allows the integration of existing Taverna and Galaxy workflows in a single environment, and supports the use of cloud computing capabilities. The integration of existing Taverna and Galaxy workflows is supported seamlessly at both run-time and design-time levels, based on the concepts of hierarchical workflows and workflow patterns. The use of cloud computing in Tavaxy is flexible, where the users can either instantiate the whole system on the cloud, or delegate the execution of certain sub-workflows to the cloud infrastructure.

CONCLUSIONS: Tavaxy reduces the workflow development cycle by introducing the use of workflow patterns to simplify workflow creation. It enables the re-use and integration of existing (sub-) workflows from Taverna and Galaxy, and allows the creation of hybrid workflows. Its additional features exploit recent advances in high performance cloud computing to cope with the increasing data size and complexity of analysis.The system can be accessed either through a cloud-enabled web-interface or downloaded and installed to run within the user's local environment. All resources related to Tavaxy are available at http://www.tavaxy.org.}, } @article {pmid22551205, year = {2012}, author = {Almeida, JS and Grüneberg, A and Maass, W and Vinga, S}, title = {Fractal MapReduce decomposition of sequence alignment.}, journal = {Algorithms for molecular biology : AMB}, volume = {7}, number = {1}, pages = {12}, pmid = {22551205}, issn = {1748-7188}, abstract = {BACKGROUND: The dramatic fall in the cost of genomic sequencing, and the increasing convenience of distributed cloud computing resources, positions the MapReduce coding pattern as a cornerstone of scalable bioinformatics algorithm development. In some cases an algorithm will find a natural distribution via use of map functions to process vectorized components, followed by a reduce of aggregate intermediate results. However, for some data analysis procedures such as sequence analysis, a more fundamental reformulation may be required.

RESULTS: In this report we describe a solution to sequence comparison that can be thoroughly decomposed into multiple rounds of map and reduce operations. The route taken makes use of iterated maps, a fractal analysis technique, that has been found to provide an "alignment-free" solution to sequence analysis and comparison. That is, a solution that does not require dynamic programming, relying on a numeric Chaos Game Representation (CGR) data structure. This claim is demonstrated in this report by calculating the length of the longest similar segment by inspecting only the USM coordinates of two analogous units: with no resort to dynamic programming.

CONCLUSIONS: The procedure described is an attempt at extreme decomposition and parallelization of sequence alignment in anticipation of a volume of genomic sequence data that cannot be met by current algorithmic frameworks. The solution found is delivered with a browser-based application (webApp), highlighting the browser's emergence as an environment for high performance distributed computing.

AVAILABILITY: Public distribution of accompanying software library with open source and version control at http://usm.github.com. Also available as a webApp through Google Chrome's WebStore http://chrome.google.com/webstore: search with "usm".}, } @article {pmid22514969, year = {2012}, author = {Koch, P}, title = {Benefits of cloud computing for PACS and archiving.}, journal = {Radiology management}, volume = {34}, number = {2}, pages = {16-9; quiz 21-2}, pmid = {22514969}, issn = {0198-7097}, mesh = {Education, Continuing ; Information Storage and Retrieval/*methods ; *Internet ; Radiology Information Systems/*organization & administration ; United States ; }, abstract = {The goal of cloud-based services is to provide easy, scalable access to computing resources and IT services. The healthcare industry requires a private cloud that adheres to government mandates designed to ensure privacy and security of patient data while enabling access by authorized users. Cloud-based computing in the imaging market has evolved from a service that provided cost effective disaster recovery for archived data to fully featured PACS and vendor neutral archiving services that can address the needs of healthcare providers of all sizes. Healthcare providers worldwide are now using the cloud to distribute images to remote radiologists while supporting advanced reading tools, deliver radiology reports and imaging studies to referring physicians, and provide redundant data storage. 
Vendor managed cloud services eliminate large capital investments in equipment and maintenance, as well as staffing for the data center--creating a reduction in total cost of ownership for the healthcare provider.}, } @article {pmid22505808, year = {2012}, author = {van den Hurk, AF and Hall-Mendelin, S and Johansen, CA and Warrilow, D and Ritchie, SA}, title = {Evolution of mosquito-based arbovirus surveillance systems in Australia.}, journal = {Journal of biomedicine & biotechnology}, volume = {2012}, number = {}, pages = {325659}, pmid = {22505808}, issn = {1110-7251}, mesh = {Animals ; Arbovirus Infections/*epidemiology/*veterinary ; Arboviruses/*isolation & purification ; Australia/epidemiology ; Culicidae/*virology ; Humans ; Sentinel Surveillance/*veterinary ; }, abstract = {Control of arboviral disease is dependent on the sensitive and timely detection of elevated virus activity or the identification of emergent or exotic viruses. The emergence of Japanese encephalitis virus (JEV) in northern Australia revealed numerous problems with performing arbovirus surveillance in remote locations. A sentinel pig programme detected JEV activity, although there were a number of financial, logistical, diagnostic and ethical limitations. A system was developed which detected viral RNA in mosquitoes collected by solar or propane powered CO2-baited traps. However, this method was hampered by trap-component malfunction, microbial contamination and large mosquito numbers which overwhelmed diagnostic capabilities. A novel approach involves allowing mosquitoes within a box trap to probe a sugar-baited nucleic-acid preservation card that is processed for expectorated arboviruses. In a longitudinal field trial, both Ross River and Barmah Forest viruses were detected numerous times from multiple traps over different weeks. 
Further refinements, including the development of unpowered traps and use of yeast-generated CO2, could enhance the applicability of this system to remote locations. New diagnostic technology, such as next generation sequencing and biosensors, will increase the capacity for recognizing emergent or exotic viruses, while cloud computing platforms will facilitate rapid dissemination of data.}, } @article {pmid22492314, year = {2012}, author = {Jourdren, L and Bernard, M and Dillies, MA and Le Crom, S}, title = {Eoulsan: a cloud computing-based framework facilitating high throughput sequencing analyses.}, journal = {Bioinformatics (Oxford, England)}, volume = {28}, number = {11}, pages = {1542-1543}, doi = {10.1093/bioinformatics/bts165}, pmid = {22492314}, issn = {1367-4811}, mesh = {*Algorithms ; Animals ; Computational Biology/*methods ; High-Throughput Nucleotide Sequencing/*methods ; Mice ; Sequence Analysis, RNA/*methods ; Software ; }, abstract = {UNLABELLED: We developed a modular and scalable framework called Eoulsan, based on the Hadoop implementation of the MapReduce algorithm dedicated to high-throughput sequencing data analysis. Eoulsan allows users to easily set up a cloud computing cluster and automate the analysis of several samples at once using various software solutions available. Our tests with Amazon Web Services demonstrated that the computation cost is linear with the number of instances booked as is the running time with the increasing amounts of data.

Eoulsan is implemented in Java, supported on Linux systems and distributed under the LGPL License at: http://transcriptome.ens.fr/eoulsan/}, } @article {pmid22492177, year = {2012}, author = {Fernández-Cardeñosa, G and de la Torre-Díez, I and López-Coronado, M and Rodrigues, JJ}, title = {Analysis of cloud-based solutions on EHRs systems in different scenarios.}, journal = {Journal of medical systems}, volume = {36}, number = {6}, pages = {3777-3782}, pmid = {22492177}, issn = {0148-5598}, mesh = {Community Health Centers ; Electronic Health Records/*organization & administration ; Hospitals ; Humans ; Information Storage and Retrieval/*methods ; *Internet ; Multi-Institutional Systems ; }, abstract = {Nowadays with the growing of the wireless connections people can access all the resources hosted in the Cloud almost everywhere. In this context, organisms can take advantage of this fact, in terms of e-Health, deploying Cloud-based solutions on e-Health services. In this paper two Cloud-based solutions for different scenarios of Electronic Health Records (EHRs) management system are proposed. We have researched articles published between the years 2005 and 2011 about the implementation of e-Health services based on the Cloud in Medline. In order to analyze the best scenario for the deployment of Cloud Computing two solutions for a large Hospital and a network of Primary Care Health centers have been studied. Economic estimation of the cost of the implementation for both scenarios has been done via the Amazon calculator tool. As a result of this analysis two solutions are suggested depending on the scenario: To deploy a Cloud solution for a large Hospital a typical Cloud solution in which are hired just the needed services has been assumed. On the other hand to work with several Primary Care Centers it's suggested the implementation of a network, which interconnects these centers with just one Cloud environment. 
Finally, the deployment of a hybrid solution is considered: EHRs with images will be hosted in the Hospital or Primary Care Centers and the rest of them will be migrated to the Cloud.}, } @article {pmid22491119, year = {2012}, author = {Lupşe, OS and Vida, M and Stoicu-Tivadar, L}, title = {Cloud computing technology applied in healthcare for developing large scale flexible solutions.}, journal = {Studies in health technology and informatics}, volume = {174}, number = {}, pages = {94-99}, pmid = {22491119}, issn = {0926-9630}, mesh = {Computer Communication Networks/*organization & administration ; Continuity of Patient Care/organization & administration ; Delivery of Health Care, Integrated/organization & administration ; *Health Services Administration ; Humans ; *Information Storage and Retrieval ; Information Systems/*organization & administration ; Internet ; }, abstract = {An extremely important area in which there is also vital information needed in different locations is the healthcare domain. In the areas of healthcare there is an important exchange of information since there are many departments where a patient can be sent for investigation. 
In this regard cloud computing is a technology that could really help supporting flexibility, seamless care and financial cuts.}, } @article {pmid22476397, year = {2012}, author = {Sofka, M and Ralovich, K and Zhang, J and Zhou, SK and Comaniciu, D}, title = {Progressive data transmission for anatomical landmark detection in a cloud.}, journal = {Methods of information in medicine}, volume = {51}, number = {3}, pages = {268-278}, doi = {10.3414/ME11-02-0017}, pmid = {22476397}, issn = {2511-705X}, mesh = {Access to Information ; Algorithms ; Anatomy/*instrumentation ; *Artificial Intelligence ; Humans ; Image Interpretation, Computer-Assisted/*instrumentation/methods ; Magnetic Resonance Imaging/instrumentation ; Medical Informatics/instrumentation ; Pathology/*instrumentation ; Tomography, X-Ray Computed/instrumentation/methods ; }, abstract = {BACKGROUND: In the concept of cloud-computing-based systems, various authorized users have secure access to patient records from a number of care delivery organizations from any location. This creates a growing need for remote visualization, advanced image processing, state-of-the-art image analysis, and computer aided diagnosis.

OBJECTIVES: This paper proposes a system of algorithms for automatic detection of anatomical landmarks in 3D volumes in the cloud computing environment. The system addresses the inherent problem of limited bandwidth between a (thin) client, data center, and data analysis server.

METHODS: The problem of limited bandwidth is solved by a hierarchical sequential detection algorithm that obtains data by progressively transmitting only image regions required for processing. The client sends a request to detect a set of landmarks for region visualization or further analysis. The algorithm running on the data analysis server obtains a coarse level image from the data center and generates landmark location candidates. The candidates are then used to obtain image neighborhood regions at a finer resolution level for further detection. This way, the landmark locations are hierarchically and sequentially detected and refined.

RESULTS: Only image regions surrounding landmark location candidates need to be transmitted during detection. Furthermore, the image regions are lossy compressed with JPEG 2000. Together, these properties amount to at least 30 times bandwidth reduction while achieving similar accuracy when compared to an algorithm using the original data.

CONCLUSIONS: The hierarchical sequential algorithm with progressive data transmission considerably reduces bandwidth requirements in cloud-based detection systems.}, } @article {pmid22450843, year = {2012}, author = {Johnson, PT and Zimmerman, SL and Heath, D and Eng, J and Horton, KM and Scott, WW and Fishman, EK}, title = {The iPad as a mobile device for CT display and interpretation: diagnostic accuracy for identification of pulmonary embolism.}, journal = {Emergency radiology}, volume = {19}, number = {4}, pages = {323-327}, pmid = {22450843}, issn = {1438-1435}, mesh = {Adult ; Aged ; Aged, 80 and over ; *Computers, Handheld ; Female ; Humans ; Logistic Models ; Male ; Middle Aged ; Pulmonary Embolism/*diagnostic imaging ; Radiographic Image Interpretation, Computer-Assisted/*instrumentation ; Radiology Information Systems ; Sensitivity and Specificity ; *Tomography, X-Ray Computed ; }, abstract = {Recent software developments enable interactive, real-time axial, 2D and 3D CT display on an iPad by cloud computing from a server for remote rendering. The purpose of this study was to compare radiologists' interpretative performance on the iPad to interpretation on the conventional picture archive and communication system (PACS). Fifty de-identified contrast-enhanced CT exams performed for suspected pulmonary embolism were compiled as an educational tool to prepare our residents for night call. Two junior radiology attendings blindly interpreted the cases twice, one reader used the PACS first, and the other interpreted on the iPad first. After an interval of at least 2 weeks, the cases were reinterpreted in different order using the other display technique. Sensitivity, specificity, and accuracy for identification of pulmonary embolism were compared for each interpretation method. Pulmonary embolism was present in 25 patients, ranging from main pulmonary artery to subsegmental thrombi. 
Both readers interpreted 98 % of cases correctly regardless of display platform. There was no significant difference in sensitivity (98 vs 100 %, p = 1.0), specificity (98 vs 96 %, p = 1.0), or accuracy (98 vs 98 %, p = 1.0) for interpretation with the iPad vs the PACS, respectively. CT interpretation on an iPad enabled accurate identification of pulmonary embolism, equivalent to display on the PACS. This mobile device has the potential to expand radiologists' availability for consultation and expedite emergency patient management.}, } @article {pmid22429538, year = {2012}, author = {Krampis, K and Booth, T and Chapman, B and Tiwari, B and Bicak, M and Field, D and Nelson, KE}, title = {Cloud BioLinux: pre-configured and on-demand bioinformatics computing for the genomics community.}, journal = {BMC bioinformatics}, volume = {13}, number = {}, pages = {42}, pmid = {22429538}, issn = {1471-2105}, mesh = {Animals ; Computers ; *Computing Methodologies ; Genomics/*methods ; Humans ; Sequence Alignment ; Software ; }, abstract = {BACKGROUND: A steep drop in the cost of next-generation sequencing during recent years has made the technology affordable to the majority of researchers, but downstream bioinformatic analysis still poses a resource bottleneck for smaller laboratories and institutes that do not have access to substantial computational resources. Sequencing instruments are typically bundled with only the minimal processing and storage capacity required for data capture during sequencing runs. Given the scale of sequence datasets, scientific value cannot be obtained from acquiring a sequencer unless it is accompanied by an equal investment in informatics infrastructure.

RESULTS: Cloud BioLinux is a publicly accessible Virtual Machine (VM) that enables scientists to quickly provision on-demand infrastructures for high-performance bioinformatics computing using cloud platforms. Users have instant access to a range of pre-configured command line and graphical software applications, including a full-featured desktop interface, documentation and over 135 bioinformatics packages for applications including sequence alignment, clustering, assembly, display, editing, and phylogeny. Each tool's functionality is fully described in the documentation directly accessible from the graphical interface of the VM. Besides the Amazon EC2 cloud, we have started instances of Cloud BioLinux on a private Eucalyptus cloud installed at the J. Craig Venter Institute, and demonstrated access to the bioinformatic tools interface through a remote connection to EC2 instances from a local desktop computer. Documentation for using Cloud BioLinux on EC2 is available from our project website, while a Eucalyptus cloud image and VirtualBox Appliance is also publicly available for download and use by researchers with access to private clouds.

CONCLUSIONS: Cloud BioLinux provides a platform for developing bioinformatics infrastructures on the cloud. An automated and configurable process builds Virtual Machines, allowing the development of highly customized versions from a shared code base. This shared community toolkit enables application specific analysis platforms on the cloud by minimizing the effort required to prepare and maintain them.}, } @article {pmid22428174, year = {2012}, author = {Goedert, J}, title = {Clearing the air around cloud computing.}, journal = {Health data management}, volume = {20}, number = {3}, pages = {44, 46, 48}, pmid = {22428174}, issn = {1079-9869}, mesh = {Information Services/*trends ; Information Storage and Retrieval/*trends ; *Internet ; United States ; }, } @article {pmid22426983, year = {2012}, author = {Zhao, G and Bu, D and Liu, C and Li, J and Yang, J and Liu, Z and Zhao, Y and Chen, R}, title = {CloudLCA: finding the lowest common ancestor in metagenome analysis using cloud computing.}, journal = {Protein & cell}, volume = {3}, number = {2}, pages = {148-152}, pmid = {22426983}, issn = {1674-8018}, mesh = {*Algorithms ; Databases, Genetic ; *Metagenomics ; Search Engine ; *User-Computer Interface ; }, abstract = {Estimating taxonomic content constitutes a key problem in metagenomic sequencing data analysis. However, extracting such content from high-throughput data of next-generation sequencing is very time-consuming with the currently available software. Here, we present CloudLCA, a parallel LCA algorithm that significantly improves the efficiency of determining taxonomic composition in metagenomic data analysis. Results show that CloudLCA (1) has a running time nearly linear with the increase of dataset magnitude, (2) displays linear speedup as the number of processors grows, especially for large datasets, and (3) reaches a speed of nearly 215 million reads each minute on a cluster with ten thin nodes. 
In comparison with MEGAN, a well-known metagenome analyzer, the speed of CloudLCA is up to 5 more times faster, and its peak memory usage is approximately 18.5% that of MEGAN, running on a fat node. CloudLCA can be run on one multiprocessor node or a cluster. It is expected to be part of MEGAN to accelerate analyzing reads, with the same output generated as MEGAN, which can be import into MEGAN in a direct way to finish the following analysis. Moreover, CloudLCA is a universal solution for finding the lowest common ancestor, and it can be applied in other fields requiring an LCA algorithm.}, } @article {pmid22399474, year = {2012}, author = {Prins, P and Belhachemi, D and Möller, S and Smant, G}, title = {Scalable computing for evolutionary genomics.}, journal = {Methods in molecular biology (Clifton, N.J.)}, volume = {856}, number = {}, pages = {529-545}, doi = {10.1007/978-1-61779-585-5_22}, pmid = {22399474}, issn = {1940-6029}, mesh = {Computer Communication Networks ; Computers ; *Evolution, Molecular ; Genomics/*methods ; User-Computer Interface ; }, abstract = {Genomic data analysis in evolutionary biology is becoming so computationally intensive that analysis of multiple hypotheses and scenarios takes too long on a single desktop computer. In this chapter, we discuss techniques for scaling computations through parallelization of calculations, after giving a quick overview of advanced programming techniques. Unfortunately, parallel programming is difficult and requires special software design. The alternative, especially attractive for legacy software, is to introduce poor man's parallelization by running whole programs in parallel as separate processes, using job schedulers. Such pipelines are often deployed on bioinformatics computer clusters. Recent advances in PC virtualization have made it possible to run a full computer operating system, with all of its installed software, on top of another operating system, inside a "box," or virtual machine (VM). 
Such a VM can flexibly be deployed on multiple computers, in a local network, e.g., on existing desktop PCs, and even in the Cloud, to create a "virtual" computer cluster. Many bioinformatics applications in evolutionary biology can be run in parallel, running processes in one or more VMs. Here, we show how a ready-made bioinformatics VM image, named BioNode, effectively creates a computing cluster, and pipeline, in a few steps. This allows researchers to scale-up computations from their desktop, using available hardware, anytime it is required. BioNode is based on Debian Linux and can run on networked PCs and in the Cloud. Over 200 bioinformatics and statistical software packages, of interest to evolutionary biology, are included, such as PAML, Muscle, MAFFT, MrBayes, and BLAST. Most of these software packages are maintained through the Debian Med project. In addition, BioNode contains convenient configuration scripts for parallelizing bioinformatics software. Where Debian Med encourages packaging free and open source bioinformatics software through one central project, BioNode encourages creating free and open source VM images, for multiple targets, through one central project. BioNode can be deployed on Windows, OSX, Linux, and in the Cloud. 
Next to the downloadable BioNode images, we provide tutorials online, which empower bioinformaticians to install and run BioNode in different environments, as well as information for future initiatives, on creating and building such images.}, } @article {pmid22397113, year = {2012}, author = {Cantrell, D and Maluf, M}, title = {Getting started with cloud computing: offloading ancillary applications helps data center expand without adding cost or staff.}, journal = {Health management technology}, volume = {33}, number = {2}, pages = {10-11}, pmid = {22397113}, issn = {1074-4770}, mesh = {Computer Communication Networks ; Efficiency, Organizational ; Information Centers/economics/*organization & administration ; *Information Services ; *Information Storage and Retrieval ; Internet/*organization & administration ; }, } @article {pmid22397112, year = {2012}, author = {Webb, G}, title = {Making the cloud work for healthcare: Cloud computing offers incredible opportunities to improve healthcare, reduce costs and accelerate ability to adopt new IT services.}, journal = {Health management technology}, volume = {33}, number = {2}, pages = {8-9}, pmid = {22397112}, issn = {1074-4770}, mesh = {Computer Communication Networks ; Cost Control ; Humans ; *Information Services ; Information Storage and Retrieval/*methods ; Internet/*organization & administration ; *Quality Assurance, Health Care ; }, } @article {pmid22390523, year = {2012}, author = {Lenert, L and Sundwall, DN}, title = {Public health surveillance and meaningful use regulations: a crisis of opportunity.}, journal = {American journal of public health}, volume = {102}, number = {3}, pages = {e1-7}, pmid = {22390523}, issn = {1541-0048}, mesh = {*American Recovery and Reinvestment Act ; Diffusion of Innovation ; Electronic Health Records ; Information Dissemination/*legislation & jurisprudence ; Information Management/methods ; Information Storage and Retrieval/*methods ; Information Systems/economics ; Medical 
Record Linkage ; *Population Surveillance ; Reimbursement, Incentive ; Systems Integration ; United States ; }, abstract = {The Health Information Technology for Economic and Clinical Health Act is intended to enhance reimbursement of health care providers for meaningful use of electronic health records systems. This presents both opportunities and challenges for public health departments. To earn incentive payments, clinical providers must exchange specified types of data with the public health system, such as immunization and syndromic surveillance data and notifiable disease reporting. However, a crisis looms because public health's information technology systems largely lack the capabilities to accept the types of data proposed for exchange. Cloud computing may be a solution for public health information systems. Through shared computing resources, public health departments could reap the benefits of electronic reporting within federal funding constraints.}, } @article {pmid22366976, year = {2012}, author = {Low, C and Hsueh Chen, Y}, title = {Criteria for the evaluation of a cloud-based hospital information system outsourcing provider.}, journal = {Journal of medical systems}, volume = {36}, number = {6}, pages = {3543-3553}, pmid = {22366976}, issn = {0148-5598}, mesh = {*Choice Behavior ; *Decision Making, Organizational ; Female ; Fuzzy Logic ; *Hospital Information Systems ; Humans ; *Information Storage and Retrieval ; Male ; *Outsourced Services ; }, abstract = {As cloud computing technology has proliferated rapidly worldwide, there has been a trend toward adopting cloud-based hospital information systems (CHISs). This study examines the critical criteria for selecting the CHISs outsourcing provider. The fuzzy Delphi method (FDM) is used to evaluate the primary indicator collected from 188 useable responses at a working hospital in Taiwan. 
Moreover, the fuzzy analytic hierarchy process (FAHP) is employed to calculate the weights of these criteria and establish a fuzzy multi-criteria model of CHISs outsourcing provider selection from 42 experts. The results indicate that the five most critical criteria related to CHISs outsourcing provider selection are (1) system function, (2) service quality, (3) integration, (4) professionalism, and (5) economics. This study may contribute to understanding how cloud-based hospital systems can reinforce content design and offer a way to compete in the field by developing more appropriate systems.}, } @article {pmid22352137, year = {2012}, author = {Perna, G}, title = {A hazy outlook for cloud computing.}, journal = {Healthcare informatics : the business magazine for information and communication systems}, volume = {29}, number = {1}, pages = {14, 16, 18 passim}, pmid = {22352137}, issn = {1050-9135}, mesh = {*Conflict, Psychological ; *Diffusion of Innovation ; Hospital Administrators ; Hospital Information Systems ; Humans ; Information Storage and Retrieval/*trends ; Medical Records Systems, Computerized ; United States ; }, abstract = {Because of competing priorities as well as cost, security, and implementation concerns, cloud-based storage development has gotten off to a slow start in healthcare. 
CIOs, CTOs, and other healthcare IT leaders are adopting a variety of strategies in this area, based on their organizations' needs, resources, and priorities.}, } @article {pmid22351166, year = {2012}, author = {Chen, YY and Lu, JC and Jan, JK}, title = {A secure EHR system based on hybrid clouds.}, journal = {Journal of medical systems}, volume = {36}, number = {5}, pages = {3375-3384}, pmid = {22351166}, issn = {0148-5598}, mesh = {Algorithms ; *Computer Security ; Confidentiality ; Electronic Health Records/*organization & administration ; Humans ; Information Storage and Retrieval ; *Internet ; Patient Access to Records ; }, abstract = {Consequently, application services rendering remote medical services and electronic health record (EHR) have become a hot topic and stimulating increased interest in studying this subject in recent years. Information and communication technologies have been applied to the medical services and healthcare area for a number of years to resolve problems in medical management. Sharing EHR information can provide professional medical programs with consultancy, evaluation, and tracing services can certainly improve accessibility to the public receiving medical services or medical information at remote sites. With the widespread use of EHR, building a secure EHR sharing environment has attracted a lot of attention in both healthcare industry and academic community. Cloud computing paradigm is one of the popular healthIT infrastructures for facilitating EHR sharing and EHR integration. 
In this paper, we propose an EHR sharing and integration system in healthcare clouds and analyze the arising security and privacy issues in access and management of EHRs.}, } @article {pmid22333270, year = {2012}, author = {Hunter, AA and Macgregor, AB and Szabo, TO and Wellington, CA and Bellgard, MI}, title = {Yabi: An online research environment for grid, high performance and cloud computing.}, journal = {Source code for biology and medicine}, volume = {7}, number = {1}, pages = {1}, pmid = {22333270}, issn = {1751-0473}, abstract = {BACKGROUND: There is a significant demand for creating pipelines or workflows in the life science discipline that chain a number of discrete compute and data intensive analysis tasks into sophisticated analysis procedures. This need has led to the development of general as well as domain-specific workflow environments that are either complex desktop applications or Internet-based applications. Complexities can arise when configuring these applications in heterogeneous compute and storage environments if the execution and data access models are not designed appropriately. These complexities manifest themselves through limited access to available HPC resources, significant overhead required to configure tools and inability for users to simply manage files across heterogenous HPC storage infrastructure.

RESULTS: In this paper, we describe the architecture of a software system that is adaptable to a range of both pluggable execution and data backends in an open source implementation called Yabi. Enabling seamless and transparent access to heterogenous HPC environments at its core, Yabi then provides an analysis workflow environment that can create and reuse workflows as well as manage large amounts of both raw and processed data in a secure and flexible way across geographically distributed compute resources. Yabi can be used via a web-based environment to drag-and-drop tools to create sophisticated workflows. Yabi can also be accessed through the Yabi command line which is designed for users that are more comfortable with writing scripts or for enabling external workflow environments to leverage the features in Yabi. Configuring tools can be a significant overhead in workflow environments. Yabi greatly simplifies this task by enabling system administrators to configure as well as manage running tools via a web-based environment and without the need to write or edit software programs or scripts. In this paper, we highlight Yabi's capabilities through a range of bioinformatics use cases that arise from large-scale biomedical data analysis.

CONCLUSION: The Yabi system encapsulates considered design of both execution and data models, while abstracting technical details away from users who are not skilled in HPC and providing an intuitive drag-and-drop scalable web-based workflow environment where the same tools can also be accessed via a command line. Yabi is currently in use and deployed at multiple institutions and is available at http://ccg.murdoch.edu.au/yabi.}, } @article {pmid22303597, year = {2012}, author = {Kupfer, DM}, title = {Cloud computing in biomedical research.}, journal = {Aviation, space, and environmental medicine}, volume = {83}, number = {2}, pages = {152-153}, doi = {10.3357/asem.3242.2012}, pmid = {22303597}, issn = {0095-6562}, mesh = {*Biomedical Research ; *Computer Communication Networks ; Computer Security ; Human Genome Project ; Humans ; Information Storage and Retrieval ; *Internet ; }, } @article {pmid22302568, year = {2012}, author = {Niemenmaa, M and Kallio, A and Schumacher, A and Klemelä, P and Korpelainen, E and Heljanko, K}, title = {Hadoop-BAM: directly manipulating next generation sequencing data in the cloud.}, journal = {Bioinformatics (Oxford, England)}, volume = {28}, number = {6}, pages = {876-877}, pmid = {22302568}, issn = {1367-4811}, mesh = {Genome ; High-Throughput Nucleotide Sequencing/*methods ; Sequence Analysis, DNA/*methods ; *Software ; User-Computer Interface ; }, abstract = {Hadoop-BAM is a novel library for the scalable manipulation of aligned next-generation sequencing data in the Hadoop distributed computing framework. It acts as an integration layer between analysis applications and BAM files that are processed using Hadoop. Hadoop-BAM solves the issues related to BAM data access by presenting a convenient API for implementing map and reduce functions that can directly operate on BAM records. 
It builds on top of the Picard SAM JDK, so tools that rely on the Picard API are expected to be easily convertible to support large-scale distributed processing. In this article we demonstrate the use of Hadoop-BAM by building a coverage summarizing tool for the Chipster genome browser. Our results show that Hadoop offers good scalability, and one should avoid moving data in and out of Hadoop between analysis steps.}, } @article {pmid22299957, year = {2012}, author = {Kiracofe, D and Melcher, J and Raman, A}, title = {Gaining insight into the physics of dynamic atomic force microscopy in complex environments using the VEDA simulator.}, journal = {The Review of scientific instruments}, volume = {83}, number = {1}, pages = {013702}, doi = {10.1063/1.3669638}, pmid = {22299957}, issn = {1089-7623}, abstract = {Dynamic atomic force microscopy (dAFM) continues to grow in popularity among scientists in many different fields, and research on new methods and operating modes continues to expand the resolution, capabilities, and types of samples that can be studied. But many promising increases in capability are accompanied by increases in complexity. Indeed, interpreting modern dAFM data can be challenging, especially on complicated material systems, or in liquid environments where the behavior is often contrary to what is known in air or vacuum environments. Mathematical simulations have proven to be an effective tool in providing physical insight into these non-intuitive systems. In this article we describe recent developments in the VEDA (virtual environment for dynamic AFM) simulator, which is a suite of freely available, open-source simulation tools that are delivered through the cloud computing cyber-infrastructure of nanoHUB (www.nanohub.org). Here we describe three major developments. First, simulations in liquid environments are improved by enhancements in the modeling of cantilever dynamics, excitation methods, and solvation shell forces. 
Second, VEDA is now able to simulate many new advanced modes of operation (bimodal, phase-modulation, frequency-modulation, etc.). Finally, nineteen different tip-sample models are available to simulate the surface physics of a wide variety different material systems including capillary, specific adhesion, van der Waals, electrostatic, viscoelasticity, and hydration forces. These features are demonstrated through example simulations and validated against experimental data, in order to provide insight into practical problems in dynamic AFM.}, } @article {pmid22289098, year = {2012}, author = {Hoy, MB}, title = {Cloud computing basics for librarians.}, journal = {Medical reference services quarterly}, volume = {31}, number = {1}, pages = {84-91}, doi = {10.1080/02763869.2012.641853}, pmid = {22289098}, issn = {1540-9597}, mesh = {Information Storage and Retrieval/*methods ; *Internet ; Libraries, Medical ; Library Science/*education ; *Software ; }, abstract = {"Cloud computing" is the name for the recent trend of moving software and computing resources to an online, shared-service model. This article briefly defines cloud computing, discusses different models, explores the advantages and disadvantages, and describes some of the ways cloud computing can be used in libraries. Examples of cloud services are included at the end of the article.}, } @article {pmid22267806, year = {2012}, author = {Barz, S and Kashefi, E and Broadbent, A and Fitzsimons, JF and Zeilinger, A and Walther, P}, title = {Demonstration of blind quantum computing.}, journal = {Science (New York, N.Y.)}, volume = {335}, number = {6066}, pages = {303-308}, doi = {10.1126/science.1214707}, pmid = {22267806}, issn = {1095-9203}, abstract = {Quantum computers, besides offering substantial computational speedups, are also expected to preserve the privacy of a computation. 
We present an experimental demonstration of blind quantum computing in which the input, computation, and output all remain unknown to the computer. We exploit the conceptual framework of measurement-based quantum computation that enables a client to delegate a computation to a quantum server. Various blind delegated computations, including one- and two-qubit gates and the Deutsch and Grover quantum algorithms, are demonstrated. The client only needs to be able to prepare and transmit individual photonic qubits. Our demonstration is crucial for unconditionally secure quantum cloud computing and might become a key ingredient for real-life applications, especially when considering the challenges of making powerful quantum computers widely available.}, } @article {pmid22257667, year = {2012}, author = {Hong, D and Rhie, A and Park, SS and Lee, J and Ju, YS and Kim, S and Yu, SB and Bleazard, T and Park, HS and Rhee, H and Chong, H and Yang, KS and Lee, YS and Kim, IH and Lee, JS and Kim, JI and Seo, JS}, title = {FX: an RNA-Seq analysis tool on the cloud.}, journal = {Bioinformatics (Oxford, England)}, volume = {28}, number = {5}, pages = {721-723}, doi = {10.1093/bioinformatics/bts023}, pmid = {22257667}, issn = {1367-4811}, mesh = {*Gene Expression Profiling ; Genome ; Genome, Human ; Humans ; RNA Splice Sites ; RNA Splicing ; RNA, Messenger/genetics ; *Sequence Analysis, RNA ; *User-Computer Interface ; }, abstract = {UNLABELLED: FX is an RNA-Seq analysis tool, which runs in parallel on cloud computing infrastructure, for the estimation of gene expression levels and genomic variant calling. In the mapping of short RNA-Seq reads, FX uses a transcriptome-based reference primarily, generated from ~160 000 mRNA sequences from RefSeq, UCSC and Ensembl databases. This approach reduces the misalignment of reads originating from splicing junctions. Unmapped reads not aligned on known transcripts are then mapped on the human genome reference. 
FX allows analysis of RNA-Seq data on cloud computing infrastructures, supporting access through a user-friendly web interface.

AVAILABILITY: FX is freely available on the web at (http://fx.gmi.ac.kr), and can be installed on local Hadoop clusters. Guidance for the installation and operation of FX can be found under the 'Documentation' menu on the website.

SUPPLEMENTARY INFORMATION: Supplementary data are available at Bioinformatics online.}, } @article {pmid22255012, year = {2011}, author = {Cheng, C and Stokes, TH and Wang, MD}, title = {caREMOTE: the design of a cancer reporting and monitoring telemedicine system for domestic care.}, journal = {Annual International Conference of the IEEE Engineering in Medicine and Biology Society. IEEE Engineering in Medicine and Biology Society. Annual International Conference}, volume = {2011}, number = {}, pages = {3168-3171}, pmid = {22255012}, issn = {2694-0604}, support = {R01 CA108468/CA/NCI NIH HHS/United States ; U54 CA119338/CA/NCI NIH HHS/United States ; R01CA108468/CA/NCI NIH HHS/United States ; U54CA119338/CA/NCI NIH HHS/United States ; }, mesh = {*Home Care Services ; Humans ; *Monitoring, Physiologic ; Neoplasms/*physiopathology ; Quality of Life ; Surveys and Questionnaires ; *Telemedicine ; }, abstract = {After receiving cancer treatment, patients often experience a decline of HRQoL (health-related quality of life). Physicians typically evaluate HRQoL during periodic clinical visits. However, out-patient reporting of vital signals between two visits could be used to interpret the decline of HRQoL. Considering that the vast majority of patients recovering from cancer are not in hospitals, it is often impractical for the care providers to collect these data. In this paper, we design and prototype caREMOTE, a cancer reporting and monitoring telemedicine system, which can be used in domestic cancer care. By extending a standard clinical trial informatics model, we build a prototype on cloud computing services that can be accessed by a mobile application. 
We aim to maximize the potential of caREMOTE to help medical practitioners efficiently monitor discharged patients' HRQoL and vital signals, and facilitate data reusability and system interoperability in future collaborative cancer research.}, } @article {pmid22253803, year = {2012}, author = {Chouvarine, P and Cooksey, AM and McCarthy, FM and Ray, DA and Baldwin, BS and Burgess, SC and Peterson, DG}, title = {Transcriptome-based differentiation of closely-related Miscanthus lines.}, journal = {PloS one}, volume = {7}, number = {1}, pages = {e29850}, pmid = {22253803}, issn = {1932-6203}, mesh = {Arabidopsis/genetics ; DNA, Complementary/genetics ; Exome/genetics ; Gene Expression Regulation, Plant ; Molecular Sequence Annotation ; Phylogeny ; Poaceae/*classification/*genetics ; Polymorphism, Single Nucleotide/genetics ; RNA, Messenger/genetics/metabolism ; Sequence Analysis, DNA ; Sorghum/genetics ; Statistics as Topic ; Transcriptome/*genetics ; }, abstract = {BACKGROUND: Distinguishing between individuals is critical to those conducting animal/plant breeding, food safety/quality research, diagnostic and clinical testing, and evolutionary biology studies. Classical genetic identification studies are based on marker polymorphisms, but polymorphism-based techniques are time and labor intensive and often cannot distinguish between closely related individuals. Illumina sequencing technologies provide the detailed sequence data required for rapid and efficient differentiation of related species, lines/cultivars, and individuals in a cost-effective manner. Here we describe the use of Illumina high-throughput exome sequencing, coupled with SNP mapping, as a rapid means of distinguishing between related cultivars of the lignocellulosic bioenergy crop giant miscanthus (Miscanthus × giganteus). We provide the first exome sequence database for Miscanthus species complete with Gene Ontology (GO) functional annotations.

RESULTS: A SNP comparative analysis of rhizome-derived cDNA sequences was successfully utilized to distinguish three Miscanthus × giganteus cultivars from each other and from other Miscanthus species. Moreover, the resulting phylogenetic tree generated from SNP frequency data parallels the known breeding history of the plants examined. Some of the giant miscanthus plants exhibit considerable sequence divergence.

CONCLUSIONS: Here we describe an analysis of Miscanthus in which high-throughput exome sequencing was utilized to differentiate between closely related genotypes despite the current lack of a reference genome sequence. We functionally annotated the exome sequences and provide resources to support Miscanthus systems biology. In addition, we demonstrate the use of the commercial high-performance cloud computing to do computational GO annotation.}, } @article {pmid23569644, year = {2012}, author = {Jalali, A and Olabode, OA and Bell, CM}, title = {Leveraging Cloud Computing to Address Public Health Disparities: An Analysis of the SPHPS.}, journal = {Online journal of public health informatics}, volume = {4}, number = {3}, pages = {}, pmid = {23569644}, issn = {1947-2579}, abstract = {As the use of certified electronic health record technology (CEHRT) has continued to gain prominence in hospitals and physician practices, public health agencies and health professionals have the ability to access health data through health information exchanges (HIE). With such knowledge health providers are well positioned to positively affect population health, and enhance health status or quality-of-life outcomes in at-risk populations. Through big data analytics, predictive analytics and cloud computing, public health agencies have the opportunity to observe emerging public health threats in real-time and provide more effective interventions addressing health disparities in our communities. The Smarter Public Health Prevention System (SPHPS) provides real-time reporting of potential public health threats to public health leaders through the use of a simple and efficient dashboard and links people with needed personal health services through mobile platforms for smartphones and tablets to promote and encourage healthy behaviors in our communities. 
The purpose of this working paper is to evaluate how a secure virtual private cloud (VPC) solution could facilitate the implementation of the SPHPS in order to address public health disparities.}, } @article {pmid22205383, year = {2012}, author = {Poulymenopoulou, M and Malamateniou, F and Vassilacopoulos, G}, title = {Emergency healthcare process automation using mobile computing and cloud services.}, journal = {Journal of medical systems}, volume = {36}, number = {5}, pages = {3233-3241}, pmid = {22205383}, issn = {0148-5598}, mesh = {Algorithms ; Computer Security ; *Computers, Handheld ; Confidentiality ; Electronic Health Records/organization & administration ; Emergency Medical Service Communication Systems/organization & administration ; Emergency Medical Services/*organization & administration ; Humans ; *Wireless Technology ; }, abstract = {Emergency care is basically concerned with the provision of pre-hospital and in-hospital medical and/or paramedical services and it typically involves a wide variety of interdependent and distributed activities that can be interconnected to form emergency care processes within and between Emergency Medical Service (EMS) agencies and hospitals. Hence, in developing an information system for emergency care processes, it is essential to support individual process activities and to satisfy collaboration and coordination needs by providing readily access to patient and operational information regardless of location and time. Filling this information gap by enabling the provision of the right information, to the right people, at the right time fosters new challenges, including the specification of a common information format, the interoperability among heterogeneous institutional information systems or the development of new, ubiquitous trans-institutional systems. 
This paper is concerned with the development of an integrated computer support to emergency care processes by evolving and cross-linking institutional healthcare systems. To this end, an integrated EMS cloud-based architecture has been developed that allows authorized users to access emergency case information in standardized document form, as proposed by the Integrating the Healthcare Enterprise (IHE) profile, uses the Organization for the Advancement of Structured Information Standards (OASIS) standard Emergency Data Exchange Language (EDXL) Hospital Availability Exchange (HAVE) for exchanging operational data with hospitals and incorporates an intelligent module that supports triaging and selecting the most appropriate ambulances and hospitals for each case.}, } @article {pmid22195089, year = {2011}, author = {Eriksson, H and Raciti, M and Basile, M and Cunsolo, A and Fröberg, A and Leifler, O and Ekberg, J and Timpka, T}, title = {A cloud-based simulation architecture for pandemic influenza simulation.}, journal = {AMIA ... Annual Symposium proceedings. AMIA Symposium}, volume = {2011}, number = {}, pages = {364-373}, pmid = {22195089}, issn = {1942-597X}, mesh = {*Computer Simulation ; Humans ; Influenza, Human/*epidemiology ; *Internet ; *Pandemics ; }, abstract = {High-fidelity simulations of pandemic outbreaks are resource consuming. Cluster-based solutions have been suggested for executing such complex computations. We present a cloud-based simulation architecture that utilizes computing resources both locally available and dynamically rented online. The approach uses the Condor framework for job distribution and management of the Amazon Elastic Computing Cloud (EC2) as well as local resources. The architecture has a web-based user interface that allows users to monitor and control simulation execution. 
In a benchmark test, the best cost-adjusted performance was recorded for the EC2 H-CPU Medium instance, while a field trial showed that the job configuration had significant influence on the execution time and that the network capacity of the master node could become a bottleneck. We conclude that it is possible to develop a scalable simulation environment that uses cloud-based solutions, while providing an easy-to-use graphical user interface.}, } @article {pmid22191916, year = {2011}, author = {Pratx, G and Xing, L}, title = {Monte Carlo simulation of photon migration in a cloud computing environment with MapReduce.}, journal = {Journal of biomedical optics}, volume = {16}, number = {12}, pages = {125003}, pmid = {22191916}, issn = {1560-2281}, support = {R01 CA133474/CA/NCI NIH HHS/United States ; 1R01 CA 133474/CA/NCI NIH HHS/United States ; }, mesh = {Absorption ; *Algorithms ; Computer Simulation ; Diagnostic Imaging ; Diffusion ; *Monte Carlo Method ; *Photons ; Signal Processing, Computer-Assisted ; *Software ; }, abstract = {Monte Carlo simulation is considered the most reliable method for modeling photon migration in heterogeneous media. However, its widespread use is hindered by the high computational cost. The purpose of this work is to report on our implementation of a simple MapReduce method for performing fault-tolerant Monte Carlo computations in a massively-parallel cloud computing environment. We ported the MC321 Monte Carlo package to Hadoop, an open-source MapReduce framework. In this implementation, Map tasks compute photon histories in parallel while a Reduce task scores photon absorption. The distributed implementation was evaluated on a commercial compute cloud. The simulation time was found to be linearly dependent on the number of photons and inversely proportional to the number of nodes. 
For a cluster size of 240 nodes, the simulation of 100 billion photon histories took 22 min, a 1258 × speed-up compared to the single-threaded Monte Carlo program. The overall computational throughput was 85,178 photon histories per node per second, with a latency of 100 s. The distributed simulation produced the same output as the original implementation and was resilient to hardware failure: the correctness of the simulation was unaffected by the shutdown of 50% of the nodes.}, } @article {pmid22164048, year = {2011}, author = {Kim, S and Song, SM and Yoon, YI}, title = {Smart learning services based on smart cloud computing.}, journal = {Sensors (Basel, Switzerland)}, volume = {11}, number = {8}, pages = {7835-7850}, doi = {10.3390/s110807835}, pmid = {22164048}, issn = {1424-8220}, mesh = {*Computer Systems ; Education, Distance/*methods ; Equipment Design ; Humans ; *Learning ; Signal Processing, Computer-Assisted ; Software ; Systems Integration ; Telecommunications ; User-Computer Interface ; }, abstract = {Context-aware technologies can make e-learning services smarter and more efficient since context-aware services are based on the user's behavior. To add those technologies into existing e-learning services, a service architecture model is needed to transform the existing e-learning environment, which is situation-aware, into the environment that understands context as well. The context-awareness in e-learning may include the awareness of user profile and terminal context. In this paper, we propose a new notion of service that provides context-awareness to smart learning content in a cloud computing environment. We suggest the elastic four smarts (E4S)--smart pull, smart prospect, smart content, and smart push--concept to the cloud services so smart learning services are possible. 
The E4S focuses on meeting the users' needs by collecting and analyzing users' behavior, prospecting future services, building corresponding contents, and delivering the contents through cloud computing environment. Users' behavior can be collected through mobile devices such as smart phones that have built-in sensors. As results, the proposed smart e-learning model in cloud computing environment provides personalized and customized learning services to its users.}, } @article {pmid22163811, year = {2011}, author = {Kang, M and Kang, DI and Crago, SP and Park, GL and Lee, J}, title = {Design and development of a run-time monitor for multi-core architectures in cloud computing.}, journal = {Sensors (Basel, Switzerland)}, volume = {11}, number = {4}, pages = {3595-3610}, doi = {10.3390/s110403595}, pmid = {22163811}, issn = {1424-8220}, mesh = {*Computers ; Humans ; Information Storage and Retrieval/*methods ; Internet ; *Software ; }, abstract = {Cloud computing is a new information technology trend that moves computing and data away from desktops and portable PCs into large data centers. The basic principle of cloud computing is to deliver applications as services over the Internet as well as infrastructure. A cloud is a type of parallel and distributed system consisting of a collection of inter-connected and virtualized computers that are dynamically provisioned and presented as one or more unified computing resources. The large-scale distributed applications on a cloud require adaptive service-based software, which has the capability of monitoring system status changes, analyzing the monitored information, and adapting its service configuration while considering tradeoffs among multiple QoS features simultaneously. In this paper, we design and develop a Run-Time Monitor (RTM) which is a system software to monitor the application behavior at run-time, analyze the collected information, and optimize cloud computing resources for multi-core architectures. 
RTM monitors application software through library instrumentation as well as underlying hardware through a performance counter optimizing its computing configuration based on the analyzed data.}, } @article {pmid22159987, year = {2011}, author = {Klein, CA}, title = {Cloudy confidentiality: clinical and legal implications of cloud computing in health care.}, journal = {The journal of the American Academy of Psychiatry and the Law}, volume = {39}, number = {4}, pages = {571-578}, pmid = {22159987}, issn = {1943-3662}, mesh = {Confidentiality/*legislation & jurisprudence ; Electronic Health Records/legislation & jurisprudence ; Government Regulation ; Health Insurance Portability and Accountability Act ; Information Storage and Retrieval/*legislation & jurisprudence ; *Internet ; United States ; }, abstract = {The Internet has grown into a world of its own, and its ethereal space now offers capabilities that could aid physicians in their duties in numerous ways. In recent years software functions have moved from the individual's local hardware to a central server that operates from a remote location. This centralization is called cloud computing. Privacy laws that speak to the protection of patient confidentiality are complex and often difficult to understand in the context of an ever-growing cloud-based technology. This article is a review of the legal background of protected health records, as well as cloud technology and physician applications. An attempt is made to integrate both concepts and examine Health Insurance Portability and Accountability Act (HIPAA) compliance for each of the examples discussed. The legal regulations that may inform care and standards of practice are reviewed, and the difficulties that arise in assessment and monitoring of the current situation are analyzed. 
For forensic psychiatrists who may be asked to provide expert opinions regarding malpractice situations pertaining to confidentiality standards, it is important to become acquainted with the new digital language from which these questions may arise.}, } @article {pmid22157990, year = {2012}, author = {Wink, DM}, title = {Cloud computing.}, journal = {Nurse educator}, volume = {37}, number = {1}, pages = {3-5}, doi = {10.1097/NNE.0b013e3182383371}, pmid = {22157990}, issn = {1538-9855}, mesh = {Education, Nursing/*organization & administration ; Educational Technology/trends ; *Faculty, Nursing ; Humans ; Internet/*organization & administration ; Medical Informatics/*organization & administration ; United States ; }, abstract = {In this bimonthly series, the author examines how nurse educators can use Internet and Web-based technologies such as search, communication, and collaborative writing tools; social networking and social bookmarking sites; virtual worlds; and Web-based teaching and learning programs. 
This article describes how cloud computing can be used in nursing education.}, } @article {pmid22149842, year = {2011}, author = {Meng, B and Pratx, G and Xing, L}, title = {Ultrafast and scalable cone-beam CT reconstruction using MapReduce in a cloud computing environment.}, journal = {Medical physics}, volume = {38}, number = {12}, pages = {6603-6609}, pmid = {22149842}, issn = {0094-2405}, support = {R01 CA133474/CA/NCI NIH HHS/United States ; 1R01 CA133474/CA/NCI NIH HHS/United States ; }, mesh = {*Algorithms ; *Computer Communication Networks ; Cone-Beam Computed Tomography/*methods ; Imaging, Three-Dimensional/*methods ; Radiographic Image Enhancement/*methods ; Radiographic Image Interpretation, Computer-Assisted/*methods ; Reproducibility of Results ; Sensitivity and Specificity ; *Software ; }, abstract = {PURPOSE: Four-dimensional CT (4DCT) and cone beam CT (CBCT) are widely used in radiation therapy for accurate tumor target definition and localization. However, high-resolution and dynamic image reconstruction is computationally demanding because of the large amount of data processed. Efficient use of these imaging techniques in the clinic requires high-performance computing. The purpose of this work is to develop a novel ultrafast, scalable and reliable image reconstruction technique for 4D CBCT∕CT using a parallel computing framework called MapReduce. We show the utility of MapReduce for solving large-scale medical physics problems in a cloud computing environment.

METHODS: In this work, we accelerated the Feldkamp-Davis-Kress (FDK) algorithm by porting it to Hadoop, an open-source MapReduce implementation. Gated phases from a 4DCT scan were reconstructed independently. Following the MapReduce formalism, Map functions were used to filter and backproject subsets of projections, and a Reduce function to aggregate those partial backprojections into the whole volume. MapReduce automatically parallelized the reconstruction process on a large cluster of computer nodes. As a validation, reconstruction of a digital phantom and an acquired CatPhan 600 phantom was performed on a commercial cloud computing environment using the proposed 4D CBCT∕CT reconstruction algorithm.

RESULTS: Speedup of reconstruction time is found to be roughly linear with the number of nodes employed. For instance, greater than 10 times speedup was achieved using 200 nodes for all cases, compared to the same code executed on a single machine. Without modifying the code, faster reconstruction is readily achievable by allocating more nodes in the cloud computing environment. Root mean square error between the images obtained using MapReduce and a single-threaded reference implementation was on the order of 10(-7). Our study also proved that cloud computing with MapReduce is fault tolerant: the reconstruction completed successfully with identical results even when half of the nodes were manually terminated in the middle of the process.

CONCLUSIONS: An ultrafast, reliable and scalable 4D CBCT∕CT reconstruction method was developed using the MapReduce framework. Unlike other parallel computing approaches, the parallelization and speedup required little modification of the original reconstruction code. MapReduce provides an efficient and fault tolerant means of solving large-scale computing problems in a cloud computing environment.}, } @article {pmid22144881, year = {2011}, author = {Greenbaum, D and Sboner, A and Mu, XJ and Gerstein, M}, title = {Genomics and privacy: implications of the new reality of closed data for the field.}, journal = {PLoS computational biology}, volume = {7}, number = {12}, pages = {e1002278}, pmid = {22144881}, issn = {1553-7358}, mesh = {*Computer Security ; *Genetic Privacy ; *Genomics ; Humans ; }, abstract = {Open source and open data have been driving forces in bioinformatics in the past. However, privacy concerns may soon change the landscape, limiting future access to important data sets, including personal genomics data. Here we survey this situation in some detail, describing, in particular, how the large scale of the data from personal genomic sequencing makes it especially hard to share data, exacerbating the privacy problem. We also go over various aspects of genomic privacy: first, there is basic identifiability of subjects having their genome sequenced. However, even for individuals who have consented to be identified, there is the prospect of very detailed future characterization of their genotype, which, unanticipated at the time of their consent, may be more personal and invasive than the release of their medical records. We go over various computational strategies for dealing with the issue of genomic privacy. One can "slice" and reformat datasets to allow them to be partially shared while securing the most private variants. This is particularly applicable to functional genomics information, which can be largely processed without variant information. 
For handling the most private data there are a number of legal and technological approaches-for example, modifying the informed consent procedure to acknowledge that privacy cannot be guaranteed, and/or employing a secure cloud computing environment. Cloud computing in particular may allow access to the data in a more controlled fashion than the current practice of downloading and computing on large datasets. Furthermore, it may be particularly advantageous for small labs, given that the burden of many privacy issues falls disproportionately on them in comparison to large corporations and genome centers. Finally, we discuss how education of future genetics researchers will be important, with curriculums emphasizing privacy and data security. However, teaching personal genomics with identifiable subjects in the university setting will, in turn, create additional privacy issues and social conundrums.}, } @article {pmid22142244, year = {2012}, author = {Grossman, RL and White, KP}, title = {A vision for a biomedical cloud.}, journal = {Journal of internal medicine}, volume = {271}, number = {2}, pages = {122-130}, pmid = {22142244}, issn = {1365-2796}, support = {P50 GM081892/GM/NIGMS NIH HHS/United States ; }, mesh = {Biomedical Research/organization & administration/*trends ; Computational Biology/organization & administration/*trends ; Computer Communication Networks/organization & administration ; Computer Security ; Data Mining/*trends ; Electronic Health Records/organization & administration/*trends ; Genome, Human/genetics ; Humans ; Systems Biology/*trends ; }, abstract = {We present a vision for a Biomedical Cloud that draws on progress in the fields of Genomics, Systems Biology and biomedical data mining. 
The successful fusion of these areas will combine the use of biomarkers, genetic variants, and environmental variables to build predictive models that will drastically increase the specificity and timeliness of diagnosis for a wide range of common diseases, whilst delivering accurate predictions about the efficacy of treatment options. However, the amount of data being generated by each of these fields is staggering, as is the task of managing and analysing it. Adequate computing infrastructure needs to be developed to assemble, manage and mine the enormous and rapidly growing corpus of 'omics' data along with clinical information. We have now arrived at an intersection point between genome technology, cloud computing and biological data mining. This intersection point provides a launch pad for developing a globally applicable cloud computing platform capable of supporting a new paradigm of data intensive, cloud-enabled predictive medicine.}, } @article {pmid22084254, year = {2012}, author = {Zhang, L and Gu, S and Liu, Y and Wang, B and Azuaje, F}, title = {Gene set analysis in the cloud.}, journal = {Bioinformatics (Oxford, England)}, volume = {28}, number = {2}, pages = {294-295}, doi = {10.1093/bioinformatics/btr630}, pmid = {22084254}, issn = {1367-4811}, mesh = {Algorithms ; Computational Biology/*methods ; *Information Storage and Retrieval ; *Internet ; Programming Languages ; *Software ; }, abstract = {UNLABELLED: Cloud computing offers low cost and highly flexible opportunities in bioinformatics. Its potential has already been demonstrated in high-throughput sequence data analysis. Pathway-based or gene set analysis of expression data has received relatively less attention. We developed a gene set analysis algorithm for biomarker identification in the cloud. The resulting tool, YunBe, is ready to use on Amazon Web Services. Moreover, here we compare its performance to those obtained with desktop and computing cluster solutions.

YunBe is open-source and freely accessible within the Amazon Elastic MapReduce service at s3n://lrcv-crp-sante/app/yunbe.jar. Source code and user's guidelines can be downloaded from http://tinyurl.com/yunbedownload.}, } @article {pmid22081224, year = {2012}, author = {Ohno-Machado, L and Bafna, V and Boxwala, AA and Chapman, BE and Chapman, WW and Chaudhuri, K and Day, ME and Farcas, C and Heintzman, ND and Jiang, X and Kim, H and Kim, J and Matheny, ME and Resnic, FS and Vinterbo, SA and , }, title = {iDASH: integrating data for analysis, anonymization, and sharing.}, journal = {Journal of the American Medical Informatics Association : JAMIA}, volume = {19}, number = {2}, pages = {196-201}, pmid = {22081224}, issn = {1527-974X}, support = {R01 HG004962/HG/NHGRI NIH HHS/United States ; U54 HL108460/HL/NHLBI NIH HHS/United States ; }, mesh = {*Algorithms ; *Confidentiality ; Forecasting ; Goals ; Health Insurance Portability and Accountability Act ; *Information Dissemination ; Information Storage and Retrieval ; *Medical Informatics ; United States ; }, abstract = {iDASH (integrating data for analysis, anonymization, and sharing) is the newest National Center for Biomedical Computing funded by the NIH. It focuses on algorithms and tools for sharing data in a privacy-preserving manner. Foundational privacy technology research performed within iDASH is coupled with innovative engineering for collaborative tool development and data-sharing capabilities in a private Health Insurance Portability and Accountability Act (HIPAA)-certified cloud. Driving Biological Projects, which span different biological levels (from molecules to individuals to populations) and focus on various health conditions, help guide research and development within this Center. Furthermore, training and dissemination efforts connect the Center with its stakeholders and educate data owners and data consumers on how to share and use clinical and biological data. 
Through these various mechanisms, iDASH implements its goal of providing biomedical and behavioral researchers with access to data, software, and a high-performance computing environment, thus enabling them to generate and test new hypotheses.}, } @article {pmid22074278, year = {2011}, author = {Briscoe, C}, title = {Laboratory and software applications for clinical trials: the global laboratory environment.}, journal = {Bioanalysis}, volume = {3}, number = {21}, pages = {2381-2384}, doi = {10.4155/bio.11.246}, pmid = {22074278}, issn = {1757-6199}, mesh = {Biomedical Research ; *Clinical Trials as Topic/standards ; *Database Management Systems/organization & administration/standards/trends ; Drug Industry/methods/*standards/trends ; Information Dissemination ; Internationality ; *Laboratories/standards ; United States ; }, abstract = {The Applied Pharmaceutical Software Meeting is held annually. It is sponsored by The Boston Society, a not-for-profit organization that coordinates a series of meetings within the global pharmaceutical industry. The meeting generally focuses on laboratory applications, but in recent years has expanded to include some software applications for clinical trials. The 2011 meeting emphasized the global laboratory environment. Global clinical trials generate massive amounts of data in many locations that must be centralized and processed for efficient analysis. Thus, the meeting had a strong focus on establishing networks and systems for dealing with the computer infrastructure to support such environments. 
In addition to the globally installed laboratory information management system, electronic laboratory notebook and other traditional laboratory applications, cloud computing is quickly becoming the answer to provide efficient, inexpensive options for managing the large volumes of data and computing power, and thus it served as a central theme for the meeting.}, } @article {pmid22072385, year = {2012}, author = {Pratt, B and Howbert, JJ and Tasman, NI and Nilsson, EJ}, title = {MR-Tandem: parallel X!Tandem using Hadoop MapReduce on Amazon Web Services.}, journal = {Bioinformatics (Oxford, England)}, volume = {28}, number = {1}, pages = {136-137}, pmid = {22072385}, issn = {1367-4811}, support = {R43 HG006091/HG/NHGRI NIH HHS/United States ; HG006091/HG/NHGRI NIH HHS/United States ; }, mesh = {Cluster Analysis ; Programming Languages ; *Protein Processing, Post-Translational ; Proteins/*analysis/*metabolism ; *Search Engine ; *Software/economics ; }, abstract = {SUMMARY: MR-Tandem adapts the popular X!Tandem peptide search engine to work with Hadoop MapReduce for reliable parallel execution of large searches. MR-Tandem runs on any Hadoop cluster but offers special support for Amazon Web Services for creating inexpensive on-demand Hadoop clusters, enabling search volumes that might not otherwise be feasible with the compute resources a researcher has at hand. MR-Tandem is designed to drop in wherever X!Tandem is already in use and requires no modification to existing X!Tandem parameter files, and only minimal modification to X!Tandem-based workflows.

MR-Tandem is implemented as a lightly modified X!Tandem C++ executable and a Python script that drives Hadoop clusters including Amazon Web Services (AWS) Elastic Map Reduce (EMR), using the modified X!Tandem program as a Hadoop Streaming mapper and reducer. The modified X!Tandem C++ source code is Artistic licensed, supports pluggable scoring, and is available as part of the Sashimi project at http://sashimi.svn.sourceforge.net/viewvc/sashimi/trunk/trans_proteomic_pipeline/extern/xtandem/. The MR-Tandem Python script is Apache licensed and available as part of the Insilicos Cloud Army project at http://ica.svn.sourceforge.net/viewvc/ica/trunk/mr-tandem/. Full documentation and a windows installer that configures MR-Tandem, Python and all necessary packages are available at this same URL.

CONTACT: brian.pratt@insilicos.com}, } @article {pmid22068528, year = {2011}, author = {Afgan, E and Baker, D and Coraor, N and Goto, H and Paul, IM and Makova, KD and Nekrutenko, A and Taylor, J}, title = {Harnessing cloud computing with Galaxy Cloud.}, journal = {Nature biotechnology}, volume = {29}, number = {11}, pages = {972-974}, pmid = {22068528}, issn = {1546-1696}, support = {HG005133/HG/NHGRI NIH HHS/United States ; R01 HG004909/HG/NHGRI NIH HHS/United States ; R01 HG004909-01/HG/NHGRI NIH HHS/United States ; HG005542/HG/NHGRI NIH HHS/United States ; R01 HG004909-02/HG/NHGRI NIH HHS/United States ; HG004909/HG/NHGRI NIH HHS/United States ; RC2 HG005542/HG/NHGRI NIH HHS/United States ; R01 HG004909-03/HG/NHGRI NIH HHS/United States ; R01 GM072264/GM/NIGMS NIH HHS/United States ; R21 HG005133/HG/NHGRI NIH HHS/United States ; GM07226405S2/GM/NIGMS NIH HHS/United States ; }, mesh = {Computer Storage Devices/economics/*trends ; DNA, Mitochondrial/genetics ; Humans ; Internet/*standards ; Sequence Analysis, DNA/*methods ; Software/*trends ; }, } @article {pmid22047125, year = {2011}, author = {Greenbaum, D and Gerstein, M}, title = {The role of cloud computing in managing the deluge of potentially private genetic data.}, journal = {The American journal of bioethics : AJOB}, volume = {11}, number = {11}, pages = {39-41}, doi = {10.1080/15265161.2011.608242}, pmid = {22047125}, issn = {1536-0075}, mesh = {*Beneficence ; Confidentiality/*ethics/*legislation & jurisprudence ; *Health Insurance Portability and Accountability Act ; Humans ; Informed Consent/*ethics ; Patient Selection/*ethics ; *Personal Autonomy ; Research/*organization & administration ; *Research Personnel ; }, } @article {pmid22043737, year = {2011}, author = {DeGaspari, J}, title = {Staying on your feet.}, journal = {Healthcare informatics : the business magazine for information and communication systems}, volume = {28}, number = {10}, pages = {16-8, 20-1}, pmid = {22043737}, issn = 
{1050-9135}, mesh = {American Recovery and Reinvestment Act ; *Computer Security ; *Disaster Planning ; *Electronic Health Records ; Hospital Administration/methods/standards ; *Hospital Information Systems ; Humans ; Information Storage and Retrieval ; United States ; }, abstract = {CIOs are hard at work coming up with the most effective and affordable strategies for protecting electronic data as their hospitals move forward on electronic medical records. While the rise of cloud computing and declining network costs are offering new opportunities in dealing with potential disasters, many find there is no substitute for good planning and constant testing.}, } @article {pmid22028928, year = {2011}, author = {Angiuoli, SV and White, JR and Matalka, M and White, O and Fricke, WF}, title = {Resources and costs for microbial sequence analysis evaluated using virtual machines and cloud computing.}, journal = {PloS one}, volume = {6}, number = {10}, pages = {e26624}, pmid = {22028928}, issn = {1932-6203}, support = {RC2 HG005597/HG/NHGRI NIH HHS/United States ; RC2 HG005597-01/HG/NHGRI NIH HHS/United States ; }, mesh = {Animals ; Computational Biology/*economics/instrumentation/*methods ; Humans ; Infant ; *Internet ; Metagenomics ; Mice ; Microbiology/*economics/instrumentation ; Molecular Sequence Annotation ; RNA, Bacterial/genetics ; RNA, Ribosomal, 16S/genetics ; Sequence Analysis/*economics/instrumentation/*methods ; *User-Computer Interface ; }, abstract = {BACKGROUND: The widespread popularity of genomic applications is threatened by the "bioinformatics bottleneck" resulting from uncertainty about the cost and infrastructure needed to meet increasing demands for next-generation sequence analysis. Cloud computing services have been discussed as potential new bioinformatics support systems but have not been evaluated thoroughly.

RESULTS: We present benchmark costs and runtimes for common microbial genomics applications, including 16S rRNA analysis, microbial whole-genome shotgun (WGS) sequence assembly and annotation, WGS metagenomics and large-scale BLAST. Sequence dataset types and sizes were selected to correspond to outputs typically generated by small- to midsize facilities equipped with 454 and Illumina platforms, except for WGS metagenomics where sampling of Illumina data was used. Automated analysis pipelines, as implemented in the CloVR virtual machine, were used in order to guarantee transparency, reproducibility and portability across different operating systems, including the commercial Amazon Elastic Compute Cloud (EC2), which was used to attach real dollar costs to each analysis type. We found considerable differences in computational requirements, runtimes and costs associated with different microbial genomics applications. While all 16S analyses completed on a single-CPU desktop in under three hours, microbial genome and metagenome analyses utilized multi-CPU support of up to 120 CPUs on Amazon EC2, where each analysis completed in under 24 hours for less than $60. Representative datasets were used to estimate maximum data throughput on different cluster sizes and to compare costs between EC2 and comparable local grid servers.

CONCLUSIONS: Although bioinformatics requirements for microbial genomics depend on dataset characteristics and the analysis protocols applied, our results suggest that smaller sequencing facilities (up to three Roche/454 or one Illumina GAIIx sequencer) invested in 16S rRNA amplicon sequencing, microbial single-genome and metagenomics WGS projects can achieve cost-efficient bioinformatics support using CloVR in combination with Amazon EC2 as an alternative to local computing centers.}, } @article {pmid22023682, year = {2011}, author = {Goodman, AF}, title = {Analysis, biomedicine, collaboration, and determinism challenges and guidance: wish list for biopharmaceuticals on the interface of computing and statistics.}, journal = {Journal of biopharmaceutical statistics}, volume = {21}, number = {6}, pages = {1140-1157}, doi = {10.1080/10543406.2011.613361}, pmid = {22023682}, issn = {1520-5711}, mesh = {Animals ; Biomedical Technology/methods/trends ; Biopharmaceutics/methods/*trends ; Computational Biology/methods/*trends ; Computers/trends ; *Cooperative Behavior ; Humans ; Practice Guidelines as Topic/*standards ; Protein Array Analysis/methods/trends ; Translational Research, Biomedical/methods/trends ; *User-Computer Interface ; }, abstract = {I have personally witnessed processing advance from desk calculators and mainframes, through timesharing and PCs, to supercomputers and cloud computing. I have also witnessed resources grow from too little data into almost too much data, and from theory dominating data into data beginning to dominate theory while needing new theory. Finally, I have witnessed problems advance from simple in a lone discipline into becoming almost too complex in multiple disciplines, as well as approaches evolve from analysis driving solutions into solutions by data mining beginning to drive the analysis itself. 
How we do all of this has transitioned from competition overcoming collaboration into collaboration starting to overcome competition, as well as what is done being more important than how it is done has transitioned into how it is done becoming as important as what is done. In addition, what or how we do it being more important than what or how we should actually do it has shifted into what or how we should do it becoming just as important as what or how we do it, if not more so. Although we have come a long way in both our methodology and technology, are they sufficient for our current or future complex and multidisciplinary problems with their massive databases? Since the apparent answer is not a resounding yes, we are presented with tremendous challenges and opportunities. This personal perspective adapts my background and experience to be appropriate for biopharmaceuticals. In these times of exploding change, informed perspectives on what challenges should be explored with accompanying guidance may be even more valuable than the far more typical literature reviews in conferences and journals of what has already been accomplished without challenges or guidance. Would we believe that an architect who designs a skyscraper determines the skyscraper's exact exterior, interior and furnishings or only general characteristics? Why not increase dependability of conclusions in genetics and translational medicine by enriching genetic determinism with uncertainty? Uncertainty is our friend if exploited or potential enemy if ignored. Genes design proteins, but they cannot operationally determine all protein characteristics: they begin a long chain of complex events occurring many times via intricate feedbacks plus interactions which are not all determined. Genes influence proteins and diseases by just determining their probability distributions, not by determining them. 
From any sample of diseased people, we may more successfully infer gene probability distributions than genes themselves, and it poses an issue to resolve. My position is supported by 2-3 articles a week in ScienceDaily, 2011.}, } @article {pmid22018222, year = {2012}, author = {Franklin, EC and Stat, M and Pochon, X and Putnam, HM and Gates, RD}, title = {GeoSymbio: a hybrid, cloud-based web application of global geospatial bioinformatics and ecoinformatics for Symbiodinium-host symbioses.}, journal = {Molecular ecology resources}, volume = {12}, number = {2}, pages = {369-373}, doi = {10.1111/j.1755-0998.2011.03081.x}, pmid = {22018222}, issn = {1755-0998}, mesh = {Animals ; Computational Biology/*instrumentation/methods ; Dinoflagellida/genetics/*physiology ; Invertebrates/genetics/*physiology ; *Software ; *Symbiosis ; User-Computer Interface ; }, abstract = {The genus Symbiodinium encompasses a group of unicellular, photosynthetic dinoflagellates that are found free living or in hospite with a wide range of marine invertebrate hosts including scleractinian corals. We present GeoSymbio, a hybrid web application that provides an online, easy to use and freely accessible interface for users to discover, explore and utilize global geospatial bioinformatic and ecoinformatic data on Symbiodinium-host symbioses. The novelty of this application lies in the combination of a variety of query and visualization tools, including dynamic searchable maps, data tables with filter and grouping functions, and interactive charts that summarize the data. Importantly, this application is hosted remotely or 'in the cloud' using Google Apps, and therefore does not require any specialty GIS, web programming or data programming expertise from the user. 
The current version of the application utilizes Symbiodinium data based on the ITS2 genetic marker from PCR-based techniques, including denaturing gradient gel electrophoresis, sequencing and cloning of specimens collected during 1982-2010. All data elements of the application are also downloadable as spatial files, tables and nucleic acid sequence files in common formats for desktop analysis. The application provides a unique tool set to facilitate research on the basic biology of Symbiodinium and expedite new insights into their ecology, biogeography and evolution in the face of a changing global climate. GeoSymbio can be accessed at https://sites.google.com/site/geosymbio/.}, } @article {pmid21937354, year = {2011}, author = {Kuo, AM}, title = {Opportunities and challenges of cloud computing to improve health care services.}, journal = {Journal of medical Internet research}, volume = {13}, number = {3}, pages = {e67}, pmid = {21937354}, issn = {1438-8871}, mesh = {Computer Communication Networks/*organization & administration ; *Computers ; Consumer Health Information/*organization & administration ; Efficiency, Organizational ; Humans ; Information Storage and Retrieval/*methods ; Public Health Informatics/*organization & administration ; *Systems Integration ; }, abstract = {Cloud computing is a new way of delivering computing resources and services. Many managers and experts believe that it can improve health care services, benefit health care research, and change the face of health information technology. However, as with any innovation, cloud computing should be rigorously evaluated before its widespread adoption. This paper discusses the concept and its current place in health care, and uses 4 aspects (management, technology, security, and legal) to evaluate the opportunities and challenges of this computing model. 
Strategic planning that could be used by a health organization to determine its direction, strategy, and resource allocation when it has decided to migrate from traditional to cloud-based health services is also discussed.}, } @article {pmid21923009, year = {2011}, author = {Skiba, DJ}, title = {Are you computing in the clouds? Understanding cloud computing.}, journal = {Nursing education perspectives}, volume = {32}, number = {4}, pages = {266-268}, doi = {10.5480/1536-5026-32.4.266}, pmid = {21923009}, issn = {1536-5026}, mesh = {Humans ; *Internet ; Nursing Informatics/*education ; United States ; }, } @article {pmid21901085, year = {2011}, author = {Fusaro, VA and Patil, P and Gafni, E and Wall, DP and Tonellato, PJ}, title = {Biomedical cloud computing with Amazon Web Services.}, journal = {PLoS computational biology}, volume = {7}, number = {8}, pages = {e1002147}, pmid = {21901085}, issn = {1553-7358}, support = {R01 LM010130/LM/NLM NIH HHS/United States ; R01 MH090611/MH/NIMH NIH HHS/United States ; R01LM010130/LM/NLM NIH HHS/United States ; }, mesh = {Computational Biology ; Computer Security ; Information Storage and Retrieval/economics/*methods ; *Internet ; *Software ; }, abstract = {In this overview to biomedical computing in the cloud, we discussed two primary ways to use the cloud (a single instance or cluster), provided a detailed example using NGS mapping, and highlighted the associated costs. While many users new to the cloud may assume that entry is as straightforward as uploading an application and selecting an instance type and storage options, we illustrated that there is substantial up-front effort required before an application can make full use of the cloud's vast resources. Our intention was to provide a set of best practices and to illustrate how those apply to a typical application pipeline for biomedical informatics, but also general enough for extrapolation to other types of computational problems. 
Our mapping example was intended to illustrate how to develop a scalable project and not to compare and contrast alignment algorithms for read mapping and genome assembly. Indeed, with a newer aligner such as Bowtie, it is possible to map the entire African genome using one m2.2xlarge instance in 48 hours for a total cost of approximately $48 in computation time. In our example, we were not concerned with data transfer rates, which are heavily influenced by the amount of available bandwidth, connection latency, and network availability. When transferring large amounts of data to the cloud, bandwidth limitations can be a major bottleneck, and in some cases it is more efficient to simply mail a storage device containing the data to AWS (http://aws.amazon.com/importexport/). More information about cloud computing, detailed cost analysis, and security can be found in references.}, } @article {pmid21893817, year = {2011}, author = {Kreuzthaler, M and Schulz, S}, title = {Truecasing clinical narratives.}, journal = {Studies in health technology and informatics}, volume = {169}, number = {}, pages = {589-593}, pmid = {21893817}, issn = {0926-9630}, mesh = {Algorithms ; Documentation/*methods ; Germany ; Humans ; Information Storage and Retrieval/*methods ; Internet ; *Language ; *Medical Records ; Pattern Recognition, Automated/*methods ; Pattern Recognition, Visual/*physiology ; Reading ; Reproducibility of Results ; Semantics ; }, abstract = {Truecasing, or capitalization, is the rewriting of each word of an input text with its proper case information. Many medical texts, especially those from legacy systems, are still written entirely in capitalized letters, hampering their readability. We present a pilot study that uses the World Wide Web as a corpus in order to support automatic truecasing. The texts under scrutiny were German-language pathology reports. 
By submitting token bigrams to the Google Web search engine we collected enough case information so that we achieved 81.3% accuracy for acronyms and 98.5% accuracy for normal words. This is all the more impressive as only half of the words used in this corpus existed in a standard medical dictionary due to the excessive use of ad-hoc single-word nominal compounds in German. Our system performed less satisfactory for spelling correction, and in three cases the proposed word substitutions altered the meaning of the input sentence. For the routine deployment of this method the dependency on a (black box) search engine must be overcome, for example by using cloud-based Web n-gram services.}, } @article {pmid21893777, year = {2011}, author = {Kuo, MH and Kushniruk, A and Borycki, E}, title = {Can cloud computing benefit health services? - a SWOT analysis.}, journal = {Studies in health technology and informatics}, volume = {169}, number = {}, pages = {379-383}, pmid = {21893777}, issn = {0926-9630}, mesh = {Computer Security ; Computers ; *Electronic Health Records ; Health Services Research ; Humans ; Information Storage and Retrieval ; Information Systems ; Internet ; *Medical Informatics ; Software ; Systems Integration ; }, abstract = {In this paper, we discuss cloud computing, the current state of cloud computing in healthcare, and the challenges and opportunities of adopting cloud computing in healthcare. A Strengths, Weaknesses, Opportunities and Threats (SWOT) analysis was used to evaluate the feasibility of adopting this computing model in healthcare. 
The paper concludes that cloud computing could have huge benefits for healthcare but there are a number of issues that will need to be addressed before its widespread use in healthcare.}, } @article {pmid21893725, year = {2011}, author = {McGregor, C and Catley, C and James, A and Padbury, J}, title = {Next generation neonatal health informatics with Artemis.}, journal = {Studies in health technology and informatics}, volume = {169}, number = {}, pages = {115-119}, pmid = {21893725}, issn = {0926-9630}, mesh = {Canada ; Computer Systems ; Computers ; Decision Support Systems, Clinical ; Diagnosis, Computer-Assisted/*instrumentation ; Diffusion of Innovation ; Equipment Design ; Humans ; Infant, Newborn ; Intensive Care, Neonatal/*methods ; Internet ; Medical Informatics/*instrumentation/methods ; Medical Records Systems, Computerized ; Monitoring, Physiologic/*instrumentation ; }, abstract = {This paper describes the deployment of a platform to enable processing of currently uncharted high frequency, high fidelity, synchronous data from medical devices. Such a platform would support the next generation of informatics solutions for neonatal intensive care. We present Artemis, a platform for real-time enactment of clinical knowledge as it relates to multidimensional data analysis and clinical research. 
Through specific deployment examples at two different neonatal intensive care units, we demonstrate that Artemis supports: 1) instantiation of clinical rules; 2) multidimensional analysis; 3) distribution of services for critical care via cloud computing; and 4) accomplishing 1 through 3 using current technology without a negative impact on patient care.}, } @article {pmid21878105, year = {2011}, author = {Angiuoli, SV and Matalka, M and Gussman, A and Galens, K and Vangala, M and Riley, DR and Arze, C and White, JR and White, O and Fricke, WF}, title = {CloVR: a virtual machine for automated and portable sequence analysis from the desktop using cloud computing.}, journal = {BMC bioinformatics}, volume = {12}, number = {}, pages = {356}, pmid = {21878105}, issn = {1471-2105}, support = {RC2 HG005597-01/HG/NHGRI NIH HHS/United States ; }, mesh = {Computational Biology ; *Computers ; Genomics ; High-Throughput Nucleotide Sequencing ; *Internet ; *Sequence Analysis, DNA ; *Software ; }, abstract = {BACKGROUND: Next-generation sequencing technologies have decentralized sequence acquisition, increasing the demand for new bioinformatics tools that are easy to use, portable across multiple platforms, and scalable for high-throughput applications. Cloud computing platforms provide on-demand access to computing infrastructure over the Internet and can be used in combination with custom built virtual machines to distribute pre-packaged with pre-configured software.

RESULTS: We describe the Cloud Virtual Resource, CloVR, a new desktop application for push-button automated sequence analysis that can utilize cloud computing resources. CloVR is implemented as a single portable virtual machine (VM) that provides several automated analysis pipelines for microbial genomics, including 16S, whole genome and metagenome sequence analysis. The CloVR VM runs on a personal computer, utilizes local computer resources and requires minimal installation, addressing key challenges in deploying bioinformatics workflows. In addition CloVR supports use of remote cloud computing resources to improve performance for large-scale sequence processing. In a case study, we demonstrate the use of CloVR to automatically process next-generation sequencing data on multiple cloud computing platforms.

CONCLUSION: The CloVR VM and associated architecture lowers the barrier of entry for utilizing complex analysis protocols on both local single- and multi-core computers and cloud systems for high throughput data processing.}, } @article {pmid21866720, year = {2011}, author = {Glaser, J}, title = {Cloud computing can simplify HIT infrastructure management.}, journal = {Healthcare financial management : journal of the Healthcare Financial Management Association}, volume = {65}, number = {8}, pages = {52-55}, pmid = {21866720}, issn = {0735-0732}, mesh = {*Access to Information ; *Efficiency, Organizational ; Hospital Information Systems/*organization & administration ; *Internet ; *Outsourced Services ; United States ; User-Computer Interface ; }, abstract = {Software as a Service (SaaS), built on cloud computing technology, is emerging as the forerunner in IT infrastructure because it helps healthcare providers reduce capital investments. Cloud computing leads to predictable, monthly, fixed operating expenses for hospital IT staff. Outsourced cloud computing facilities are state-of-the-art data centers boasting some of the most sophisticated networking equipment on the market. The SaaS model helps hospitals safeguard against technology obsolescence, minimizes maintenance requirements, and simplifies management.}, } @article {pmid21863719, year = {2011}, author = {Degaspari, J}, title = {Security in the cloud.}, journal = {Healthcare informatics : the business magazine for information and communication systems}, volume = {28}, number = {8}, pages = {18, 20, 22 passim}, pmid = {21863719}, issn = {1050-9135}, mesh = {*Computer Security ; *Internet ; *Medical Record Linkage ; United States ; }, abstract = {As more provider organizations look to the cloud computing model, they face a host of security-related questions. What are the appropriate applications for the cloud, what is the best cloud model, and what do they need to know to choose the best vendor? 
Hospital CIOs and security experts weigh in.}, } @article {pmid21843145, year = {2011}, author = {Garg, V and Arora, S and Gupta, C}, title = {Cloud computing approaches to accelerate drug discovery value chain.}, journal = {Combinatorial chemistry & high throughput screening}, volume = {14}, number = {10}, pages = {861-871}, doi = {10.2174/138620711797537085}, pmid = {21843145}, issn = {1875-5402}, mesh = {Animals ; Drug Discovery/economics/*methods ; Genomics/economics/methods ; High-Throughput Screening Assays/economics/methods ; Humans ; Microarray Analysis/economics/methods ; *Software ; }, abstract = {Continued advancements in the area of technology have helped high throughput screening (HTS) evolve from a linear to parallel approach by performing system level screening. Advanced experimental methods used for HTS at various steps of drug discovery (i.e. target identification, target validation, lead identification and lead validation) can generate data of the order of terabytes. As a consequence, there is pressing need to store, manage, mine and analyze this data to identify informational tags. This need is again posing challenges to computer scientists to offer the matching hardware and software infrastructure, while managing the varying degree of desired computational power. Therefore, the potential of "On-Demand Hardware" and "Software as a Service (SAAS)" delivery mechanisms cannot be denied. This on-demand computing, largely referred to as Cloud Computing, is now transforming the drug discovery research. Also, integration of Cloud computing with parallel computing is certainly expanding its footprint in the life sciences community. The speed, efficiency and cost effectiveness have made cloud computing a 'good to have tool' for researchers, providing them significant flexibility, allowing them to focus on the 'what' of science and not the 'how'. 
Once reached to its maturity, Discovery-Cloud would fit best to manage drug discovery and clinical development data, generated using advanced HTS techniques, hence supporting the vision of personalized medicine.}, } @article {pmid21841211, year = {2011}, author = {Wang, H and Ma, Y and Pratx, G and Xing, L}, title = {Toward real-time Monte Carlo simulation using a commercial cloud computing infrastructure.}, journal = {Physics in medicine and biology}, volume = {56}, number = {17}, pages = {N175-81}, pmid = {21841211}, issn = {1361-6560}, support = {R01 CA133474-04/CA/NCI NIH HHS/United States ; 1R01 CA133474/CA/NCI NIH HHS/United States ; R01 CA133474/CA/NCI NIH HHS/United States ; R21 CA153587-02/CA/NCI NIH HHS/United States ; R21 CA153587/CA/NCI NIH HHS/United States ; 1R21 CA153587/CA/NCI NIH HHS/United States ; }, mesh = {Algorithms ; Cluster Analysis ; *Computer Simulation ; Computing Methodologies ; Humans ; Information Storage and Retrieval/*methods ; *Internet ; *Monte Carlo Method ; Phantoms, Imaging ; Radiotherapy/*instrumentation/methods ; Radiotherapy Planning, Computer-Assisted/*methods ; *Software ; }, abstract = {Monte Carlo (MC) methods are the gold standard for modeling photon and electron transport in a heterogeneous medium; however, their computational cost prohibits their routine use in the clinic. Cloud computing, wherein computing resources are allocated on-demand from a third party, is a new approach for high performance computing and is implemented to perform ultra-fast MC calculation in radiation therapy. We deployed the EGS5 MC package in a commercial cloud environment. Launched from a single local computer with Internet access, a Python script allocates a remote virtual cluster. A handshaking protocol designates master and worker nodes. The EGS5 binaries and the simulation data are initially loaded onto the master node. 
The simulation is then distributed among independent worker nodes via the message passing interface, and the results aggregated on the local computer for display and data analysis. The described approach is evaluated for pencil beams and broad beams of high-energy electrons and photons. The output of cloud-based MC simulation is identical to that produced by single-threaded implementation. For 1 million electrons, a simulation that takes 2.58 h on a local computer can be executed in 3.3 min on the cloud with 100 nodes, a 47× speed-up. Simulation time scales inversely with the number of parallel nodes. The parallelization overhead is also negligible for large simulations. Cloud computing represents one of the most important recent advances in supercomputing technology and provides a promising platform for substantially improved MC simulation. In addition to the significant speed up, cloud computing builds a layer of abstraction for high performance parallel computing, which may change the way dose calculations are performed and radiation treatment plans are completed.}, } @article {pmid21834429, year = {2011}, author = {Page, D}, title = {Technology. 
IT forecast: warming, with a chance of cloud computing.}, journal = {Hospitals & health networks}, volume = {85}, number = {7}, pages = {17-18}, pmid = {21834429}, issn = {1068-8838}, mesh = {Computer Security ; Confidentiality ; *Hospital Administration ; Humans ; Information Systems/*organization & administration ; *Internet ; }, } @article {pmid21775302, year = {2011}, author = {Grant, GR and Farkas, MH and Pizarro, AD and Lahens, NF and Schug, J and Brunk, BP and Stoeckert, CJ and Hogenesch, JB and Pierce, EA}, title = {Comparative analysis of RNA-Seq alignment algorithms and the RNA-Seq unified mapper (RUM).}, journal = {Bioinformatics (Oxford, England)}, volume = {27}, number = {18}, pages = {2518-2528}, pmid = {21775302}, issn = {1367-4811}, support = {EY020902/EY/NEI NIH HHS/United States ; R01 EY012910/EY/NEI NIH HHS/United States ; F32 EY020747/EY/NEI NIH HHS/United States ; EY12910/EY/NEI NIH HHS/United States ; R01 EY020902/EY/NEI NIH HHS/United States ; }, mesh = {Algorithms ; Animals ; Base Sequence ; Benchmarking ; Cluster Analysis ; Exons ; Gene Library ; Genome ; High-Throughput Nucleotide Sequencing ; Mice ; Models, Genetic ; Molecular Sequence Data ; RNA/genetics ; RNA Splicing ; Sequence Alignment ; Sequence Analysis, RNA/*methods ; Software ; }, abstract = {MOTIVATION: A critical task in high-throughput sequencing is aligning millions of short reads to a reference genome. Alignment is especially complicated for RNA sequencing (RNA-Seq) because of RNA splicing. A number of RNA-Seq algorithms are available, and claim to align reads with high accuracy and efficiency while detecting splice junctions. RNA-Seq data are discrete in nature; therefore, with reasonable gene models and comparative metrics RNA-Seq data can be simulated to sufficient accuracy to enable meaningful benchmarking of alignment algorithms. The exercise to rigorously compare all viable published RNA-Seq algorithms has not been performed previously.

RESULTS: We developed an RNA-Seq simulator that models the main impediments to RNA alignment, including alternative splicing, insertions, deletions, substitutions, sequencing errors and intron signal. We used this simulator to measure the accuracy and robustness of available algorithms at the base and junction levels. Additionally, we used reverse transcription-polymerase chain reaction (RT-PCR) and Sanger sequencing to validate the ability of the algorithms to detect novel transcript features such as novel exons and alternative splicing in RNA-Seq data from mouse retina. A pipeline based on BLAT was developed to explore the performance of established tools for this problem, and to compare it to the recently developed methods. This pipeline, the RNA-Seq Unified Mapper (RUM), performs comparably to the best current aligners and provides an advantageous combination of accuracy, speed and usability.

AVAILABILITY: The RUM pipeline is distributed via the Amazon Cloud and for computing clusters using the Sun Grid Engine (http://cbil.upenn.edu/RUM).

CONTACT: ggrant@pcbi.upenn.edu; epierce@mail.med.upenn.edu

SUPPLEMENTARY INFORMATION: The RNA-Seq sequence reads described in the article are deposited at GEO, accession GSE26248.}, } @article {pmid21765628, year = {2011}, author = {Singer, A and Wu, HT}, title = {Orientability and Diffusion Maps.}, journal = {Applied and computational harmonic analysis}, volume = {31}, number = {1}, pages = {44-58}, pmid = {21765628}, issn = {1063-5203}, support = {27302C0028/ES/NIEHS NIH HHS/United States ; R01 GM090200/GM/NIGMS NIH HHS/United States ; R01 GM090200-01/GM/NIGMS NIH HHS/United States ; }, abstract = {One of the main objectives in the analysis of a high dimensional large data set is to learn its geometric and topological structure. Even though the data itself is parameterized as a point cloud in a high dimensional ambient space ℝ(p), the correlation between parameters often suggests the "manifold assumption" that the data points are distributed on (or near) a low dimensional Riemannian manifold ℳ(d) embedded in ℝ(p), with d ≪ p. We introduce an algorithm that determines the orientability of the intrinsic manifold given a sufficiently large number of sampled data points. If the manifold is orientable, then our algorithm also provides an alternative procedure for computing the eigenfunctions of the Laplacian that are important in the diffusion map framework for reducing the dimensionality of the data. If the manifold is non-orientable, then we provide a modified diffusion mapping of its orientable double covering.}, } @article {pmid21763781, year = {2012}, author = {Cheng, KC and Hinton, DE and Mattingly, CJ and Planchart, A}, title = {Aquatic models, genomics and chemical risk management.}, journal = {Comparative biochemistry and physiology. 
Toxicology & pharmacology : CBP}, volume = {155}, number = {1}, pages = {169-173}, pmid = {21763781}, issn = {1532-0456}, support = {R01 AR052535/AR/NIAMS NIH HHS/United States ; R01AR052535/AR/NIAMS NIH HHS/United States ; R21 ES017828/ES/NIEHS NIH HHS/United States ; R24 RR017441/RR/NCRR NIH HHS/United States ; P42 ES010356/ES/NIEHS NIH HHS/United States ; R01ES014065/ES/NIEHS NIH HHS/United States ; R01ES019604/ES/NIEHS NIH HHS/United States ; R24RR017441/RR/NCRR NIH HHS/United States ; R01 ES019604/ES/NIEHS NIH HHS/United States ; R24 OD011152/OD/NIH HHS/United States ; R01 ES014065/ES/NIEHS NIH HHS/United States ; R21ES016892/ES/NIEHS NIH HHS/United States ; P42 ES10356/ES/NIEHS NIH HHS/United States ; R01CA242956/CA/NCI NIH HHS/United States ; }, mesh = {Animals ; Computational Biology/*methods ; *Databases, Factual ; Ecotoxicology ; Environmental Exposure/analysis ; Environmental Monitoring/methods ; Fishes/anatomy & histology/genetics/physiology ; Gene-Environment Interaction ; Genomics/methods ; Internet ; Models, Animal ; Risk Factors ; *User-Computer Interface ; }, abstract = {The 5th Aquatic Animal Models for Human Disease meeting follows four previous meetings (Nairn et al., 2001; Schmale, 2004; Schmale et al., 2007; Hinton et al., 2009) in which advances in aquatic animal models for human disease research were reported, and community discussion of future direction was pursued. At this meeting, discussion at a workshop entitled Bioinformatics and Computational Biology with Web-based Resources (20 September 2010) led to an important conclusion: Aquatic model research using feral and experimental fish, in combination with web-based access to annotated anatomical atlases and toxicological databases, yields data that advance our understanding of human gene function, and can be used to facilitate environmental management and drug development. 
We propose here that the effects of genes and environment are best appreciated within an anatomical context - the specifically affected cells and organs in the whole animal. We envision the use of automated, whole-animal imaging at cellular resolution and computational morphometry facilitated by high-performance computing and automated entry into toxicological databases, as anchors for genetic and toxicological data, and as connectors between human and model system data. These principles should be applied to both laboratory and feral fish populations, which have been virtually irreplaceable sentinels for environmental contamination that results in human morbidity and mortality. We conclude that automation, database generation, and web-based accessibility, facilitated by genomic/transcriptomic data and high-performance and cloud computing, will potentiate the unique and potentially key roles that aquatic models play in advancing systems biology, drug development, and environmental risk management.}, } @article {pmid21748412, year = {2012}, author = {Adair, L and Ledermann, E}, title = {Our path to a filmless future.}, journal = {Journal of digital imaging}, volume = {25}, number = {1}, pages = {78-80}, pmid = {21748412}, issn = {1618-727X}, mesh = {Computer Communication Networks/*organization & administration ; Forecasting ; Humans ; Quality Improvement ; Radiographic Image Enhancement/trends ; Radiology Department, Hospital/trends ; Radiology Information Systems/*trends ; United States ; X-Ray Film/*trends ; }, abstract = {Film-based radiographs are still being used to teach in a conference format, which presents several viewing challenges amongst other problems. In the age of cloud computing, which enables the use of online server storage space, this information could be used more effectively if it were digitized. However, digitizing film-based radiographs and making them available for use in the cloud is not as easy as it seems. 
In order to address the issue of digitizing the film-based radiograph libraries in our radiology department, we looked at several options. The option that we chose was a consumer-grade scanner, and this decision was based on price, resolution, shades of gray, built-in transparency function, and its physical attributes. Our goal was to digitize the film-based radiograph teaching files so they could be stored in a digital file locker such as Google Picassa for organization and quick access later. These files would constantly be updated in a Google document by residents, and this document would be called the "Living Document" based on its continuous expandability. This method would allow even the smallest radiology department to benefit from the use of modern technology to gain access to valuable information stored in film-based radiographs and give every resident the opportunity to benefit from it.}, } @article {pmid21727204, year = {2012}, author = {Schweitzer, EJ}, title = {Reconciliation of the cloud computing model with US federal electronic health record regulations.}, journal = {Journal of the American Medical Informatics Association : JAMIA}, volume = {19}, number = {2}, pages = {161-165}, pmid = {21727204}, issn = {1527-974X}, mesh = {Computer Security/*legislation & jurisprudence ; Computer Simulation ; Computers ; Electronic Health Records/*legislation & jurisprudence/organization & administration ; Federal Government ; *Government Regulation ; Health Insurance Portability and Accountability Act ; Hospital Information Systems/organization & administration ; Information Storage and Retrieval ; *Internet ; Privacy/legislation & jurisprudence ; Software ; United States ; }, abstract = {Cloud computing refers to subscription-based, fee-for-service utilization of computer hardware and software over the Internet. 
The model is gaining acceptance for business information technology (IT) applications because it allows capacity and functionality to increase on the fly without major investment in infrastructure, personnel or licensing fees. Large IT investments can be converted to a series of smaller operating expenses. Cloud architectures could potentially be superior to traditional electronic health record (EHR) designs in terms of economy, efficiency and utility. A central issue for EHR developers in the US is that these systems are constrained by federal regulatory legislation and oversight. These laws focus on security and privacy, which are well-recognized challenges for cloud computing systems in general. EHRs built with the cloud computing model can achieve acceptable privacy and security through business associate contracts with cloud providers that specify compliance requirements, performance metrics and liability sharing.}, } @article {pmid21696236, year = {2011}, author = {Chang, KW and Tsai, TY and Chen, KC and Yang, SC and Huang, HJ and Chang, TT and Sun, MF and Chen, HY and Tsai, FJ and Chen, CY}, title = {iSMART: an integrated cloud computing web server for traditional Chinese medicine for online virtual screening, de novo evolution and drug design.}, journal = {Journal of biomolecular structure & dynamics}, volume = {29}, number = {1}, pages = {243-250}, doi = {10.1080/073911011010524988}, pmid = {21696236}, issn = {1538-0254}, mesh = {*Drug Design ; Drugs, Chinese Herbal/*chemistry ; Internet ; Medicine, Chinese Traditional ; *Software ; }, } @article {pmid21685055, year = {2011}, author = {Bolouri, H and Dulepet, R and Angerman, M}, title = {Menu-driven cloud computing and resource sharing for R and Bioconductor.}, journal = {Bioinformatics (Oxford, England)}, volume = {27}, number = {16}, pages = {2309-2310}, pmid = {21685055}, issn = {1367-4811}, support = {R33 HL089102/HL/NHLBI NIH HHS/United States ; HL089102/HL/NHLBI NIH HHS/United States ; }, mesh = 
{Humans ; Programming Languages ; *Software ; *Systems Biology ; User-Computer Interface ; }, abstract = {SUMMARY: We report CRdata.org, a cloud-based, free, open-source web server for running analyses and sharing data and R scripts with others. In addition to using the free, public service, CRdata users can launch their own private Amazon Elastic Computing Cloud (EC2) nodes and store private data and scripts on Amazon's Simple Storage Service (S3) with user-controlled access rights. All CRdata services are provided via point-and-click menus.

CRdata is open-source and free under the permissive MIT License (opensource.org/licenses/mit-license.php). The source code is in Ruby (ruby-lang.org/en/) and available at: github.com/seerdata/crdata.

CONTACT: hbolouri@fhcrc.org.}, } @article {pmid21678039, year = {2012}, author = {Silva, LA and Costa, C and Oliveira, JL}, title = {A PACS archive architecture supported on cloud services.}, journal = {International journal of computer assisted radiology and surgery}, volume = {7}, number = {3}, pages = {349-358}, pmid = {21678039}, issn = {1861-6429}, mesh = {*Computers ; Diagnostic Imaging/*methods ; Humans ; *Information Storage and Retrieval ; *Internet ; Radiology Information Systems/*organization & administration ; Software/*standards ; Telemedicine/*methods ; }, abstract = {PURPOSE: Diagnostic imaging procedures have continuously increased over the last decade and this trend may continue in coming years, creating a great impact on storage and retrieval capabilities of current PACS. Moreover, many smaller centers do not have financial resources or requirements that justify the acquisition of a traditional infrastructure. Alternative solutions, such as cloud computing, may help address this emerging need.

METHODS: A tremendous amount of ubiquitous computational power, such as that provided by Google and Amazon, is used every day as a normal commodity. Taking advantage of this new paradigm, an architecture for a Cloud-based PACS archive that provides data privacy, integrity, and availability is proposed. The solution is independent from the cloud provider and the core modules were successfully instantiated in examples of two cloud computing providers. Operational metrics for several medical imaging modalities were tabulated and compared for Google Storage, Amazon S3, and LAN PACS.

RESULTS: A PACS-as-a-Service archive that provides storage of medical studies using the Cloud was developed. The results show that the solution is robust and that it is possible to store, query, and retrieve all desired studies in a similar way as in a local PACS approach.

CONCLUSION: Cloud computing is an emerging solution that promises high scalability of infrastructures, software, and applications, according to a "pay-as-you-go" business model. The presented architecture uses the cloud to setup medical data repositories and can have a significant impact on healthcare institutions by reducing IT infrastructures.}, } @article {pmid21650144, year = {2011}, author = {Dinh, AK}, title = {Cloud computing 101.}, journal = {Journal of AHIMA}, volume = {82}, number = {4}, pages = {36-7; quiz 44}, pmid = {21650144}, issn = {1060-5487}, mesh = {*Access to Information ; Education, Continuing ; *Health Care Sector ; Humans ; Information Management/*trends ; United States ; User-Computer Interface ; }, } @article {pmid21647737, year = {2011}, author = {Tsai, TY and Chang, KW and Chen, CY}, title = {iScreen: world's first cloud-computing web server for virtual screening and de novo drug design based on TCM database@Taiwan.}, journal = {Journal of computer-aided molecular design}, volume = {25}, number = {6}, pages = {525-531}, pmid = {21647737}, issn = {1573-4951}, mesh = {Binding Sites ; *Computer-Aided Design ; *Databases, Factual ; *Drug Design ; Humans ; *Internet ; Ligands ; *Medicine, Chinese Traditional ; Taiwan ; }, abstract = {The rapidly advancing researches on traditional Chinese medicine (TCM) have greatly intrigued pharmaceutical industries worldwide. To take initiative in the next generation of drug development, we constructed a cloud-computing system for TCM intelligent screening system (iScreen) based on TCM Database@Taiwan. iScreen is compacted web server for TCM docking and followed by customized de novo drug design. We further implemented a protein preparation tool that both extract protein of interest from a raw input file and estimate the size of ligand bind site. In addition, iScreen is designed in user-friendly graphic interface for users who have less experience with the command line systems. 
For customized docking, multiple docking services, including standard, in-water, pH environment, and flexible docking modes are implemented. Users can download first 200 TCM compounds of best docking results. For TCM de novo drug design, iScreen provides multiple molecular descriptors for a user's interest. iScreen is the world's first web server that employs world's largest TCM database for virtual screening and de novo drug design. We believe our web server can lead TCM research to a new era of drug development. The TCM docking and screening server is available at http://iScreen.cmu.edu.tw/.}, } @article {pmid21645377, year = {2011}, author = {Nguyen, T and Shi, W and Ruden, D}, title = {CloudAligner: A fast and full-featured MapReduce based tool for sequence mapping.}, journal = {BMC research notes}, volume = {4}, number = {}, pages = {171}, pmid = {21645377}, issn = {1756-0500}, support = {R01 ES012933/ES/NIEHS NIH HHS/United States ; R21 ES021983/ES/NIEHS NIH HHS/United States ; }, abstract = {BACKGROUND: Research in genetics has developed rapidly recently due to the aid of next generation sequencing (NGS). However, massively-parallel NGS produces enormous amounts of data, which leads to storage, compatibility, scalability, and performance issues. The Cloud Computing and MapReduce framework, which utilizes hundreds or thousands of shared computers to map sequencing reads quickly and efficiently to reference genome sequences, appears to be a very promising solution for these issues. Consequently, it has been adopted by many organizations recently, and the initial results are very promising. However, since these are only initial steps toward this trend, the developed software does not provide adequate primary functions like bisulfite, pair-end mapping, etc., in on-site software such as RMAP or BS Seeker. 
In addition, existing MapReduce-based applications were not designed to process the long reads produced by the most recent second-generation and third-generation NGS instruments and, therefore, are inefficient. Last, it is difficult for a majority of biologists untrained in programming skills to use these tools because most were developed on Linux with a command line interface.

RESULTS: To urge the trend of using Cloud technologies in genomics and prepare for advances in second- and third-generation DNA sequencing, we have built a Hadoop MapReduce-based application, CloudAligner, which achieves higher performance, covers most primary features, is more accurate, and has a user-friendly interface. It was also designed to be able to deal with long sequences. The performance gain of CloudAligner over Cloud-based counterparts (35 to 80%) mainly comes from the omission of the reduce phase. In comparison to local-based approaches, the performance gain of CloudAligner is from the partition and parallel processing of the huge reference genome as well as the reads. The source code of CloudAligner is available at http://cloudaligner.sourceforge.net/ and its web version is at http://mine.cs.wayne.edu:8080/CloudAligner/.

CONCLUSIONS: Our results show that CloudAligner is faster than CloudBurst, provides more accurate results than RMAP, and supports various input as well as output formats. In addition, with the web-based interface, it is easier to use than its counterparts.}, } @article {pmid21634276, year = {2011}, author = {Giniat, EJ}, title = {Cloud computing: innovating the business of health care.}, journal = {Healthcare financial management : journal of the Healthcare Financial Management Association}, volume = {65}, number = {5}, pages = {130-131}, pmid = {21634276}, issn = {0735-0732}, mesh = {*Computing Methodologies ; *Diffusion of Innovation ; Efficiency, Organizational ; Financial Management, Hospital/*methods ; Internet ; Leadership ; Planning Techniques ; United States ; }, } @article {pmid21584772, year = {2012}, author = {Shen, CP and Jigjidsuren, C and Dorjgochoo, S and Chen, CH and Chen, WH and Hsu, CK and Wu, JM and Hsueh, CW and Lai, MS and Tan, CT and Altangerel, E and Lai, F}, title = {A data-mining framework for transnational healthcare system.}, journal = {Journal of medical systems}, volume = {36}, number = {4}, pages = {2565-2575}, pmid = {21584772}, issn = {0148-5598}, mesh = {Algorithms ; Computer Systems ; *Data Mining/methods ; *Delivery of Health Care ; Guidelines as Topic ; Health Resources/statistics & numerical data ; Humans ; Liver Diseases/therapy ; Mongolia ; Taiwan ; User-Computer Interface ; }, abstract = {Medical resources are important and necessary in health care. Recently, the development of methods for improving the efficiency of medical resource utilization is an emerging problem. Despite evidence supporting the use of order sets in hospitals, only a small number of health information systems have successfully equipped physicians with analysis of complex order sequences from clinical pathway and clinical guideline. 
This paper presents a data-mining framework for transnational healthcare system to find alternative practices, including transfusion, pre-admission tests, and evaluation of liver diseases. However, individual countries vary with respect to geographical location, living habits, and culture, so disease risks and treatment methods also vary across countries. To realize the difference, a service-oriented architecture and cloud-computing technology are applied to analyze these medical data. The validity of the proposed system is demonstrated in two countries, Taiwan and Mongolia, to ensure the feasibility of our approach.}, } @article {pmid21565655, year = {2011}, author = {Piette, JD and Mendoza-Avelares, MO and Ganser, M and Mohamed, M and Marinec, N and Krishnan, S}, title = {A preliminary study of a cloud-computing model for chronic illness self-care support in an underdeveloped country.}, journal = {American journal of preventive medicine}, volume = {40}, number = {6}, pages = {629-632}, pmid = {21565655}, issn = {1873-2607}, support = {UL1RR024986/RR/NCRR NIH HHS/United States ; P60 DK020572/DK/NIDDK NIH HHS/United States ; P30 DK020572/DK/NIDDK NIH HHS/United States ; DK020572/DK/NIDDK NIH HHS/United States ; UL1 RR024986/RR/NCRR NIH HHS/United States ; }, mesh = {Adult ; Aged ; Blood Glucose ; *Cell Phone ; Chronic Disease ; Developing Countries ; Diabetes Mellitus/*therapy ; Feasibility Studies ; Female ; Follow-Up Studies ; Glycated Hemoglobin/metabolism ; Honduras ; Humans ; Male ; Medication Adherence ; Middle Aged ; Self Care/*methods ; Telecommunications ; United States ; *User-Computer Interface ; }, abstract = {BACKGROUND: Although interactive voice response (IVR) calls can be an effective tool for chronic disease management, many regions of the world lack the infrastructure to provide these services.

PURPOSE: This study evaluated the feasibility and potential impact of an IVR program using a cloud-computing model to improve diabetes management in Honduras.

METHODS: A single-group, pre-post study was conducted between June and August 2010. The telecommunications infrastructure was maintained on a U.S. server, and calls were directed to patients' cell phones using VoIP. Eighty-five diabetes patients in Honduras received weekly IVR disease management calls for 6 weeks, with automated follow-up e-mails to clinicians, and voicemail reports to family caregivers. Patients completed interviews at enrollment and a 6-week follow-up. Other measures included patients' glycemic control (HbA1c) and data from the IVR calling system.

RESULTS: A total of 53% of participants completed at least half of their IVR calls and 23% of participants completed 80% or more. Higher baseline blood pressures, greater diabetes burden, greater distance from the clinic, and better medication adherence were related to higher call completion rates. Nearly all participants (98%) reported that because of the program, they improved in aspects of diabetes management such as glycemic control (56%) or foot care (89%). Mean HbA1c's decreased from 10.0% at baseline to 8.9% at follow-up (p<0.01). Most participants (92%) said that if the service were available in their clinic they would use it again.

CONCLUSIONS: Cloud computing is a feasible strategy for providing IVR services globally. IVR self-care support may improve self-care and glycemic control for patients in underdeveloped countries.}, } @article {pmid21554709, year = {2011}, author = {Feng, X and Grossman, R and Stein, L}, title = {PeakRanger: a cloud-enabled peak caller for ChIP-seq data.}, journal = {BMC bioinformatics}, volume = {12}, number = {}, pages = {139}, pmid = {21554709}, issn = {1471-2105}, mesh = {*Algorithms ; Base Sequence ; Chromatin/chemistry ; *Chromatin Assembly and Disassembly ; Chromatin Immunoprecipitation/*methods/standards ; Histone Code ; Protein Binding ; Sensitivity and Specificity ; Sequence Analysis, DNA/*methods/standards ; Software ; }, abstract = {BACKGROUND: Chromatin immunoprecipitation (ChIP), coupled with massively parallel short-read sequencing (seq) is used to probe chromatin dynamics. Although there are many algorithms to call peaks from ChIP-seq datasets, most are tuned either to handle punctate sites, such as transcriptional factor binding sites, or broad regions, such as histone modification marks; few can do both. Other algorithms are limited in their configurability, performance on large data sets, and ability to distinguish closely-spaced peaks.

RESULTS: In this paper, we introduce PeakRanger, a peak caller software package that works equally well on punctate and broad sites, can resolve closely-spaced peaks, has excellent performance, and is easily customized. In addition, PeakRanger can be run in a parallel cloud computing environment to obtain extremely high performance on very large data sets. We present a series of benchmarks to evaluate PeakRanger against 10 other peak callers, and demonstrate the performance of PeakRanger on both real and synthetic data sets. We also present real world usages of PeakRanger, including peak-calling in the modENCODE project.

CONCLUSIONS: Compared to other peak callers tested, PeakRanger offers improved resolution in distinguishing extremely closely-spaced peaks. PeakRanger has above-average spatial accuracy in terms of identifying the precise location of binding events. PeakRanger also has excellent sensitivity and specificity in all benchmarks evaluated. In addition, PeakRanger offers significant improvements in run time when running on a single processor system, and very marked improvements when allowed to take advantage of the MapReduce parallel environment offered by a cloud computing resource. PeakRanger can be downloaded at the official site of modENCODE project: http://www.modencode.org/software/ranger/}, } @article {pmid21521584, year = {2011}, author = {Botts, NE and Horan, TA and Thoms, BP}, title = {HealthATM: personal health cyberinfrastructure for underserved populations.}, journal = {American journal of preventive medicine}, volume = {40}, number = {5 Suppl 2}, pages = {S115-22}, doi = {10.1016/j.amepre.2011.01.016}, pmid = {21521584}, issn = {1873-2607}, mesh = {Consumer Health Information/*organization & administration ; Female ; *Health Records, Personal ; Humans ; Los Angeles ; Male ; Medical Informatics/organization & administration ; Medically Underserved Area ; Middle Aged ; Patient Participation ; *Self Care ; User-Computer Interface ; }, abstract = {BACKGROUND: There is an opportunity for personal health record (PHR) systems to play a vital role in fostering health self-management within underserved populations. If properly designed and promoted, it is possible that patients will use PHRs to become more empowered in taking an active role toward managing their health needs.

PURPOSE: This research examines the potential of a cyberinfrastructure-based PHR to encourage patient activation in health care, while also having population health implications.

METHODS: A multi-phased, iterative research approach was used to design and evaluate a PHR system called HealthATM, which utilizes services from a cloud computing environment. These services were integrated into an ATM-style interface aimed at providing a broad range of health consumers with the ability to manage health conditions and encourage accomplishment of health goals.

RESULTS: Evaluation of the PHR included 115 patients who were clients of several free clinics in Los Angeles County. The majority of patients perceived ease of use (74%) and confidence (73%) in using the HealthATM system, and thought they would like to use it frequently (73%). Patients also indicated a belief in being responsible for their own health. However, fewer felt as though they were able to maintain necessary life changes to improve their health.

CONCLUSIONS: Findings from the field tests suggest that PHRs can be a beneficial health management tool for underserved populations. In order for these types of tools to be effective within safety-net communities, they must be technically accessible and provide meaningful opportunities to increase patient engagement in their health care.}, } @article {pmid21527096, year = {2011}, author = {Kahol, K}, title = {Integrative gaming: a framework for sustainable game-based diabetes management.}, journal = {Journal of diabetes science and technology}, volume = {5}, number = {2}, pages = {293-300}, pmid = {21527096}, issn = {1932-2968}, mesh = {Acceleration ; Diabetes Mellitus/*therapy ; Equipment Design ; Exercise ; Health Behavior ; Humans ; Motor Activity ; Movement ; Nutritional Sciences ; Patient Education as Topic ; Self Care ; *Video Games ; }, abstract = {Obesity and diabetes have reached epidemic proportions in both developing and developed nations. While doctors and caregivers stress the importance of physical exercise in maintaining a healthy lifestyle, many people have difficulty subscribing to a healthy lifestyle. Virtual reality games offer a potentially exciting aid in accelerating and sustaining behavior change. However, care needs to be taken to develop sustainable models of employing games for the management of diabetes and obesity. In this article, we propose an integrative gaming paradigm designed to combine multiple activities involving physical exercises and cognitive skills through a game-based storyline. The persuasive story acts as a motivational binder that enables a user to perform multiple activities such as running, cycling, and problem solving. These activities guide a virtual character through different stages of the game. While performing the activities in the games, users wear sensors that can measure movement (accelerometers, gyrometers, magnetometers) and sense physiological measures (heart rate, pulse oximeter oxygen saturation). 
These measures drive the game and are stored and analyzed on a cloud computing platform. A prototype integrative gaming system is described and design considerations are discussed. The system is highly configurable and allows researchers to build games for the system with ease and drive the games with different types of activities. The capabilities of the system allow for engaging and motivating the user in the long term. Clinicians can employ the system to collect clinically relevant data in a seamless manner.}, } @article {pmid21487532, year = {2011}, author = {Jeon, YJ and Park, SH and Ahn, SM and Hwang, HJ}, title = {SOLiDzipper: A High Speed Encoding Method for the Next-Generation Sequencing Data.}, journal = {Evolutionary bioinformatics online}, volume = {7}, number = {}, pages = {1-6}, pmid = {21487532}, issn = {1176-9343}, abstract = {BACKGROUND: Next-generation sequencing (NGS) methods pose computational challenges of handling large volumes of data. Although cloud computing offers a potential solution to these challenges, transferring a large data set across the internet is the biggest obstacle, which may be overcome by efficient encoding methods. When encoding is used to facilitate data transfer to the cloud, the time factor is equally as important as the encoding efficiency. Moreover, to take advantage of parallel processing in cloud computing, a parallel technique to decode and split compressed data in the cloud is essential. Hence in this review, we present SOLiDzipper, a new encoding method for NGS data.

METHODS: The basic strategy of SOLiDzipper is to divide and encode. NGS data files contain both the sequence and non-sequence information whose encoding efficiencies are different. In SOLiDzipper, encoded data are stored in a binary data block that does not contain the characteristic information of a specific sequence platform, which means that data can be decoded according to a desired platform even in cases of Illumina, Solexa or Roche 454 data.

RESULTS: The main calculation time using Crossbow was 173 minutes when 40 EC2 nodes were involved. In that case, an analysis preparation time of 464 minutes is required to encode data in the latest DNA compression method like G-SQZ and transmit it on a 183 Mbit/s bandwidth. However, it takes 194 minutes to encode and transmit data with SOLiDzipper under the same bandwidth conditions. These results indicate that the entire processing time can be reduced according to the encoding methods used, under the same network bandwidth conditions. Considering the limited network bandwidth, high-speed, high-efficiency encoding methods such as SOLiDzipper can make a significant contribution to higher productivity in labs seeking to take advantage of the cloud as an alternative to local computing.

AVAILABILITY: http://szipper.dinfree.com. Academic/non-profit: Binary available for direct download at no cost. For-profit: Submit request for for-profit license from the web-site.}, } @article {pmid21476844, year = {2011}, author = {Wolf, F and Hobby, R and Lowry, S and Bauman, A and Franza, BR and Lin, B and Rapson, S and Stewart, E and Kolker, E}, title = {Education and data-intensive science in the beginning of the 21st century.}, journal = {Omics : a journal of integrative biology}, volume = {15}, number = {4}, pages = {217-219}, doi = {10.1089/omi.2011.0009}, pmid = {21476844}, issn = {1557-8100}, mesh = {Biological Science Disciplines/*education ; }, abstract = {Data-intensive science will open up new avenues to explore, new questions to ask, and new ways to answer. Yet, this potential cannot be unlocked without new emphasis on education of the researchers gathering data, the analysts analyzing data and the cross-disciplinary participants working together to make it happen. This article is a summary of the education issues and challenges of data-intensive sciences and cloud computing as discussed in the Data-Intensive Science (DIS) workshop in Seattle, September 19-20, 2010.}, } @article {pmid21476841, year = {2011}, author = {Bernstein, PA and Wecker, D and Krishnamurthy, A and Manocha, D and Gardner, J and Kolker, N and Reschke, C and Stombaugh, J and Vagata, P and Stewart, E and Welch, D and Kolker, E}, title = {Technology and data-intensive science in the beginning of the 21st century.}, journal = {Omics : a journal of integrative biology}, volume = {15}, number = {4}, pages = {203-207}, doi = {10.1089/omi.2011.0013}, pmid = {21476841}, issn = {1557-8100}, mesh = {Biological Science Disciplines/*methods ; Technology/*methods ; }, abstract = {This article is a summary of the technology issues and challenges of data-intensive science and cloud computing as discussed in the Data-Intensive Science (DIS) workshop in Seattle, September 19-20, 2010.}, } 
@article {pmid21466336, year = {2011}, author = {Kabachinski, J}, title = {What's the forecast for cloud computing in healthcare?.}, journal = {Biomedical instrumentation \& technology}, volume = {45}, number = {2}, pages = {146-150}, doi = {10.2345/0899-8205-45.2.146}, pmid = {21466336}, issn = {0899-8205}, mesh = {Computer Communication Networks/*trends ; Delivery of Health Care/*trends ; Medical Informatics/*trends ; United States ; }, } @article {pmid21395635, year = {2011}, author = {Acquesta, AD and Sánchez, EY and Porta, A and Jacovkis, PM}, title = {A method for computing the damage level due to the exposure to an airborne chemical with a time-varying concentration.}, journal = {Risk analysis : an official publication of the Society for Risk Analysis}, volume = {31}, number = {9}, pages = {1451-1469}, doi = {10.1111/j.1539-6924.2011.01594.x}, pmid = {21395635}, issn = {1539-6924}, mesh = {Air Pollutants/*toxicity ; *Environmental Exposure ; }, abstract = {The calculation of damage level due to the exposure to a toxic cloud is usually not included in most popular software, or it is included using techniques that do not take into account the variation in concentration over a period of time. In this work, a method is introduced for calculating the temporal evolution of the potential damage level and to obtain a more precise and descriptive estimation of this level. The proposed goal is: to estimate the maximum and minimum damage level experienced by a population due to the exposure to an airborne chemical with a time-varying concentration; to be able to assess the damage level experienced in a progressive way, as the exposure to the airborne chemical occurs. The method relies on transformations of time-concentration pairs on a continuum of damage level curves based on the available guideline levels, obtaining maximum and minimum approximations of the expected damage level for any exposure duration. 
Consequently, applying this method to transport model output data and demographic information, damage evolution in relation to time and space can be predicted, as well as its effect on the local population, which enables the determination of threat zones. The comparison between the proposed method and the current (Spanish and ALOHA) ones showed that the former can offer a more precise estimation and a more descriptive approach of the potential damage level. This method can be used by atmospheric dispersion models to compute damage level and graphically display the regions exposed to each guideline level on area maps.}, } @article {pmid21389008, year = {2011}, author = {Roy, NC and Altermann, E and Park, ZA and McNabb, WC}, title = {A comparison of analog and Next-Generation transcriptomic tools for mammalian studies.}, journal = {Briefings in functional genomics}, volume = {10}, number = {3}, pages = {135-150}, doi = {10.1093/bfgp/elr005}, pmid = {21389008}, issn = {2041-2657}, mesh = {Animals ; Base Sequence ; Gene Expression Profiling/*methods ; Genome ; Humans ; Mammals/genetics ; Oligonucleotide Array Sequence Analysis ; Sequence Analysis, RNA ; Software ; }, abstract = {This review focuses on tools for studying a cell's transcriptome, the collection of all RNA transcripts produced at a specific time, and the tools available for determining how these changes in gene expression relate to the functional changes in an organism. While the microarray-based (analog) gene-expression profiling technology has dominated the 'omics' era, Next-Generation Sequencing based gene-expression profiling (RNA-Seq) is likely to replace this analog technology in the future. RNA-Seq shows much promise for transcriptomic studies as the genes of interest do not have to be known a priori, new classes of RNA, SNPs and alternative splice variants can be detected, and it is also theoretically possible to detect transcripts from all biologically relevant abundance classes. 
However, the technology also brings with it new issues to resolve: the specific technical properties of RNA-Seq data differ to those of analog data, leading to novel systematic biases which must be accounted for when analysing this type of data. Additionally, multireads and splice junctions can cause problems when mapping the sequences back to a genome, and concepts such as cloud computing may be required because of the massive amounts of data generated.}, } @article {pmid21367868, year = {2011}, author = {Wang, Z and Wang, Y and Tan, KL and Wong, L and Agrawal, D}, title = {eCEO: an efficient Cloud Epistasis cOmputing model in genome-wide association study.}, journal = {Bioinformatics (Oxford, England)}, volume = {27}, number = {8}, pages = {1045-1051}, doi = {10.1093/bioinformatics/btr091}, pmid = {21367868}, issn = {1367-4811}, mesh = {*Epistasis, Genetic ; *Genome-Wide Association Study ; *Models, Statistical ; Phenotype ; *Polymorphism, Single Nucleotide ; Software ; }, abstract = {MOTIVATION: Recent studies suggested that a combination of multiple single nucleotide polymorphisms (SNPs) could have more significant associations with a specific phenotype. However, to discover epistasis, the epistatic interactions of SNPs, in a large number of SNPs, is a computationally challenging task. We are, therefore, motivated to develop efficient and effective solutions for identifying epistatic interactions of SNPs.

RESULTS: In this article, we propose an efficient Cloud-based Epistasis cOmputing (eCEO) model for large-scale epistatic interaction in genome-wide association study (GWAS). Given a large number of combinations of SNPs, our eCEO model is able to distribute them to balance the load across the processing nodes. Moreover, our eCEO model can efficiently process each combination of SNPs to determine the significance of its association with the phenotype. We have implemented and evaluated our eCEO model on our own cluster of more than 40 nodes. The experiment results demonstrate that the eCEO model is computationally efficient, flexible, scalable and practical. In addition, we have also deployed our eCEO model on the Amazon Elastic Compute Cloud. Our study further confirms its efficiency and ease of use in a public cloud.

AVAILABILITY: The source code of eCEO is available at http://www.comp.nus.edu.sg/~wangzk/eCEO.html.

CONTACT: wangzhengkui@nus.edu.sg.}, } @article {pmid21358005, year = {2011}, author = {Fober, T and Glinca, S and Klebe, G and Hüllermeier, E}, title = {Superposition and alignment of labeled point clouds.}, journal = {IEEE/ACM transactions on computational biology and bioinformatics}, volume = {8}, number = {6}, pages = {1653-1666}, doi = {10.1109/TCBB.2011.42}, pmid = {21358005}, issn = {1557-9964}, mesh = {*Algorithms ; Binding Sites ; Proteins/*chemistry ; Sequence Alignment/*methods ; Sequence Analysis, Protein/methods ; }, abstract = {Geometric objects are often represented approximately in terms of a finite set of points in three-dimensional Euclidean space. In this paper, we extend this representation to what we call labeled point clouds. A labeled point cloud is a finite set of points, where each point is not only associated with a position in three-dimensional space, but also with a discrete class label that represents a specific property. This type of model is especially suitable for modeling biomolecules such as proteins and protein binding sites, where a label may represent an atom type or a physico-chemical property. Proceeding from this representation, we address the question of how to compare two labeled point clouds in terms of their similarity. Using fuzzy modeling techniques, we develop a suitable similarity measure as well as an efficient evolutionary algorithm to compute it. Moreover, we consider the problem of establishing an alignment of the structures in the sense of a one-to-one correspondence between their basic constituents. From a biological point of view, alignments of this kind are of great interest, since mutually corresponding molecular constituents offer important information about evolution and heredity, and can also serve as a means to explain a degree of similarity. In this paper, we therefore develop a method for computing pairwise or multiple alignments of labeled point clouds. 
To this end, we proceed from an optimal superposition of the corresponding point clouds and construct an alignment which is as much as possible in agreement with the neighborhood structure established by this superposition. We apply our methods to the structural analysis of protein binding sites.}, } @article {pmid21301474, year = {2011}, author = {Schadt, EE and Linderman, MD and Sorenson, J and Lee, L and Nolan, GP}, title = {Cloud and heterogeneous computing solutions exist today for the emerging big data problems in biology.}, journal = {Nature reviews. Genetics}, volume = {12}, number = {3}, pages = {224}, pmid = {21301474}, issn = {1471-0064}, support = {R01 CA130826/CA/NCI NIH HHS/United States ; }, mesh = {Computational Biology/*economics ; Computers/*economics ; Software/*economics ; }, } @article {pmid21273473, year = {2011}, author = {Fox, A}, title = {Computer science. Cloud computing--what's in it for me as a scientist?.}, journal = {Science (New York, N.Y.)}, volume = {331}, number = {6016}, pages = {406-407}, doi = {10.1126/science.1198981}, pmid = {21273473}, issn = {1095-9203}, } @article {pmid21258651, year = {2010}, author = {Kudtarkar, P and Deluca, TF and Fusaro, VA and Tonellato, PJ and Wall, DP}, title = {Cost-effective cloud computing: a case study using the comparative genomics tool, roundup.}, journal = {Evolutionary bioinformatics online}, volume = {6}, number = {}, pages = {197-203}, pmid = {21258651}, issn = {1176-9343}, support = {R03 LM009261/LM/NLM NIH HHS/United States ; T15 LM007092/LM/NLM NIH HHS/United States ; }, abstract = {BACKGROUND: Comparative genomics resources, such as ortholog detection tools and repositories are rapidly increasing in scale and complexity. Cloud computing is an emerging technological paradigm that enables researchers to dynamically build a dedicated virtual cluster and may represent a valuable alternative for large computational tools in bioinformatics. 
In the present manuscript, we optimize the computation of a large-scale comparative genomics resource-Roundup-using cloud computing, describe the proper operating principles required to achieve computational efficiency on the cloud, and detail important procedures for improving cost-effectiveness to ensure maximal computation at minimal costs.

METHODS: Utilizing the comparative genomics tool, Roundup, as a case study, we computed orthologs among 902 fully sequenced genomes on Amazon's Elastic Compute Cloud. For managing the ortholog processes, we designed a strategy to deploy the web service, Elastic MapReduce, and maximize the use of the cloud while simultaneously minimizing costs. Specifically, we created a model to estimate cloud runtime based on the size and complexity of the genomes being compared that determines in advance the optimal order of the jobs to be submitted.

RESULTS: We computed orthologous relationships for 245,323 genome-to-genome comparisons on Amazon's computing cloud, a computation that required just over 200 hours and cost $8,000 USD, at least 40% less than expected under a strategy in which genome comparisons were submitted to the cloud randomly with respect to runtime. Our cost savings projections were based on a model that not only demonstrates the optimal strategy for deploying RSD to the cloud, but also finds the optimal cluster size to minimize waste and maximize usage. Our cost-reduction model is readily adaptable for other comparative genomics tools and potentially of significant benefit to labs seeking to take advantage of the cloud as an alternative to local computing infrastructure.}, } @article {pmid21210984, year = {2010}, author = {Möller, S and Krabbenhöft, HN and Tille, A and Paleino, D and Williams, A and Wolstencroft, K and Goble, C and Holland, R and Belhachemi, D and Plessy, C}, title = {Community-driven computational biology with Debian Linux.}, journal = {BMC bioinformatics}, volume = {11 Suppl 12}, number = {Suppl 12}, pages = {S5}, pmid = {21210984}, issn = {1471-2105}, mesh = {Computational Biology/*methods ; Internet ; *Software ; }, abstract = {BACKGROUND: The Open Source movement and its technologies are popular in the bioinformatics community because they provide freely available tools and resources for research. In order to feed the steady demand for updates on software and associated data, a service infrastructure is required for sharing and providing these tools to heterogeneous computing environments.

RESULTS: The Debian Med initiative provides ready and coherent software packages for medical informatics and bioinformatics. These packages can be used together in Taverna workflows via the UseCase plugin to manage execution on local or remote machines. If such packages are available in cloud computing environments, the underlying hardware and the analysis pipelines can be shared along with the software.

CONCLUSIONS: Debian Med closes the gap between developers and users. It provides a simple method for offering new releases of software and data resources, thus provisioning a local infrastructure for computational biology. For geographically distributed teams it can ensure they are working on the same versions of tools, in the same conditions. This contributes to the world-wide networking of researchers.}, } @article {pmid21210983, year = {2010}, author = {Afgan, E and Baker, D and Coraor, N and Chapman, B and Nekrutenko, A and Taylor, J}, title = {Galaxy CloudMan: delivering cloud compute clusters.}, journal = {BMC bioinformatics}, volume = {11 Suppl 12}, number = {Suppl 12}, pages = {S4}, pmid = {21210983}, issn = {1471-2105}, support = {U41 HG006620/HG/NHGRI NIH HHS/United States ; HG005133/HG/NHGRI NIH HHS/United States ; HG005542/HG/NHGRI NIH HHS/United States ; HG004909/HG/NHGRI NIH HHS/United States ; }, mesh = {Cluster Analysis ; Computational Biology/*methods ; Internet ; *Software ; }, abstract = {BACKGROUND: Widespread adoption of high-throughput sequencing has greatly increased the scale and sophistication of computational infrastructure needed to perform genomic research. An alternative to building and maintaining local infrastructure is "cloud computing", which, in principle, offers on demand access to flexible computational infrastructure. However, cloud computing resources are not yet suitable for immediate "as is" use by experimental biologists.

RESULTS: We present a cloud resource management system that makes it possible for individual researchers to compose and control an arbitrarily sized compute cluster on Amazon's EC2 cloud infrastructure without any informatics requirements. Within this system, an entire suite of biological tools packaged by the NERC Bio-Linux team (http://nebc.nerc.ac.uk/tools/bio-linux) is available for immediate consumption. The provided solution makes it possible, using only a web browser, to create a completely configured compute cluster ready to perform analysis in less than five minutes. Moreover, we provide an automated method for building custom deployments of cloud resources. This approach promotes reproducibility of results and, if desired, allows individuals and labs to add or customize an otherwise available cloud system to better meet their needs.

CONCLUSIONS: The expected knowledge and associated effort with deploying a compute cluster in the Amazon EC2 cloud is not trivial. The solution presented in this paper eliminates these barriers, making it possible for researchers to deploy exactly the amount of computing power they need, combined with a wealth of existing analysis software, to handle the ongoing data deluge.}, } @article {pmid21210982, year = {2010}, author = {Qiu, J and Ekanayake, J and Gunarathne, T and Choi, JY and Bae, SH and Li, H and Zhang, B and Wu, TL and Ruan, Y and Ekanayake, S and Hughes, A and Fox, G}, title = {Hybrid cloud and cluster computing paradigms for life science applications.}, journal = {BMC bioinformatics}, volume = {11 Suppl 12}, number = {Suppl 12}, pages = {S3}, pmid = {21210982}, issn = {1471-2105}, support = {RC2HG005806-02/HG/NHGRI NIH HHS/United States ; }, mesh = {Biological Science Disciplines ; Cluster Analysis ; Computational Biology/*methods ; Data Mining ; Metagenomics ; *Software ; }, abstract = {BACKGROUND: Clouds and MapReduce have shown themselves to be a broadly useful approach to scientific computing especially for parallel data intensive applications. However they have limited applicability to some areas such as data mining because MapReduce has poor performance on problems with an iterative structure present in the linear algebra that underlies much data analysis. Such problems can be run efficiently on clusters using MPI leading to a hybrid cloud and cluster environment. This motivates the design and implementation of an open source Iterative MapReduce system Twister.

RESULTS: Comparisons of Amazon, Azure, and traditional Linux and Windows environments on common applications have shown encouraging performance and usability comparisons in several important non iterative cases. These are linked to MPI applications for final stages of the data analysis. Further we have released the open source Twister Iterative MapReduce and benchmarked it against basic MapReduce (Hadoop) and MPI in information retrieval and life sciences applications.

CONCLUSIONS: The hybrid cloud (MapReduce) and cluster (MPI) approach offers an attractive production environment while Twister promises a uniform programming environment for many Life Sciences applications.

METHODS: We used commercial clouds Amazon and Azure and the NSF resource FutureGrid to perform detailed comparisons and evaluations of different approaches to data intensive computing. Several applications were developed in MPI, MapReduce and Twister in these different environments.}, } @article {pmid21210981, year = {2010}, author = {O'Connor, BD and Merriman, B and Nelson, SF}, title = {SeqWare Query Engine: storing and searching sequence data in the cloud.}, journal = {BMC bioinformatics}, volume = {11 Suppl 12}, number = {Suppl 12}, pages = {S2}, pmid = {21210981}, issn = {1471-2105}, support = {U24NS/NS/NINDS NIH HHS/United States ; }, mesh = {Databases, Nucleic Acid ; Genome, Human ; Genomics/*methods ; High-Throughput Nucleotide Sequencing ; Humans ; Sequence Analysis, DNA/methods ; *Software ; }, abstract = {BACKGROUND: Since the introduction of next-generation DNA sequencers the rapid increase in sequencer throughput, and associated drop in costs, has resulted in more than a dozen human genomes being resequenced over the last few years. These efforts are merely a prelude for a future in which genome resequencing will be commonplace for both biomedical research and clinical applications. The dramatic increase in sequencer output strains all facets of computational infrastructure, especially databases and query interfaces. The advent of cloud computing, and a variety of powerful tools designed to process petascale datasets, provide a compelling solution to these ever increasing demands.

RESULTS: In this work, we present the SeqWare Query Engine which has been created using modern cloud computing technologies and designed to support databasing information from thousands of genomes. Our backend implementation was built using the highly scalable, NoSQL HBase database from the Hadoop project. We also created a web-based frontend that provides both a programmatic and interactive query interface and integrates with widely used genome browsers and tools. Using the query engine, users can load and query variants (SNVs, indels, translocations, etc) with a rich level of annotations including coverage and functional consequences. As a proof of concept we loaded several whole genome datasets including the U87MG cell line. We also used a glioblastoma multiforme tumor/normal pair to both profile performance and provide an example of using the Hadoop MapReduce framework within the query engine. This software is open source and freely available from the SeqWare project (http://seqware.sourceforge.net).

CONCLUSIONS: The SeqWare Query Engine provided an easy way to make the U87MG genome accessible to programmers and non-programmers alike. This enabled a faster and more open exploration of results, quicker tuning of parameters for heuristic variant calling filters, and a common data interface to simplify development of analytical tools. The range of data types supported, the ease of querying and integrating with existing tools, and the robust scalability of the underlying cloud-based technologies make SeqWare Query Engine a natural fit for storing and searching ever-growing genome sequence datasets.}, } @article {pmid21129207, year = {2010}, author = {Ropella, GE and Hunt, CA}, title = {Cloud computing and validation of expandable in silico livers.}, journal = {BMC systems biology}, volume = {4}, number = {}, pages = {168}, pmid = {21129207}, issn = {1752-0509}, mesh = {Animals ; Cluster Analysis ; Computational Biology/*methods ; *Computers ; Liver/*metabolism ; Rats ; Xenobiotics/metabolism ; }, abstract = {BACKGROUND: In Silico Livers (ISLs) are works in progress. They are used to challenge multilevel, multi-attribute, mechanistic hypotheses about the hepatic disposition of xenobiotics coupled with hepatic responses. To enhance ISL-to-liver mappings, we added discrete time metabolism, biliary elimination, and bolus dosing features to a previously validated ISL and initiated re-validated experiments that required scaling experiments to use more simulated lobules than previously, more than could be achieved using the local cluster technology. Rather than dramatically increasing the size of our local cluster we undertook the re-validation experiments using the Amazon EC2 cloud platform. So doing required demonstrating the efficacy of scaling a simulation to use more cluster nodes and assessing the scientific equivalence of local cluster validation experiments with those executed using the cloud platform.

RESULTS: The local cluster technology was duplicated in the Amazon EC2 cloud platform. Synthetic modeling protocols were followed to identify a successful parameterization. Experiment sample sizes (number of simulated lobules) on both platforms were 49, 70, 84, and 152 (cloud only). Experimental indistinguishability was demonstrated for ISL outflow profiles of diltiazem using both platforms for experiments consisting of 84 or more samples. The process was analogous to demonstration of results equivalency from two different wet-labs.

CONCLUSIONS: The results provide additional evidence that disposition simulations using ISLs can cover the behavior space of liver experiments in distinct experimental contexts (there is in silico-to-wet-lab phenotype similarity). The scientific value of experimenting with multiscale biomedical models has been limited to research groups with access to computer clusters. The availability of cloud technology coupled with the evidence of scientific equivalency has lowered the barrier and will greatly facilitate model sharing as well as provide straightforward tools for scaling simulations to encompass greater detail with no extra investment in hardware.}, } @article {pmid21097207, year = {2010}, author = {Doukas, C and Pliakas, T and Maglogiannis, I}, title = {Mobile healthcare information management utilizing Cloud Computing and Android OS.}, journal = {Annual International Conference of the IEEE Engineering in Medicine and Biology Society. IEEE Engineering in Medicine and Biology Society. Annual International Conference}, volume = {2010}, number = {}, pages = {1037-1040}, doi = {10.1109/IEMBS.2010.5628061}, pmid = {21097207}, issn = {2375-7477}, mesh = {*Cell Phone ; *Computer Communication Networks ; *Database Management Systems ; Greece ; *Health Records, Personal ; *Programming Languages ; *Radiology Information Systems ; Telemedicine/*methods ; *User-Computer Interface ; }, abstract = {Cloud Computing provides functionality for managing information data in a distributed, ubiquitous and pervasive manner supporting several platforms, systems and applications. This work presents the implementation of a mobile system that enables electronic healthcare data storage, update and retrieval using Cloud Computing. The mobile application is developed using Google's Android operating system and provides management of patient health records and medical images (supporting DICOM format and JPEG2000 coding). 
The developed system has been evaluated using the Amazon's S3 cloud service. This article summarizes the implementation details and presents initial results of the system in practice.}, } @article {pmid21097152, year = {2010}, author = {Al-Shammary, D and Khalil, I}, title = {Compression-based aggregation model for medical web services.}, journal = {Annual International Conference of the IEEE Engineering in Medicine and Biology Society. IEEE Engineering in Medicine and Biology Society. Annual International Conference}, volume = {2010}, number = {}, pages = {6174-6177}, doi = {10.1109/IEMBS.2010.5627759}, pmid = {21097152}, issn = {2375-7477}, mesh = {Data Collection/*methods ; Data Compression/*methods ; *Health Services ; Hospital Information Systems/organization & administration ; *Internet ; *Models, Theoretical ; Programming Languages ; }, abstract = {Many organizations such as hospitals have adopted Cloud Web services in applying their network services to avoid investing heavily computing infrastructure. SOAP (Simple Object Access Protocol) is the basic communication protocol of Cloud Web services that is XML based protocol. Generally,Web services often suffer congestions and bottlenecks as a result of the high network traffic that is caused by the large XML overhead size. At the same time, the massive load on Cloud Web services in terms of the large demand of client requests has resulted in the same problem. 
In this paper, two XML-aware aggregation techniques that are based on exploiting the compression concepts are proposed in order to aggregate the medical Web messages and achieve higher message size reduction.}, } @article {pmid21096347, year = {2010}, author = {Shen, CP and Chen, WH and Chen, JM and Hsu, KP and Lin, JW and Chiu, MJ and Chen, CH and Lai, F}, title = {Bio-signal analysis system design with support vector machines based on cloud computing service architecture.}, journal = {Annual International Conference of the IEEE Engineering in Medicine and Biology Society. IEEE Engineering in Medicine and Biology Society. Annual International Conference}, volume = {2010}, number = {}, pages = {1421-1424}, doi = {10.1109/IEMBS.2010.5626713}, pmid = {21096347}, issn = {2375-7477}, mesh = {*Algorithms ; *Computer Communication Networks ; Diagnosis, Computer-Assisted/*methods ; Electroencephalography/*methods ; Epilepsy/*diagnosis ; Humans ; Pattern Recognition, Automated/*methods ; Reproducibility of Results ; Sensitivity and Specificity ; *Signal Processing, Computer-Assisted ; }, abstract = {Today, many bio-signals such as Electroencephalography (EEG) are recorded in digital format. It is an emerging research area of analyzing these digital bio-signals to extract useful health information in biomedical engineering. In this paper, a bio-signal analyzing cloud computing architecture, called BACCA, is proposed. The system has been designed with the purpose of seamless integration into the National Taiwan University Health Information System. Based on the concept of. NET Service Oriented Architecture, the system integrates heterogeneous platforms, protocols, as well as applications. In this system, we add modern analytic functions such as approximated entropy and adaptive support vector machine (SVM). 
It is shown that the overall accuracy of EEG bio-signal analysis has increased to nearly 98% for different data sets, including open-source and clinical data sets.}, } @article {pmid21062816, year = {2011}, author = {de Lima Morais, DA and Fang, H and Rackham, OJ and Wilson, D and Pethica, R and Chothia, C and Gough, J}, title = {SUPERFAMILY 1.75 including a domain-centric gene ontology method.}, journal = {Nucleic acids research}, volume = {39}, number = {Database issue}, pages = {D427-34}, pmid = {21062816}, issn = {1362-4962}, support = {BB/G022771/1/BB_/Biotechnology and Biological Sciences Research Council/United Kingdom ; MC_U105184318/MRC_/Medical Research Council/United Kingdom ; G022771/BB_/Biotechnology and Biological Sciences Research Council/United Kingdom ; }, mesh = {*Databases, Protein ; Genes ; Phenotype ; Phylogeny ; *Protein Structure, Tertiary ; Proteins/chemistry/*classification/genetics ; Sequence Analysis, Protein ; Software ; }, abstract = {The SUPERFAMILY resource provides protein domain assignments at the structural classification of protein (SCOP) superfamily level for over 1400 completely sequenced genomes, over 120 metagenomes and other gene collections such as UniProt. All models and assignments are available to browse and download at http://supfam.org. A new hidden Markov model library based on SCOP 1.75 has been created and a previously ignored class of SCOP, coiled coils, is now included. Our scoring component now uses HMMER3, which is in orders of magnitude faster and produces superior results. A cloud-based pipeline was implemented and is publicly available at Amazon web services elastic computer cloud. The SUPERFAMILY reference tree of life has been improved allowing the user to highlight a chosen superfamily, family or domain architecture on the tree of life. The most significant advance in SUPERFAMILY is that now it contains a domain-based gene ontology (GO) at the superfamily and family levels. 
A new methodology was developed to ensure a high quality GO annotation. The new methodology is general purpose and has been used to produce domain-based phenotypic ontologies in addition to GO.}, } @article {pmid21062814, year = {2011}, author = {Kaminuma, E and Kosuge, T and Kodama, Y and Aono, H and Mashima, J and Gojobori, T and Sugawara, H and Ogasawara, O and Takagi, T and Okubo, K and Nakamura, Y}, title = {DDBJ progress report.}, journal = {Nucleic acids research}, volume = {39}, number = {Database issue}, pages = {D22-7}, pmid = {21062814}, issn = {1362-4962}, mesh = {Amino Acid Sequence ; *Databases, Nucleic Acid ; Databases, Protein ; Genomics ; Molecular Sequence Annotation ; Patents as Topic ; Software ; }, abstract = {The DNA Data Bank of Japan (DDBJ, http://www.ddbj.nig.ac.jp) provides a nucleotide sequence archive database and accompanying database tools for sequence submission, entry retrieval and annotation analysis. The DDBJ collected and released 3,637,446 entries/2,272,231,889 bases between July 2009 and June 2010. A highlight of the released data was archive datasets from next-generation sequencing reads of Japanese rice cultivar, Koshihikari submitted by the National Institute of Agrobiological Sciences. In this period, we started a new archive for quantitative genomics data, the DDBJ Omics aRchive (DOR). The DOR stores quantitative data both from the microarray and high-throughput new sequencing platforms. Moreover, we improved the content of the DDBJ patent sequence, released a new submission tool of the DDBJ Sequence Read Archive (DRA) which archives massive raw sequencing reads, and enhanced a cloud computing-based analytical system from sequencing reads, the DDBJ Read Annotation Pipeline. 
In this article, we describe these new functions of the DDBJ databases and support tools.}, } @article {pmid21057489, year = {2010}, author = {Dudley, JT and Butte, AJ}, title = {In silico research in the era of cloud computing.}, journal = {Nature biotechnology}, volume = {28}, number = {11}, pages = {1181-1185}, pmid = {21057489}, issn = {1546-1696}, support = {R01 GM079719/GM/NIGMS NIH HHS/United States ; T15 LM007033/LM/NLM NIH HHS/United States ; }, mesh = {Computational Biology/*methods ; *Computing Methodologies ; Databases as Topic ; Reproducibility of Results ; }, } @article {pmid20841902, year = {2010}, author = {Mohammed, Y and Dickmann, F and Sax, U and von Voigt, G and Smith, M and Rienhoff, O}, title = {Reaching for the cloud: on the lessons learned from grid computing technology transfer process to the biomedical community.}, journal = {Studies in health technology and informatics}, volume = {160}, number = {Pt 2}, pages = {1339-1343}, pmid = {20841902}, issn = {0926-9630}, mesh = {Biological Science Disciplines ; Biomedical Technology/*methods ; Computer Communication Networks ; Medical Informatics/methods ; Technology Transfer ; }, abstract = {Natural scientists such as physicists pioneered the sharing of computing resources, which led to the creation of the Grid. The inter domain transfer process of this technology has hitherto been an intuitive process without in depth analysis. Some difficulties facing the life science community in this transfer can be understood using the Bozeman's "Effectiveness Model of Technology Transfer". Bozeman's and classical technology transfer approaches deal with technologies which have achieved certain stability. Grid and Cloud solutions are technologies, which are still in flux. We show how Grid computing creates new difficulties in the transfer process that are not considered in Bozeman's model. 
We show why the success of healthgrids should be measured by the qualified scientific human capital and the opportunities created, and not primarily by the market impact. We conclude with recommendations that can help improve the adoption of Grid and Cloud solutions into the biomedical community. These results give a more concise explanation of the difficulties many life science IT projects are facing in the late funding periods, and show leveraging steps that can help overcoming the "vale of tears".}, } @article {pmid20841809, year = {2010}, author = {Feldman, H and Reti, S and Kaldany, E and Safran, C}, title = {Deployment of a highly secure clinical data repository in an insecure international environment.}, journal = {Studies in health technology and informatics}, volume = {160}, number = {Pt 2}, pages = {869-873}, pmid = {20841809}, issn = {0926-9630}, mesh = {*Computer Security ; Databases, Factual ; *Electronic Health Records/standards ; Humans ; Internet ; }, abstract = {We have designed and deployed a novel approach to protecting Personal Healthcare Information in environments where a data center is remote and its physical security cannot be assured. Our "KeyServer" methodology uses a server-client-server architecture to dynamically serve keys from a distant server in a separate secure data center in the US. The approach combines pre-existing and novel techniques into a layered protective barrier around compromise of patient data. We describe how this technology provides scalable security that makes security breaches highly unlikely. With some careful planning a Clinical Data Repositories fed by Electronic Health Records can be placed in relatively insecure settings, with a high-level of security surrounding data theft, even in the event of hardware theft. 
Such security architecture is ideal for not only developing nations, but for the evolution of health information to cloud computing platforms.}, } @article {pmid20802903, year = {2010}, author = {Souza, EP and Cabrera, EM and Braile, DM}, title = {The article of the future.}, journal = {Revista brasileira de cirurgia cardiovascular : orgao oficial da Sociedade Brasileira de Cirurgia Cardiovascular}, volume = {25}, number = {2}, pages = {141-148}, doi = {10.1590/s0102-76382010000200003}, pmid = {20802903}, mesh = {Computer Communication Networks/*trends ; Forecasting ; Humans ; *Information Dissemination ; Publishing/*trends ; }, abstract = {Technological advances and the Internet have contributed to the increased disclosure and updating of knowledge and science. Scientific papers are considered the best form of disclosure of information and have been undergoing many changes, not on their way of development, but on the structure of publication. The Future paper, a name for this new structure, uses hypermediatic resources, allowing a quick, easy and organized access to these items online. The exchange of information, comments and criticisms can be performed in real time, providing agility in science disclosure. 
The trend for the future of documents, both from professionals or enterprises, is the "cloud computing", in which all documents will be developed and updated with the use of various equipments: computer, palm, netbook, ipad, without need to have the software installed on your computer, requiring only an Internet connection.}, } @article {pmid20796305, year = {2010}, author = {Reese, MG and Moore, B and Batchelor, C and Salas, F and Cunningham, F and Marth, GT and Stein, L and Flicek, P and Yandell, M and Eilbeck, K}, title = {A standard variation file format for human genome sequences.}, journal = {Genome biology}, volume = {11}, number = {8}, pages = {R88}, pmid = {20796305}, issn = {1474-760X}, support = {1RC2HG005619/HG/NHGRI NIH HHS/United States ; R01 HG004719-01/HG/NHGRI NIH HHS/United States ; R44 HG003667/HG/NHGRI NIH HHS/United States ; P41HG002273/HG/NHGRI NIH HHS/United States ; R44 HG002993/HG/NHGRI NIH HHS/United States ; R44 HG006579/HG/NHGRI NIH HHS/United States ; 5R01HG004341/HG/NHGRI NIH HHS/United States ; R01 HG004719-02/HG/NHGRI NIH HHS/United States ; 2R44HG003667/HG/NHGRI NIH HHS/United States ; R01 HG004719/HG/NHGRI NIH HHS/United States ; R01 HG004719-04/HG/NHGRI NIH HHS/United States ; R43 LM010874/LM/NLM NIH HHS/United States ; 2R44HG002991/HG/NHGRI NIH HHS/United States ; R44 HG002991/HG/NHGRI NIH HHS/United States ; R01 HG004719-02S1/HG/NHGRI NIH HHS/United States ; R01 HG004719-03/HG/NHGRI NIH HHS/United States ; }, mesh = {Base Sequence ; *Databases, Nucleic Acid ; Genetic Variation ; Genome, Human/*genetics ; Humans ; *Information Storage and Retrieval ; Internet ; }, abstract = {Here we describe the Genome Variation Format (GVF) and the 10Gen dataset. GVF, an extension of Generic Feature Format version 3 (GFF3), is a simple tab-delimited format for DNA variant files, which uses Sequence Ontology to describe genome variation data. 
The 10Gen dataset, ten human genomes in GVF format, is freely available for community analysis from the Sequence Ontology website and from an Amazon elastic block storage (EBS) snapshot for use in Amazon's EC2 cloud computing environment.}, } @article {pmid20734101, year = {2011}, author = {Philbin, J and Prior, F and Nagy, P}, title = {Will the next generation of PACS be sitting on a cloud?.}, journal = {Journal of digital imaging}, volume = {24}, number = {2}, pages = {179-183}, pmid = {20734101}, issn = {1618-727X}, mesh = {*Computer Communication Networks ; Humans ; Information Storage and Retrieval/methods ; Radiology Department, Hospital/organization & administration ; *Radiology Information Systems ; }, abstract = {Cloud computing has gathered significant attention from information technology (IT) vendors in providing massively scalable applications as well as highly managed remote services. What is cloud computing and how will it impact the medical IT market? Will the next generation of picture archiving and communication systems be leveraging cloud technology?}, } @article {pmid20717155, year = {2010}, author = {Schadt, EE and Linderman, MD and Sorenson, J and Lee, L and Nolan, GP}, title = {Computational solutions to large-scale data management and analysis.}, journal = {Nature reviews. Genetics}, volume = {11}, number = {9}, pages = {647-657}, pmid = {20717155}, issn = {1471-0064}, support = {HHSN268201000034C/HL/NHLBI NIH HHS/United States ; R01 CA130826/CA/NCI NIH HHS/United States ; R01 CA130826-04/CA/NCI NIH HHS/United States ; }, mesh = {Animals ; Computational Biology/*methods ; Genomics/methods ; Humans ; Sequence Analysis, DNA/methods ; }, abstract = {Today we can generate hundreds of gigabases of DNA and RNA sequencing data in a week for less than US$5,000. 
The astonishing rate of data generation by these low-cost, high-throughput technologies in genomics is being matched by that of other technologies, such as real-time imaging and mass spectrometry-based flow cytometry. Success in the life sciences will depend on our ability to properly interpret the large-scale, high-dimensional data sets that are generated by these technologies, which in turn requires us to adopt advances in informatics. Here we discuss how we can master the different types of computational environments that exist - such as cloud and heterogeneous computing - to successfully tackle our big data problems.}, } @article {pmid20701754, year = {2010}, author = {Langmead, B and Hansen, KD and Leek, JT}, title = {Cloud-scale RNA-sequencing differential expression analysis with Myrna.}, journal = {Genome biology}, volume = {11}, number = {8}, pages = {R83}, pmid = {20701754}, issn = {1474-760X}, support = {R01 HG005220/HG/NHGRI NIH HHS/United States ; P41HG004059/HG/NHGRI NIH HHS/United States ; R01HG005220/HG/NHGRI NIH HHS/United States ; }, mesh = {Computational Biology/*methods ; Gene Expression Profiling/methods ; High-Throughput Nucleotide Sequencing ; Internet ; Sequence Analysis, RNA/*methods ; *Software ; }, abstract = {As sequencing throughput approaches dozens of gigabases per day, there is a growing need for efficient software for analysis of transcriptome sequencing (RNA-Seq) data. Myrna is a cloud-computing pipeline for calculating differential gene expression in large RNA-Seq datasets. We apply Myrna to the analysis of publicly available data sets and assess the goodness of fit of standard statistical models. 
Myrna is available from http://bowtie-bio.sf.net/myrna.}, } @article {pmid20691073, year = {2010}, author = {Dudley, JT and Pouliot, Y and Chen, R and Morgan, AA and Butte, AJ}, title = {Translational bioinformatics in the cloud: an affordable alternative.}, journal = {Genome medicine}, volume = {2}, number = {8}, pages = {51}, pmid = {20691073}, issn = {1756-994X}, support = {R01 LM009719/LM/NLM NIH HHS/United States ; T15 LM007033/LM/NLM NIH HHS/United States ; }, abstract = {With the continued exponential expansion of publicly available genomic data and access to low-cost, high-throughput molecular technologies for profiling patient populations, computational technologies and informatics are becoming vital considerations in genomic medicine. Although cloud computing technology is being heralded as a key enabling technology for the future of genomic research, available case studies are limited to applications in the domain of high-throughput sequence data analysis. The goal of this study was to evaluate the computational and economic characteristics of cloud computing in performing a large-scale data integration and analysis representative of research problems in genomic medicine. 
We find that the cloud-based analysis compares favorably in both performance and cost in comparison to a local computational cluster, suggesting that cloud computing technologies might be a viable resource for facilitating large-scale translational research in genomic medicine.}, } @article {pmid20683645, year = {2010}, author = {Ekins, S and Gupta, RR and Gifford, E and Bunin, BA and Waller, CL}, title = {Chemical space: missing pieces in cheminformatics.}, journal = {Pharmaceutical research}, volume = {27}, number = {10}, pages = {2035-2039}, pmid = {20683645}, issn = {1573-904X}, mesh = {*Chemistry, Pharmaceutical/methods/trends ; Databases, Factual ; *Informatics/methods/trends ; *Information Storage and Retrieval/methods/trends ; *Quantitative Structure-Activity Relationship ; Software ; }, abstract = {Cheminformatics is at a turning point, the pharmaceutical industry benefits from using the various methods developed over the last twenty years, but in our opinion we need to see greater development of novel approaches that non-experts can use. This will be achieved by more collaborations between software companies, academics and the evolving pharmaceutical industry. We suggest that cheminformatics should also be looking to other industries that use high performance computing technologies for inspiration. 
We describe the needs and opportunities which may benefit from the development of open cheminformatics technologies, mobile computing, the movement of software to the cloud and precompetitive initiatives.}, } @article {pmid20622843, year = {2010}, author = {Schatz, MC and Langmead, B and Salzberg, SL}, title = {Cloud computing and the DNA data race.}, journal = {Nature biotechnology}, volume = {28}, number = {7}, pages = {691-693}, pmid = {20622843}, issn = {1546-1696}, support = {R01 GM083873/GM/NIGMS NIH HHS/United States ; R01-LM006845/LM/NLM NIH HHS/United States ; R01 LM006845-08/LM/NLM NIH HHS/United States ; R01 LM006845-10/LM/NLM NIH HHS/United States ; R01 LM006845-09/LM/NLM NIH HHS/United States ; R01 LM006845/LM/NLM NIH HHS/United States ; }, mesh = {*Computational Biology ; *DNA ; Genomics ; }, } @article {pmid20565938, year = {2010}, author = {Loman, NJ and Pallen, MJ}, title = {EntrezAJAX: direct web browser access to the Entrez Programming Utilities.}, journal = {Source code for biology and medicine}, volume = {5}, number = {}, pages = {6}, pmid = {20565938}, issn = {1751-0473}, support = {BB/E011179/1/BB_/Biotechnology and Biological Sciences Research Council/United Kingdom ; }, abstract = {Web applications for biology and medicine often need to integrate data from Entrez services provided by the National Center for Biotechnology Information. However, direct access to Entrez from a web browser is not possible due to 'same-origin' security restrictions. The use of "Asynchronous JavaScript and XML" (AJAX) to create rich, interactive web applications is now commonplace. The ability to access Entrez via AJAX would be advantageous in the creation of integrated biomedical web resources. We describe EntrezAJAX, which provides access to Entrez eUtils and is able to circumvent same-origin browser restrictions. 
EntrezAJAX is easily implemented by JavaScript developers and provides identical functionality as Entrez eUtils as well as enhanced functionality to ease development. We provide easy-to-understand developer examples written in JavaScript to illustrate potential uses of this service. For the purposes of speed, reliability and scalability, EntrezAJAX has been deployed on Google App Engine, a freely available cloud service. The EntrezAJAX webpage is located at http://entrezajax.appspot.com/}, } @article {pmid20544372, year = {2011}, author = {Langer, SG}, title = {Challenges for data storage in medical imaging research.}, journal = {Journal of digital imaging}, volume = {24}, number = {2}, pages = {203-207}, pmid = {20544372}, issn = {1618-727X}, mesh = {*Database Management Systems ; Diagnostic Imaging/*methods ; Humans ; Image Processing, Computer-Assisted/methods ; Information Storage and Retrieval/*methods ; *Internet ; Medical Records Systems, Computerized ; *Software ; }, abstract = {Researchers in medical imaging have multiple challenges for storing, indexing, maintaining viability, and sharing their data. Addressing all these concerns requires a constellation of tools, but not all of them need to be local to the site. In particular, the data storage challenges faced by researchers can begin to require professional information technology skills. With limited human resources and funds, the medical imaging researcher may be better served with an outsourcing strategy for some management aspects. This paper outlines an approach to manage the main objectives faced by medical imaging scientists whose work includes processing and data mining on non-standard file formats, and relating those files to the their DICOM standard descendents. 
The capacity of the approach scales as the researcher's need grows by leveraging the on-demand provisioning ability of cloud computing.}, } @article {pmid20543437, year = {2010}, author = {Yim, WW and Chien, S and Kusumoto, Y and Date, S and Haga, J}, title = {Grid heterogeneity in in-silico experiments: an exploration of drug screening using DOCK on cloud environments.}, journal = {Studies in health technology and informatics}, volume = {159}, number = {}, pages = {181-190}, pmid = {20543437}, issn = {0926-9630}, mesh = {Computer Communication Networks/standards ; Databases as Topic ; Drug Evaluation, Preclinical/*methods ; Humans ; *Software Design ; *User-Computer Interface ; }, abstract = {Large-scale in-silico screening is a necessary part of drug discovery and Grid computing is one answer to this demand. A disadvantage of using Grid computing is the heterogeneous computational environments characteristic of a Grid. In our study, we have found that for the molecular docking simulation program DOCK, different clusters within a Grid organization can yield inconsistent results. Because DOCK in-silico virtual screening (VS) is currently used to help select chemical compounds to test with in-vitro experiments, such differences have little effect on the validity of using virtual screening before subsequent steps in the drug discovery process. However, it is difficult to predict whether the accumulation of these discrepancies over sequentially repeated VS experiments will significantly alter the results if VS is used as the primary means for identifying potential drugs. Moreover, such discrepancies may be unacceptable for other applications requiring more stringent thresholds. This highlights the need for establishing a more complete solution to provide the best scientific accuracy when executing an application across Grids. 
One possible solution to platform heterogeneity in DOCK performance explored in our study involved the use of virtual machines as a layer of abstraction. This study investigated the feasibility and practicality of using virtual machine and recent cloud computing technologies in a biological research application. We examined the differences and variations of DOCK VS variables, across a Grid environment composed of different clusters, with and without virtualization. The uniform computer environment provided by virtual machines eliminated inconsistent DOCK VS results caused by heterogeneous clusters, however, the execution time for the DOCK VS increased. In our particular experiments, overhead costs were found to be an average of 41% and 2% in execution time for two different clusters, while the actual magnitudes of the execution time costs were minimal. Despite the increase in overhead, virtual clusters are an ideal solution for Grid heterogeneity. With greater development of virtual cluster technology in Grid environments, the problem of platform heterogeneity may be eliminated through virtualization, allowing greater usage of VS, and will benefit all Grid applications in general.}, } @article {pmid20543426, year = {2010}, author = {Kraut, A and Moretti, S and Robinson-Rechavi, M and Stockinger, H and Flanders, D}, title = {Phylogenetic code in the cloud - can it meet the expectations?.}, journal = {Studies in health technology and informatics}, volume = {159}, number = {}, pages = {55-63}, pmid = {20543426}, issn = {0926-9630}, mesh = {Computational Biology ; *Computing Methodologies ; *Medical Informatics Applications ; *Phylogeny ; *Software Design ; }, abstract = {Cloud computing has recently become very popular, and several bioinformatics applications exist already in that domain. 
The aim of this article is to analyse a current cloud system with respect to usability, benchmark its performance and compare its user friendliness with a conventional cluster job submission system. Given the current hype on the theme, user expectations are rather high, but current results show that neither the price/performance ratio nor the usage model is very satisfactory for large-scale embarrassingly parallel applications. However, for small to medium scale applications that require CPU time at certain peak times the cloud is a suitable alternative.}, } @article {pmid20543424, year = {2010}, author = {Mohammed, Y and Sax, U and Dickmann, F and Lippert, J and Solodenko, J and von Voigt, G and Smith, M and Rienhoff, O}, title = {On transferring the grid technology to the biomedical community.}, journal = {Studies in health technology and informatics}, volume = {159}, number = {}, pages = {28-39}, pmid = {20543424}, issn = {0926-9630}, mesh = {*Biomedical Technology ; *Computer Communication Networks ; *Medical Informatics ; *Technology Transfer ; }, abstract = {Natural scientists such as physicists pioneered the sharing of computing resources, which resulted in the Grid. The inter domain transfer process of this technology has been an intuitive process. Some difficulties facing the life science community can be understood using the Bozeman's "Effectiveness Model of Technology Transfer". Bozeman's and classical technology transfer approaches deal with technologies that have achieved certain stability. Grid and Cloud solutions are technologies that are still in flux. We illustrate how Grid computing creates new difficulties for the technology transfer process that are not considered in Bozeman's model. We show why the success of health Grids should be measured by the qualified scientific human capital and opportunities created, and not primarily by the market impact. With two examples we show how the Grid technology transfer theory corresponds to the reality. 
We conclude with recommendations that can help improve the adoption of Grid solutions into the biomedical community. These results give a more concise explanation of the difficulties most life science IT projects are facing in the late funding periods, and show some leveraging steps which can help to overcome the "vale of tears".}, } @article {pmid20529877, year = {2010}, author = {Ren, J and Williams, N and Clementi, L and Krishnan, S and Li, WW}, title = {Opal web services for biomedical applications.}, journal = {Nucleic acids research}, volume = {38}, number = {Web Server issue}, pages = {W724-31}, pmid = {20529877}, issn = {1362-4962}, support = {P41 RR08605/RR/NCRR NIH HHS/United States ; }, mesh = {*Biomedical Research ; Computational Biology ; Database Management Systems ; Internet ; *Software ; User-Computer Interface ; }, abstract = {Biomedical applications have become increasingly complex, and they often require large-scale high-performance computing resources with a large number of processors and memory. The complexity of application deployment and the advances in cluster, grid and cloud computing require new modes of support for biomedical research. Scientific Software as a Service (sSaaS) enables scalable and transparent access to biomedical applications through simple standards-based Web interfaces. Towards this end, we built a production web server (http://ws.nbcr.net) in August 2007 to support the bioinformatics application called MEME. The server has grown since to include docking analysis with AutoDock and AutoDock Vina, electrostatic calculations using PDB2PQR and APBS, and off-target analysis using SMAP. All the applications on the servers are powered by Opal, a toolkit that allows users to wrap scientific applications easily as web services without any modification to the scientific codes, by writing simple XML configuration files. Opal allows both web forms-based access and programmatic access of all our applications. 
The Opal toolkit currently supports SOAP-based Web service access to a number of popular applications from the National Biomedical Computation Resource (NBCR) and affiliated collaborative and service projects. In addition, Opal's programmatic access capability allows our applications to be accessed through many workflow tools, including Vision, Kepler, Nimrod/K and VisTrails. From mid-August 2007 to the end of 2009, we have successfully executed 239,814 jobs. The number of successfully executed jobs more than doubled from 205 to 411 per day between 2008 and 2009. The Opal-enabled service model is useful for a wide range of applications. It provides for interoperation with other applications with Web Service interfaces, and allows application developers to focus on the scientific tool and workflow development. Web server availability: http://ws.nbcr.net.}, } @article {pmid20482786, year = {2010}, author = {Wall, DP and Kudtarkar, P and Fusaro, VA and Pivovarov, R and Patil, P and Tonellato, PJ}, title = {Cloud computing for comparative genomics.}, journal = {BMC bioinformatics}, volume = {11}, number = {}, pages = {259}, pmid = {20482786}, issn = {1471-2105}, support = {R01 MH090611/MH/NIMH NIH HHS/United States ; LM009261/LM/NLM NIH HHS/United States ; }, mesh = {Algorithms ; Computational Biology/*methods ; *Genome ; Genomics/*methods ; }, abstract = {BACKGROUND: Large comparative genomics studies and tools are becoming increasingly more compute-expensive as the number of available genome sequences continues to rise. The capacity and cost of local computing infrastructures are likely to become prohibitive with the increase, especially as the breadth of questions continues to rise. Alternative computing architectures, in particular cloud computing environments, may help alleviate this increasing pressure and enable fast, large-scale, and cost-effective comparative genomics strategies going forward. 
To test this, we redesigned a typical comparative genomics algorithm, the reciprocal smallest distance algorithm (RSD), to run within Amazon's Elastic Computing Cloud (EC2). We then employed the RSD-cloud for ortholog calculations across a wide selection of fully sequenced genomes.

RESULTS: We ran more than 300,000 RSD-cloud processes within the EC2. These jobs were farmed simultaneously to 100 high capacity compute nodes using the Amazon Web Service Elastic Map Reduce and included a wide mix of large and small genomes. The total computation time took just under 70 hours and cost a total of \$6,302 USD.

CONCLUSIONS: The effort to transform existing comparative genomics algorithms from local compute infrastructures is not trivial. However, the speed and flexibility of cloud computing environments provides a substantial boost with manageable cost. The procedure designed to transform the RSD algorithm into a cloud-ready application is readily adaptable to similar comparative genomics problems.}, } @article {pmid20480558, year = {2010}, author = {Chatterjee, S}, title = {Silver lining. Cloud computing brings savings, risks.}, journal = {Modern healthcare}, volume = {40}, number = {18}, pages = {26}, pmid = {20480558}, issn = {0160-7480}, mesh = {*Cost Savings ; Hospital Information Systems/*economics/organization & administration ; *Internet ; *Medical Informatics Applications ; Medical Records Systems, Computerized/organization & administration ; United States ; }, } @article {pmid20460430, year = {2010}, author = {Li, H and Homer, N}, title = {A survey of sequence alignment algorithms for next-generation sequencing.}, journal = {Briefings in bioinformatics}, volume = {11}, number = {5}, pages = {473-483}, pmid = {20460430}, issn = {1477-4054}, support = {1U01HG005208-01/HG/NHGRI NIH HHS/United States ; 1U01HG005210-01/HG/NHGRI NIH HHS/United States ; }, mesh = {*Algorithms ; *Base Sequence ; Genome, Human ; Humans ; Sequence Alignment/*methods ; Sequence Analysis, DNA/*methods ; Software ; }, abstract = {Rapidly evolving sequencing technologies produce data on an unparalleled scale. A central challenge to the analysis of this data is sequence alignment, whereby sequence reads must be compared to a reference. A wide variety of alignment algorithms and software have been subsequently developed over the past two years. In this article, we will systematically review the current development of these algorithms and introduce their practical applications on different types of experimental data. 
We come to the conclusion that short-read alignment is no longer the bottleneck of data analyses. We also consider future development of alignment algorithms with respect to emerging long sequence reads and the prospect of cloud computing.}, } @article {pmid20443161, year = {2010}, author = {Geiger, K}, title = {Cloud computing in pharmaceutical R\&D: business risks and mitigations.}, journal = {Current opinion in drug discovery \& development}, volume = {13}, number = {3}, pages = {279-285}, pmid = {20443161}, issn = {2040-3437}, mesh = {Clinical Trials as Topic/*methods ; *Computing Methodologies ; Drug Industry/*methods ; Management Information Systems/*trends ; Risk Management/*methods ; }, abstract = {Cloud computing provides information processing power and business services, delivering these services over the Internet from centrally hosted locations. Major technology corporations aim to supply these services to every sector of the economy. Deploying business processes 'in the cloud' requires special attention to the regulatory and business risks assumed when running on both hardware and software that are outside the direct control of a company. The identification of risks at the correct service level allows a good mitigation strategy to be selected. The pharmaceutical industry can take advantage of existing risk management strategies that have already been tested in the finance and electronic commerce sectors. 
In this review, the business risks associated with the use of cloud computing are discussed, and mitigations achieved through knowledge from securing services for electronic commerce and from good IT practice are highlighted.}, } @article {pmid20441614, year = {2010}, author = {Stein, LD}, title = {The case for cloud computing in genome informatics.}, journal = {Genome biology}, volume = {11}, number = {5}, pages = {207}, pmid = {20441614}, issn = {1474-760X}, mesh = {Computational Biology/economics/*methods ; Genome, Human/*genetics ; Humans ; Sequence Analysis, DNA/economics/*methods ; }, abstract = {With DNA sequencing now getting cheaper more quickly than data storage or computation, the time may have come for genome informatics to migrate to the cloud.}, } @article {pmid20362938, year = {2010}, author = {Andriole, KP and Khorasani, R}, title = {Cloud computing: what is it and could it be useful?.}, journal = {Journal of the American College of Radiology : JACR}, volume = {7}, number = {4}, pages = {252-254}, doi = {10.1016/j.jacr.2010.01.009}, pmid = {20362938}, issn = {1558-349X}, mesh = {*Computing Methodologies ; *Delivery of Health Care ; Humans ; *Information Management ; }, } @article {pmid20331751, year = {2010}, author = {Hunt, RJ and Luchette, J and Schreuder, WA and Rumbaugh, JO and Doherty, J and Tonkin, MJ and Rumbaugh, DB}, title = {Using a cloud to replenish parched groundwater modeling efforts.}, journal = {Ground water}, volume = {48}, number = {3}, pages = {360-365}, doi = {10.1111/j.1745-6584.2010.00699.x}, pmid = {20331751}, issn = {1745-6584}, mesh = {*Models, Theoretical ; *Water Movements ; }, abstract = {Groundwater models can be improved by introduction of additional parameter flexibility and simultaneous use of soft-knowledge. However, these sophisticated approaches have high computational requirements. Cloud computing provides unprecedented access to computing power via the Internet to facilitate the use of these techniques. 
A modeler can create, launch, and terminate "virtual" computers as needed, paying by the hour, and save machine images for future use. Such cost-effective and flexible computing power empowers groundwater modelers to routinely perform model calibration and uncertainty analysis in ways not previously possible.}, } @article {pmid20298528, year = {2010}, author = {Ertl, P}, title = {Molecular structure input on the web.}, journal = {Journal of cheminformatics}, volume = {2}, number = {1}, pages = {1}, pmid = {20298528}, issn = {1758-2946}, abstract = {A molecule editor, that is program for input and editing of molecules, is an indispensable part of every cheminformatics or molecular processing system. This review focuses on a special type of molecule editors, namely those that are used for molecule structure input on the web. Scientific computing is now moving more and more in the direction of web services and cloud computing, with servers scattered all around the Internet. Thus a web browser has become the universal scientific user interface, and a tool to edit molecules directly within the web browser is essential.The review covers a history of web-based structure input, starting with simple text entry boxes and early molecule editors based on clickable maps, before moving to the current situation dominated by Java applets. One typical example - the popular JME Molecule Editor - will be described in more detail. Modern Ajax server-side molecule editors are also presented. 
And finally, the possible future direction of web-based molecule editing, based on technologies like JavaScript and Flash, is discussed.}, } @article {pmid20134078, year = {2010}, author = {Memon, FN and Owen, AM and Sanchez-Graillet, O and Upton, GJ and Harrison, AP}, title = {Identifying the impact of G-quadruplexes on Affymetrix 3' arrays using cloud computing.}, journal = {Journal of integrative bioinformatics}, volume = {7}, number = {2}, pages = {111}, doi = {10.2390/biecoll-jib-2010-111}, pmid = {20134078}, issn = {1613-4516}, support = {BB/E001742/1/BB_/Biotechnology and Biological Sciences Research Council/United Kingdom ; }, mesh = {Computational Biology/methods ; DNA/chemistry ; Databases, Genetic ; *G-Quadruplexes ; Guanine/*chemistry ; Oligonucleotide Array Sequence Analysis/*methods ; RNA/chemistry ; }, abstract = {A tetramer quadruplex structure is formed by four parallel strands of DNA/ RNA containing runs of guanine. These quadruplexes are able to form because guanine can Hoogsteen hydrogen bond to other guanines, and a tetrad of guanines can form a stable arrangement. Recently we have discovered that probes on Affymetrix GeneChips that contain runs of guanine do not measure gene expression reliably. We associate this finding with the likelihood that quadruplexes are forming on the surface of GeneChips. In order to cope with the rapidly expanding size of GeneChip array datasets in the public domain, we are exploring the use of cloud computing to replicate our experiments on 3' arrays to look at the effect of the location of G-spots (runs of guanines). Cloud computing is a recently introduced high-performance solution that takes advantage of the computational infrastructure of large organisations such as Amazon and Google. We expect that cloud computing will become widely adopted because it enables bioinformaticians to avoid capital expenditure on expensive computing resources and to only pay a cloud computing provider for what is used. 
Moreover, as well as financial efficiency, cloud computing is an ecologically-friendly technology, it enables efficient data-sharing and we expect it to be faster for development purposes. Here we propose the advantageous use of cloud computing to perform a large data-mining analysis of public domain 3' arrays.}, } @article {pmid20062015, year = {2010}, author = {}, title = {Gathering clouds and a sequencing storm: why cloud computing could broaden community access to next-generation sequencing.}, journal = {Nature biotechnology}, volume = {28}, number = {1}, pages = {1}, pmid = {20062015}, issn = {1546-1696}, mesh = {*Access to Information ; Algorithms ; Base Sequence ; Computational Biology/*methods ; Genome, Human/genetics ; Humans ; Sequence Analysis, DNA/economics/*instrumentation/statistics & numerical data/*trends ; Software ; }, } @article {pmid23569576, year = {2010}, author = {Price, RC and Pettey, W and Freeman, T and Keahey, K and Leecaster, M and Samore, M and Tobias, J and Facelli, JC}, title = {SaTScan on a Cloud: On-Demand Large Scale Spatial Analysis of Epidemics.}, journal = {Online journal of public health informatics}, volume = {2}, number = {1}, pages = {}, pmid = {23569576}, issn = {1947-2579}, support = {KL2 RR025763/RR/NCRR NIH HHS/United States ; P01 HK000069/HK/PHITPO CDC HHS/United States ; T15 LM007124/LM/NLM NIH HHS/United States ; }, abstract = {By using cloud computing it is possible to provide on- demand resources for epidemic analysis using computer intensive applications like SaTScan. Using 15 virtual machines (VM) on the Nimbus cloud we were able to reduce the total execution time for the same ensemble run from 8896 seconds in a single machine to 842 seconds in the cloud. 
Using the caBIG tools and our iterative software development methodology the time required to complete the implementation of the SaTScan cloud system took approximately 200 man-hours, which represents an effort that can be secured within the resources available at State Health Departments. The approach proposed here is technically advantageous and practically possible.}, } @article {pmid21364795, year = {2010}, author = {Shankaranarayanan, A and Amaldas, C}, title = {A comparative analysis of dynamic grids vs. virtual grids using the A3pviGrid framework.}, journal = {Bioinformation}, volume = {5}, number = {5}, pages = {186-190}, pmid = {21364795}, issn = {0973-2063}, abstract = {With the proliferation of Quad/Multi-core micro-processors in mainstream platforms such as desktops and workstations; a large number of unused CPU cycles can be utilized for running virtual machines (VMs) as dynamic nodes in distributed environments. Grid services and its service oriented business broker now termed cloud computing could deploy image based virtualization platforms enabling agent based resource management and dynamic fault management. In this paper we present an efficient way of utilizing heterogeneous virtual machines on idle desktops as an environment for consumption of high performance grid services. Spurious and exponential increases in the size of the datasets are constant concerns in medical and pharmaceutical industries due to the constant discovery and publication of large sequence databases. Traditional algorithms are not modeled at handling large data sizes under sudden and dynamic changes in the execution environment as previously discussed. This research was undertaken to compare our previous results with running the same test dataset with that of a virtual Grid platform using virtual machines (Virtualization). 
The implemented architecture, A3pviGrid utilizes game theoretic optimization and agent based team formation (Coalition) algorithms to improve upon scalability with respect to team formation. Due to the dynamic nature of distributed systems (as discussed in our previous work) all interactions were made local within a team transparently. This paper is a proof of concept of an experimental mini-Grid test-bed compared to running the platform on local virtual machines on a local test cluster. This was done to give every agent its own execution platform enabling anonymity and better control of the dynamic environmental parameters. We also analyze performance and scalability of Blast in a multiple virtual node setup and present our findings. This paper is an extension of our previous research on improving the BLAST application framework using dynamic Grids on virtualization platforms such as the virtual box.}, } @article {pmid20029585, year = {2009}, author = {Chen, RH and Wilkinson, TD}, title = {Computer generated hologram from point cloud using graphics processor.}, journal = {Applied optics}, volume = {48}, number = {36}, pages = {6841-6850}, doi = {10.1364/AO.48.006841}, pmid = {20029585}, issn = {1539-4522}, abstract = {Computer generated holography is an extremely demanding and complex task when it comes to providing realistic reconstructions with full parallax, occlusion, and shadowing. We present an algorithm designed for data-parallel computing on modern graphics processing units to alleviate the computational burden. We apply Gaussian interpolation to create a continuous surface representation from discrete input object points. The algorithm maintains a potential occluder list for each individual hologram plane sample to keep the number of visibility tests to a minimum. We experimented with two approximations that simplify and accelerate occlusion computation. 
It is observed that letting several neighboring hologram plane samples share visibility information on object points leads to significantly faster computation without causing noticeable artifacts in the reconstructed images. Computing a reduced sample set via nonuniform sampling is also found to be an effective acceleration technique.}, } @article {pmid19936236, year = {2009}, author = {Meir, A and Rubinsky, B}, title = {Distributed network, wireless and cloud computing enabled 3-D ultrasound; a new medical technology paradigm.}, journal = {PloS one}, volume = {4}, number = {11}, pages = {e7974}, pmid = {19936236}, issn = {1932-6203}, mesh = {Algorithms ; *Computer Communication Networks ; Data Compression ; Equipment Design ; Humans ; Image Processing, Computer-Assisted/methods ; Imaging, Three-Dimensional ; Medical Laboratory Science ; Neural Networks, Computer ; Pattern Recognition, Automated ; Phantoms, Imaging ; Reproducibility of Results ; Signal Processing, Computer-Assisted ; Telemedicine/methods ; Ultrasonography/*instrumentation/*methods ; }, abstract = {Medical technologies are indispensable to modern medicine. However, they have become exceedingly expensive and complex and are not available to the economically disadvantaged majority of the world population in underdeveloped as well as developed parts of the world. For example, according to the World Health Organization about two thirds of the world population does not have access to medical imaging. In this paper we introduce a new medical technology paradigm centered on wireless technology and cloud computing that was designed to overcome the problems of increasing health technology costs. We demonstrate the value of the concept with an example; the design of a wireless, distributed network and central (cloud) computing enabled three-dimensional (3-D) ultrasound system. 
Specifically, we demonstrate the feasibility of producing a 3-D high end ultrasound scan at a central computing facility using the raw data acquired at the remote patient site with an inexpensive low end ultrasound transducer designed for 2-D, through a mobile device and wireless connection link between them. Producing high-end 3D ultrasound images with simple low-end transducers reduces the cost of imaging by orders of magnitude. It also removes the requirement of having a highly trained imaging expert at the patient site, since the need for hand-eye coordination and the ability to reconstruct a 3-D mental image from 2-D scans, which is a necessity for high quality ultrasound imaging, is eliminated. This could enable relatively untrained medical workers in developing nations to administer imaging and a more accurate diagnosis, effectively saving the lives of people.}, } @article {pmid19930550, year = {2009}, author = {Langmead, B and Schatz, MC and Lin, J and Pop, M and Salzberg, SL}, title = {Searching for SNPs with cloud computing.}, journal = {Genome biology}, volume = {10}, number = {11}, pages = {R134}, pmid = {19930550}, issn = {1474-760X}, support = {R01 GM083873/GM/NIGMS NIH HHS/United States ; R01-LM006845/LM/NLM NIH HHS/United States ; R01 LM006845-10/LM/NLM NIH HHS/United States ; R01 HG004885/HG/NHGRI NIH HHS/United States ; R01 GM083873-06/GM/NIGMS NIH HHS/United States ; R01 LM006845/LM/NLM NIH HHS/United States ; R01-HG004885/HG/NHGRI NIH HHS/United States ; }, mesh = {Algorithms ; Alleles ; Chromosomes/ultrastructure ; Chromosomes, Human, Pair 22/genetics ; Chromosomes, Human, X/genetics ; Computational Biology/methods ; *Computer Simulation ; Computers ; Heterozygote ; Humans ; Models, Genetic ; *Polymorphism, Single Nucleotide ; Sequence Analysis, DNA ; *Software ; }, abstract = {As DNA sequencing outpaces improvements in computer speed, there is a critical need to accelerate tasks like alignment and SNP calling. 
Crossbow is a cloud-computing software tool that combines the aligner Bowtie and the SNP caller SOAPsnp. Executing in parallel using Hadoop, Crossbow analyzes data comprising 38-fold coverage of the human genome in three hours using a 320-CPU cluster rented from a cloud computing service for about $85. Crossbow is available from http://bowtie-bio.sourceforge.net/crossbow/.}, } @article {pmid19884057, year = {2010}, author = {Horn, G and Oommen, BJ}, title = {Solving multiconstraint assignment problems using learning automata.}, journal = {IEEE transactions on systems, man, and cybernetics. Part B, Cybernetics : a publication of the IEEE Systems, Man, and Cybernetics Society}, volume = {40}, number = {1}, pages = {6-18}, doi = {10.1109/TSMCB.2009.2032528}, pmid = {19884057}, issn = {1941-0492}, abstract = {This paper considers the NP-hard problem of object assignment with respect to multiple constraints: assigning a set of elements (or objects) into mutually exclusive classes (or groups), where the elements which are "similar" to each other are hopefully located in the same class. The literature reports solutions in which the similarity constraint consists of a single index that is inappropriate for the type of multiconstraint problems considered here and where the constraints could simultaneously be contradictory. This feature, where we permit possibly contradictory constraints, distinguishes this paper from the state of the art. Indeed, we are aware of no learning automata (or other heuristic) solutions which solve this problem in its most general setting. Such a scenario is illustrated with the static mapping problem, which consists of distributing the processes of a parallel application onto a set of computing nodes. This is a classical and yet very important problem within the areas of parallel computing, grid computing, and cloud computing. 
We have developed four learning-automata (LA)-based algorithms to solve this problem: First, a fixed-structure stochastic automata algorithm is presented, where the processes try to form pairs to go onto the same node. This algorithm solves the problem, although it requires some centralized coordination. As it is desirable to avoid centralized control, we subsequently present three different variable-structure stochastic automata (VSSA) algorithms, which have superior partitioning properties in certain settings, although they forfeit some of the scalability features of the fixed-structure algorithm. All three VSSA algorithms model the processes as automata having first the hosting nodes as possible actions; second, the processes as possible actions; and, third, attempting to estimate the process communication digraph prior to probabilistically mapping the processes. This paper, which, we believe, comprehensively reports the pioneering LA solutions to this problem, unequivocally demonstrates that LA can play an important role in solving complex combinatorial and integer optimization problems.}, } @article {pmid19846207, year = {2009}, author = {Robinson, BH}, title = {E-waste: an assessment of global production and environmental impacts.}, journal = {The Science of the total environment}, volume = {408}, number = {2}, pages = {183-191}, doi = {10.1016/j.scitotenv.2009.09.044}, pmid = {19846207}, issn = {1879-1026}, mesh = {Conservation of Natural Resources ; *Electronics ; *Environmental Pollution ; Humans ; Refuse Disposal/*methods ; }, abstract = {E-waste comprises discarded electronic appliances, of which computers and mobile telephones are disproportionately abundant because of their short lifespan. The current global production of E-waste is estimated to be 20-25 million tonnes per year, with most E-waste being produced in Europe, the United States and Australasia. 
China, Eastern Europe and Latin America will become major E-waste producers in the next ten years. Miniaturisation and the development of more efficient cloud computing networks, where computing services are delivered over the internet from remote locations, may offset the increase in E-waste production from global economic growth and the development of pervasive new technologies. E-waste contains valuable metals (Cu, platinum group) as well as potential environmental contaminants, especially Pb, Sb, Hg, Cd, Ni, polybrominated diphenyl ethers (PBDEs), and polychlorinated biphenyls (PCBs). Burning E-waste may generate dioxins, furans, polycyclic aromatic hydrocarbons (PAHs), polyhalogenated aromatic hydrocarbons (PHAHs), and hydrogen chloride. The chemical composition of E-waste changes with the development of new technologies and pressure from environmental organisations on electronics companies to find alternatives to environmentally damaging materials. Most E-waste is disposed in landfills. Effective reprocessing technology, which recovers the valuable materials with minimal environmental impact, is expensive. Consequently, although illegal under the Basel Convention, rich countries export an unknown quantity of E-waste to poor countries, where recycling techniques include burning and dissolution in strong acids with few measures to protect human health and the environment. Such reprocessing initially results in extreme localised contamination followed by migration of the contaminants into receiving waters and food chains. E-waste workers suffer negative health effects through skin contact and inhalation, while the wider community are exposed to the contaminants through smoke, dust, drinking water and food. 
There is evidence that E-waste associated contaminants may be present in some agricultural or manufactured products for export.}, } @article {pmid19732427, year = {2009}, author = {Wagener, J and Spjuth, O and Willighagen, EL and Wikberg, JE}, title = {XMPP for cloud computing in bioinformatics supporting discovery and invocation of asynchronous web services.}, journal = {BMC bioinformatics}, volume = {10}, number = {}, pages = {279}, pmid = {19732427}, issn = {1471-2105}, mesh = {Computational Biology/*methods ; *Computer Communication Networks ; Information Storage and Retrieval/*methods ; *Internet ; *Software ; User-Computer Interface ; }, abstract = {BACKGROUND: Life sciences make heavy use of the web for both data provision and analysis. However, the increasing amount of available data and the diversity of analysis tools call for machine accessible interfaces in order to be effective. HTTP-based Web service technologies, like the Simple Object Access Protocol (SOAP) and REpresentational State Transfer (REST) services, are today the most common technologies for this in bioinformatics. However, these methods have severe drawbacks, including lack of discoverability, and the inability for services to send status notifications. Several complementary workarounds have been proposed, but the results are ad-hoc solutions of varying quality that can be difficult to use.

RESULTS: We present a novel approach based on the open standard Extensible Messaging and Presence Protocol (XMPP), consisting of an extension (IO Data) to comprise discovery, asynchronous invocation, and definition of data types in the service. That XMPP cloud services are capable of asynchronous communication implies that clients do not have to poll repetitively for status, but the service sends the results back to the client upon completion. Implementations for Bioclipse and Taverna are presented, as are various XMPP cloud services in bio- and cheminformatics.

CONCLUSION: XMPP with its extensions is a powerful protocol for cloud services that demonstrate several advantages over traditional HTTP-based Web services: 1) services are discoverable without the need of an external registry, 2) asynchronous invocation eliminates the need for ad-hoc solutions like polling, and 3) input and output types defined in the service allows for generation of clients on the fly without the need of an external semantics description. The many advantages over existing technologies make XMPP a highly interesting candidate for next generation online services in bioinformatics.}, } @article {pmid19715773, year = {2010}, author = {Rosenthal, A and Mork, P and Li, MH and Stanford, J and Koester, D and Reynolds, P}, title = {Cloud computing: a new business paradigm for biomedical information sharing.}, journal = {Journal of biomedical informatics}, volume = {43}, number = {2}, pages = {342-353}, doi = {10.1016/j.jbi.2009.08.014}, pmid = {19715773}, issn = {1532-0480}, mesh = {Biomedical Research ; *Computer Communication Networks ; Computer Security ; *Database Management Systems ; Information Dissemination ; Information Storage and Retrieval/*methods ; *Internet ; *Medical Informatics ; Software ; }, abstract = {We examine how the biomedical informatics (BMI) community, especially consortia that share data and applications, can take advantage of a new resource called "cloud computing". Clouds generally offer resources on demand. In most clouds, charges are pay per use, based on large farms of inexpensive, dedicated servers, sometimes supporting parallel computing. Substantial economies of scale potentially yield costs much lower than dedicated laboratory systems or even institutional data centers. Overall, even with conservative assumptions, for applications that are not I/O intensive and do not demand a fully mature environment, the numbers suggested that clouds can sometimes provide major improvements, and should be seriously considered for BMI. 
Methodologically, it was very advantageous to formulate analyses in terms of component technologies; focusing on these specifics enabled us to bypass the cacophony of alternative definitions (e.g., exactly what does a cloud include) and to analyze alternatives that employ some of the component technologies (e.g., an institution's data center). Relative analyses were another great simplifier. Rather than listing the absolute strengths and weaknesses of cloud-based systems (e.g., for security or data preservation), we focus on the changes from a particular starting point, e.g., individual lab systems. We often find a rough parity (in principle), but one needs to examine individual acquisitions--is a loosely managed lab moving to a well managed cloud, or a tightly managed hospital data center moving to a poorly safeguarded cloud?}, } @article {pmid19593055, year = {2009}, author = {Dickmann, F and Kaspar, M and Löhnhardt, B and Knoch, TA and Sax, U}, title = {Perspectives of MediGRID.}, journal = {Studies in health technology and informatics}, volume = {147}, number = {}, pages = {173-182}, pmid = {19593055}, issn = {0926-9630}, mesh = {Computational Biology/*economics/organization & administration ; Germany ; *Information Dissemination ; }, abstract = {Sustainability is a top priority for nearly all grid communities. The German grid communities in the area of life sciences are continuing their dissemination efforts in order to bring the grid to scientists. With cloud computing another concept for distributed IT infrastructures is on the rise. In this regard the grid has a different focus and matches better with life science compute power demands. 
A comparison of both grid and cloud in addition to the background and present status of the German life science grid give a contemporary impression of the future perspectives of MediGRID.}, } @article {pmid19531204, year = {2009}, author = {Hobson, JC}, title = {Revalidation, multisource feedback and cloud computing.}, journal = {Clinical otolaryngology : official journal of ENT-UK ; official journal of Netherlands Society for Oto-Rhino-Laryngology & Cervico-Facial Surgery}, volume = {34}, number = {3}, pages = {295-296}, doi = {10.1111/j.1749-4486.2009.01949.x}, pmid = {19531204}, issn = {1749-4486}, mesh = {*Electronic Data Processing ; *Employee Performance Appraisal ; *Feedback ; Humans ; Internship and Residency ; *Otolaryngology/education/standards ; Patient Care/*standards ; Practice Patterns, Physicians'/organization & administration ; Referral and Consultation/standards ; Surveys and Questionnaires ; }, } @article {pmid19451100, year = {2009}, author = {Gu, Y and Grossman, RL}, title = {Sector and Sphere: the design and implementation of a high-performance data cloud.}, journal = {Philosophical transactions. Series A, Mathematical, physical, and engineering sciences}, volume = {367}, number = {1897}, pages = {2429-2445}, pmid = {19451100}, issn = {1364-503X}, abstract = {Cloud computing has demonstrated that processing very large datasets over commodity clusters can be done simply, given the right programming model and infrastructure. In this paper, we describe the design and implementation of the Sector storage cloud and the Sphere compute cloud. By contrast with the existing storage and compute clouds, Sector can manage data not only within a data centre, but also across geographically distributed data centres. Similarly, the Sphere compute cloud supports user-defined functions (UDFs) over data both within and across data centres. As a special case, MapReduce-style programming can be implemented in Sphere by using a Map UDF followed by a Reduce UDF. 
We describe some experimental studies comparing Sector/Sphere and Hadoop using the Terasort benchmark. In these studies, Sector is approximately twice as fast as Hadoop. Sector/Sphere is open source.}, } @article {pmid19435745, year = {2009}, author = {Bateman, A and Wood, M}, title = {Cloud computing.}, journal = {Bioinformatics (Oxford, England)}, volume = {25}, number = {12}, pages = {1475}, doi = {10.1093/bioinformatics/btp274}, pmid = {19435745}, issn = {1367-4811}, mesh = {Genome ; Genomics/*methods ; *Software ; }, } @article {pmid19391778, year = {2009}, author = {Vigneron, JP and Ouedraogo, M and Colomer, JF and Rassart, M}, title = {Spectral sideband produced by a hemispherical concave multilayer on the African shield-bug Calidea panaethiopica (Scutelleridae).}, journal = {Physical review. E, Statistical, nonlinear, and soft matter physics}, volume = {79}, number = {2 Pt 1}, pages = {021907}, doi = {10.1103/PhysRevE.79.021907}, pmid = {19391778}, issn = {1539-3755}, mesh = {Animals ; Computer Simulation ; Insecta/*chemistry/*physiology ; *Models, Biological ; *Models, Chemical ; Refractometry ; Spectrum Analysis ; }, abstract = {The African shield-backed bug Calidea panaethiopica is a very colorful insect which produces a range of iridescent yellow, green, and blue reflections. The cuticle of the dorsal side of the insect, on the shield, the prothorax and part of the head, is pricked of uniformly distributed hemispherical hollow cavities a few tens micrometers deep. Under normal illumination and viewing the insect's muffin-tin shaped surface gives rise to two distinct colors: a yellow spot arising from the bottom of the well and a blue annular cloud that appears to float around the yellow spot. This effect is explained by multiple reflections on a hemispherical Bragg mirror with a mesoscopic curvature. A multiscale computing methodology was found to be needed to evaluate the reflection spectrum for such a curved multilayer. 
This multiscale approach is very general and should be useful for dealing with visual effects in many natural and artificial systems.}, } @article {pmid19381256, year = {2009}, author = {Shotton, D and Portwin, K and Klyne, G and Miles, A}, title = {Adventures in semantic publishing: exemplar semantic enhancements of a research article.}, journal = {PLoS computational biology}, volume = {5}, number = {4}, pages = {e1000361}, pmid = {19381256}, issn = {1553-7358}, mesh = {Information Dissemination/*methods ; *Natural Language Processing ; *Periodicals as Topic ; *Publishing ; *Research Design ; *Semantics ; *Writing ; }, abstract = {Scientific innovation depends on finding, integrating, and re-using the products of previous research. Here we explore how recent developments in Web technology, particularly those related to the publication of data and metadata, might assist that process by providing semantic enhancements to journal articles within the mainstream process of scholarly journal publishing. We exemplify this by describing semantic enhancements we have made to a recent biomedical research article taken from PLoS Neglected Tropical Diseases, providing enrichment to its content and increased access to datasets within it. These semantic enhancements include provision of live DOIs and hyperlinks; semantic markup of textual terms, with links to relevant third-party information resources; interactive figures; a re-orderable reference list; a document summary containing a study summary, a tag cloud, and a citation analysis; and two novel types of semantic enrichment: the first, a Supporting Claims Tooltip to permit "Citations in Context", and the second, Tag Trees that bring together semantically related terms. 
In addition, we have published downloadable spreadsheets containing data from within tables and figures, have enriched these with provenance information, and have demonstrated various types of data fusion (mashups) with results from other research articles and with Google Maps. We have also published machine-readable RDF metadata both about the article and about the references it cites, for which we developed a Citation Typing Ontology, CiTO (http://purl.org/net/cito/). The enhanced article, which is available at http://dx.doi.org/10.1371/journal.pntd.0000228.x001, presents a compelling existence proof of the possibilities of semantic publication. We hope the showcase of examples and ideas it contains, described in this paper, will excite the imaginations of researchers and publishers, stimulating them to explore the possibilities of semantic publishing for their own research articles, and thereby break down present barriers to the discovery and re-use of information within traditional modes of scholarly communication.}, } @article {pmid19368363, year = {2009}, author = {Brukhno, AV and Akesson, T and Jönsson, B}, title = {Phase behavior in suspensions of highly charged colloids.}, journal = {The journal of physical chemistry. B}, volume = {113}, number = {19}, pages = {6766-6774}, doi = {10.1021/jp811147v}, pmid = {19368363}, issn = {1520-6106}, abstract = {Attractive interactions between like-charged aggregates (macromolecules, colloidal particles, or micelles) in solution due to electrostatic correlation effects are revisited. The associated phenomenon of phase separation in a colloidal solution of highly charged particles is directly observed in Monte Carlo simulations. We start with a simple, yet instructive, description of polarization effects in a "cloud" of counterions around a single charged aggregate and show how the ion-ion correlations can be mapped onto a classical analogue of the quantum-mechanical dispersion force. 
We then extend our treatment to the effective pair interaction between two such aggregates and provide an analysis of different interaction regimes, based on a simple coupling parameter. By computing the potential of mean force, we illustrate the physics behind the crossover between the regimes of pure repulsion and attraction with increasing counterion valency. Finally, we turn to semi grand NpT simulations of the corresponding bulk systems where mono- and multivalent ions can exchange with an external reservoir. Thus, the coagulation and phase separation phenomena, widely observed and used in real-life applications, are directly studied in these computer simulations.}, } @article {pmid19358578, year = {2009}, author = {Halligan, BD and Geiger, JF and Vallejos, AK and Greene, AS and Twigger, SN}, title = {Low cost, scalable proteomics data analysis using Amazon's cloud computing services and open source search algorithms.}, journal = {Journal of proteome research}, volume = {8}, number = {6}, pages = {3148-3153}, pmid = {19358578}, issn = {1535-3893}, support = {N01 HV028182/HL/NHLBI NIH HHS/United States ; N01 HV-28182/HV/NHLBI NIH HHS/United States ; }, mesh = {*Algorithms ; Cluster Analysis ; Databases, Protein ; Internet ; Proteomics/*methods ; *Software ; }, abstract = {One of the major difficulties for many laboratories setting up proteomics programs has been obtaining and maintaining the computational infrastructure required for the analysis of the large flow of proteomics data. We describe a system that combines distributed cloud computing and open source software to allow laboratories to set up scalable virtual proteomics analysis clusters without the investment in computational hardware or software licensing fees. Additionally, the pricing structure of distributed computing providers, such as Amazon Web Services, allows laboratories or even individuals to have large-scale computational resources at their disposal at a very low cost per run. 
We provide detailed step-by-step instructions on how to implement the virtual proteomics analysis clusters as well as a list of current available preconfigured Amazon machine images containing the OMSSA and X!Tandem search algorithms and sequence databases on the Medical College of Wisconsin Proteomics Center Web site (http://proteomics.mcw.edu/vipdac).}, } @article {pmid19357099, year = {2009}, author = {Schatz, MC}, title = {CloudBurst: highly sensitive read mapping with MapReduce.}, journal = {Bioinformatics (Oxford, England)}, volume = {25}, number = {11}, pages = {1363-1369}, pmid = {19357099}, issn = {1367-4811}, support = {R01 LM006845/LM/NLM NIH HHS/United States ; }, mesh = {*Algorithms ; Animals ; Computational Biology/*methods ; DNA ; Genome ; Humans ; Internet ; Sequence Alignment ; Sequence Analysis, DNA/*methods ; }, abstract = {MOTIVATION: Next-generation DNA sequencing machines are generating an enormous amount of sequence data, placing unprecedented demands on traditional single-processor read-mapping algorithms. CloudBurst is a new parallel read-mapping algorithm optimized for mapping next-generation sequence data to the human genome and other reference genomes, for use in a variety of biological analyses including SNP discovery, genotyping and personal genomics. It is modeled after the short read-mapping program RMAP, and reports either all alignments or the unambiguous best alignment for each read with any number of mismatches or differences. This level of sensitivity could be prohibitively time consuming, but CloudBurst uses the open-source Hadoop implementation of MapReduce to parallelize execution using multiple compute nodes.

RESULTS: CloudBurst's running time scales linearly with the number of reads mapped, and with near linear speedup as the number of processors increases. In a 24-processor core configuration, CloudBurst is up to 30 times faster than RMAP executing on a single core, while computing an identical set of alignments. Using a larger remote compute cloud with 96 cores, CloudBurst improved performance by >100-fold, reducing the running time from hours to mere minutes for typical jobs involving mapping of millions of short reads to the human genome.

AVAILABILITY: CloudBurst is available open-source as a model for parallelizing algorithms with MapReduce at (http://cloudburst-bio.sourceforge.net/).}, } @article {pmid23567568, year = {2009}, author = {Kreuter, A and Zangerl, M and Schwarzmann, M and Blumthaler, M}, title = {All-sky imaging: a simple, versatile system for atmospheric research.}, journal = {Applied optics}, volume = {48}, number = {6}, pages = {1091-1097}, doi = {10.1364/ao.48.001091}, pmid = {23567568}, issn = {1559-128X}, abstract = {A simple and inexpensive fully automated all-sky imaging system based on a commercial digital camera with a fish-eye lens and a rotating polarizer is presented. The system is characterized and two examples of applications in atmospheric physics are given: polarization maps and cloud detection. All-sky polarization maps are obtained by acquiring images at different polarizer angles and computing Stokes vectors. The polarization in the principal plane, a vertical cut through the sky containing the Sun, is compared to measurements of a well-characterized spectroradiometer with polarized radiance optics to validate the method. The images are further used for automated cloud detection using a simple color-ratio algorithm. The resulting cloud cover is validated against synoptic cloud observations. 
A Sun coverage parameter is introduced that shows, in combination with the total cloud cover, useful correlation with UV irradiance.}, } @article {pmid21567691, year = {2009}, author = {Parslow, GR}, title = {Commentary: Cloud computing-Offloading data and processing to the Web.}, journal = {Biochemistry and molecular biology education : a bimonthly publication of the International Union of Biochemistry and Molecular Biology}, volume = {37}, number = {1}, pages = {63-64}, doi = {10.1002/bmb.20253}, pmid = {21567691}, issn = {1470-8175}, } @article {pmid18324281, year = {1999}, author = {Mishchenko, MI and Geogdzhayev, IV and Cairns, B and Rossow, WB and Lacis, AA}, title = {Aerosol retrievals over the ocean by use of channels 1 and 2 AVHRR data: sensitivity analysis and preliminary results.}, journal = {Applied optics}, volume = {38}, number = {36}, pages = {7325-7341}, doi = {10.1364/ao.38.007325}, pmid = {18324281}, issn = {1559-128X}, abstract = {We outline the methodology of interpreting channels 1 and 2 Advanced Very High Resolution Radiometer (AVHRR) radiance data over the oceans and describe a detailed analysis of the sensitivity of monthly averages of retrieved aerosol parameters to the assumptions made in different retrieval algorithms. The analysis is based on using real AVHRR data and exploiting accurate numerical techniques for computing single and multiple scattering and spectral absorption of light in the vertically inhomogeneous atmosphere-ocean system. We show that two-channel algorithms can be expected to provide significantly more accurate and less biased retrievals of the aerosol optical thickness than one-channel algorithms and that imperfect cloud screening and calibration uncertainties are by far the largest sources of errors in the retrieved aerosol parameters. 
Both underestimating and overestimating aerosol absorption as well as the potentially strong variability of the real part of the aerosol refractive index may lead to regional and/or seasonal biases in optical-thickness retrievals. The Ångström exponent appears to be the aerosol size characteristic that is least sensitive to the choice of aerosol model and should be retrieved along with optical thickness as the second aerosol parameter.}, } @article {pmid18252332, year = {1999}, author = {Pajares, G and Cruz, JM and Lopez-Orozco, JA}, title = {Stereo matching using Hebbian learning.}, journal = {IEEE transactions on systems, man, and cybernetics. Part B, Cybernetics : a publication of the IEEE Systems, Man, and Cybernetics Society}, volume = {29}, number = {4}, pages = {553-559}, doi = {10.1109/3477.775274}, pmid = {18252332}, issn = {1083-4419}, abstract = {This paper presents an approach to the local stereo matching problem using edge segments as features with several attributes. We have verified that the differences in attributes for the true matches cluster in a cloud around a center. The correspondence is established on the basis of the minimum distance criterion, computing the Mahalanobis distance between the difference of the attributes for a current pair of features and the cluster center (similarity constraint). We introduce a learning strategy based on the Hebbian Learning to get the best cluster center. 
A comparative analysis among methods without learning and with other learning strategies is illustrated.}, } @article {pmid17170483, year = {2007}, author = {Leymarie, FF and Kimia, BB}, title = {The medial scaffold of 3D unorganized point clouds.}, journal = {IEEE transactions on pattern analysis and machine intelligence}, volume = {29}, number = {2}, pages = {313-330}, doi = {10.1109/TPAMI.2007.44}, pmid = {17170483}, issn = {0162-8828}, mesh = {*Algorithms ; *Artificial Intelligence ; Image Enhancement/*methods ; Image Interpretation, Computer-Assisted/*methods ; Imaging, Three-Dimensional/*methods ; Information Storage and Retrieval/methods ; Pattern Recognition, Automated/*methods ; }, abstract = {We introduce the notion of the medial scaffold, a hierarchical organization of the medial axis of a 3D shape in the form of a graph constructed from special medial curves connecting special medial points. A key advantage of the scaffold is that it captures the qualitative aspects of shape in a hierarchical and tightly condensed representation. We propose an efficient and exact method for computing the medial scaffold based on a notion of propagation along the scaffold itself, starting from initial sources of the flow and constructing the scaffold during the propagation. We examine this method specifically in the context of an unorganized cloud of points in 3D, e.g., as obtained from laser range finders, which typically involve hundreds of thousands of points, but the ideas are generalizable to data arising from geometrically described surface patches. The computational bottleneck in the propagation-based scheme is in finding the initial sources of the flow. We thus present several ideas to avoid the unnecessary consideration of pairs of points which cannot possibly form a medial point source, such as the "visibility" of a point from another given a third point and the interaction of clusters of points. 
An application of using the medial scaffold for the representation of point samplings of real-life objects is also illustrated.}, } @article {pmid15837497, year = {2005}, author = {Mayer, RP and Stowe, RA}, title = {Nodoids and toroids: comparison of two geometries for the meniscus profile of a wetting liquid between two touching isolated spheres and extensions to the model of a collection of packed spheres.}, journal = {Journal of colloid and interface science}, volume = {285}, number = {2}, pages = {781-788}, doi = {10.1016/j.jcis.2004.11.067}, pmid = {15837497}, issn = {0021-9797}, abstract = {In the mid 1960s the present authors published two papers dealing with penetration of nonwetting liquids such as mercury into the interstitial void spaces using the model of uniform packed spheres. A circular arc was used to approximate the liquid-vapor interface in both papers. However, our circular arc-toroid values for the pressure-volume relationship in the pendular ring which exists between two touching spheres was criticized. The authors concluded that our approximation led to unacceptably large differences compared to the values calculated from the exact nodoid shape. This incorrect conclusion was never rebutted and has, in fact, been misinterpreted by subsequent workers to include values calculated for the shape of the access opening and the associated pressure for penetration into the void space of a collection of spheres. This leaves a cloud of uncertainty, not only over our original work on nonwetting fluids, but on the application of our procedures to the field of wetting fluids. The contrast in the geometrical shapes of the toroid and nodoid is depicted and the pressure values are compared at equal volumes. In contrast to the claim of excessive error, we show the toroid geometry, in conjunction with a pressure-volume work derivation, to have a maximum error of 0.06% as compared to the nodoid at all liquid-solid contact angles. 
The toroid also has the advantage of using a readily derived work versus surface free energy balance rather than requiring the use of incomplete elliptic integrals to evaluate the nodoid. Attempts to use radii of curvature to evaluate the toroid shape are shown to give extremely poor approximations of the exact values for the pressure. Values reported for access to the interior void space of a collection of spheres still need adjustment for the effect of contact angles between 0 degrees and 180 degrees for characterizing assemblies of real solids by computing "equivalent spherical" particle size from porosity and mercury penetration data. However, there is no anticipation that use of the circular arc will introduce large errors in the results. This gives confidence to us and others working with wetting media to test the potential applicability of the packed sphere model to various diverse fields.}, } @article {pmid12011433, year = {2002}, author = {Grishaev, A and Llinás, M}, title = {CLOUDS, a protocol for deriving a molecular proton density via NMR.}, journal = {Proceedings of the National Academy of Sciences of the United States of America}, volume = {99}, number = {10}, pages = {6707-6712}, pmid = {12011433}, issn = {0027-8424}, support = {R01 HL029409/HL/NHLBI NIH HHS/United States ; HL-29409/HL/NHLBI NIH HHS/United States ; }, mesh = {Humans ; Kringles ; Matrix Metalloproteinase 2/*chemistry ; Models, Molecular ; Nuclear Magnetic Resonance, Biomolecular/methods ; Plasminogen/*chemistry ; Protein Structure, Tertiary ; Protons ; }, abstract = {We demonstrate the feasibility of computing realistic spatial proton distributions for proteins in solution from experimental NMR nuclear Overhauser effect data only and with minimal assignments. The method, CLOUDS, relies on precise and abundant interproton distance restraints calculated via a relaxation matrix analysis of sets of experimental nuclear Overhauser effect spectroscopy crosspeaks. 
The MIDGE protocol was adapted for this purpose. A gas of unassigned, unconnected H atoms is condensed into a structured proton distribution (cloud) via a molecular dynamics simulated-annealing scheme in which the internuclear distances and van der Waals repulsive terms are the only active restraints. Proton densities are generated by combining a large number of such clouds, each computed from a different trajectory. After filtering by reference to the cloud closest to the mean, a minimal dispersion proton density (foc) is identified. The latter affords a quasi-continuous hydrogen-only probability distribution that conveys immediate information on the protein surface topology (grooves, protrusions, potential binding site cavities, etc.), directly related to the molecular structure. Feasibility of the method was tested on NMR data measured on two globular protein domains of low regular secondary structure content, the col 2 domain of human matrix metalloproteinase-2 and the kringle 2 domain of human plasminogen, of 60 and 83 amino acid residues, respectively.}, } @article {pmid11778038, year = {2002}, author = {Ward-Thompson, D}, title = {Isolated star formation: from cloud formation to core collapse.}, journal = {Science (New York, N.Y.)}, volume = {295}, number = {5552}, pages = {76-81}, doi = {10.1126/science.1067354}, pmid = {11778038}, issn = {0036-8075}, abstract = {The formation of stars is one of the most fundamental problems in astrophysics, as it underlies many other questions, on scales from the formation of galaxies to the formation of the solar system. The physical processes involve the turbulent behavior of a partially ionized medium containing a non-uniform magnetic field. Current debate centers around the time taken for turbulence to decay and the relative importance of the roles played by magnetic fields and turbulence. 
Technological advances such as millimeter-wave cameras have made possible observations of the temperature and density profiles, and statistical calculations of the lifetimes, of objects collapsing under their own self-gravity and those on the verge of collapse. Increased computing power allows more complex models to be made that include magnetic and turbulent effects. No current model can reproduce all of the observations.}, } @article {pmid20372475, year = {1982}, author = {Deepak, A and Farrukh, UO and Zardecki, A}, title = {Significance of higher-order multiple scattering for laser beam propagation through hazes, fogs, and clouds.}, journal = {Applied optics}, volume = {21}, number = {3}, pages = {439-447}, doi = {10.1364/AO.21.000439}, pmid = {20372475}, issn = {1559-128X}, abstract = {An approach is outlined for computing the different orders of scattering in any medium that possesses a phase function with a strong forward peak. Computations are done for the case of a Gaussian laser beam incident on such a medium. The formulation adopted does reproduce the natural divergence of general Gaussian beams without the need to assume the presence of point sources or the need to assume perfectly collimated beams within the region of interest. Results are discussed for the case of water cloud particles with a strongly forward-peaked phase function for the incident laser radiation.}, } @article {pmid20076202, year = {1970}, author = {Brennan, B and Bandeen, WR}, title = {Anisotropic reflectance characteristics of natural Earth surfaces.}, journal = {Applied optics}, volume = {9}, number = {2}, pages = {405-412}, doi = {10.1364/AO.9.000405}, pmid = {20076202}, issn = {1559-128X}, abstract = {The patterns of reflection of solar radiation from cloud, water, and land surfaces were measured with an aircraft-borne medium resolution radiometer. Reflectances in the 0.2-4.0-micro and 0.55-0.85-micro portions of the electromagnetic spectrum were investigated. 
Results indicate that the reflectance characteristics of most of the surface types measured are anisotropic. The anisotropy is dependent on the type of surface and the angles of incidence and reflection. In general, the anisotropy increases with increasing solar zenith angle. Clouds and forests show similar reflectance patterns, with forward and backward scattering peaks. Ocean surfaces yield a pattern similar to those of the clouds and forests but with an additional peak which is associated with sun glitter. Reflectances measured in the 0.2-4.0-micro band are generally lower than those in the 0.55-0.85-micro band under cloudy conditions. Anisotropy and spectral bandwidth should be accounted for when computing the albedo of the earth from narrow field-of-view measurements from satellites; otherwise, large errors may be expected to occur.}, }